From bae3280c78ef9b4940dce17c6576d3d19b92ca2d Mon Sep 17 00:00:00 2001
From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com>
Date: Sat, 9 Nov 2024 10:06:17 +0400
Subject: [PATCH 01/65] [Backport 8.x] Vectorstore: use a retriever query for hybrid search (#2681)

Co-authored-by: Miguel Grinberg
Fixes #2651
---
 .../helpers/vectorstore/_async/strategies.py |  39 +++-
 .../helpers/vectorstore/_sync/strategies.py  |  39 +++-
 .../test_vectorstore/test_vectorstore.py     | 188 +++++++++++++-----
 3 files changed, 194 insertions(+), 72 deletions(-)

diff --git a/elasticsearch/helpers/vectorstore/_async/strategies.py b/elasticsearch/helpers/vectorstore/_async/strategies.py
index a7f813f43..10524e243 100644
--- a/elasticsearch/helpers/vectorstore/_async/strategies.py
+++ b/elasticsearch/helpers/vectorstore/_async/strategies.py
@@ -283,10 +283,9 @@ def _hybrid(
     ) -> Dict[str, Any]:
         # Add a query to the knn query.
         # RRF is used to even the score from the knn query and text query
-        # RRF has two optional parameters: {'rank_constant':int, 'window_size':int}
+        # RRF has two optional parameters: {'rank_constant':int, 'rank_window_size':int}
         # https://www.elastic.co/guide/en/elasticsearch/reference/current/rrf.html
-        query_body = {
-            "knn": knn,
+        standard_query = {
             "query": {
                 "bool": {
                     "must": [
@@ -300,14 +299,36 @@ def _hybrid(
                     ],
                     "filter": filter,
                 }
-            },
+            }
         }
 
-        if isinstance(self.rrf, Dict):
-            query_body["rank"] = {"rrf": self.rrf}
-        elif isinstance(self.rrf, bool) and self.rrf is True:
-            query_body["rank"] = {"rrf": {}}
-
+        if self.rrf is False:
+            query_body = {
+                "knn": knn,
+                **standard_query,
+            }
+        else:
+            rrf_options = {}
+            if isinstance(self.rrf, Dict):
+                if "rank_constant" in self.rrf:
+                    rrf_options["rank_constant"] = self.rrf["rank_constant"]
+                if "window_size" in self.rrf:
+                    # 'window_size' was renamed to 'rank_window_size', but we support
+                    # the older name for backwards compatibility
+                    rrf_options["rank_window_size"] = self.rrf["window_size"]
+                if "rank_window_size" in self.rrf:
+                    rrf_options["rank_window_size"] = self.rrf["rank_window_size"]
+            query_body = {
+                "retriever": {
+                    "rrf": {
+                        "retrievers": [
+                            {"standard": standard_query},
+                            {"knn": knn},
+                        ],
+                        **rrf_options,
+                    },
+                },
+            }
         return query_body
 
     def needs_inference(self) -> bool:
diff --git a/elasticsearch/helpers/vectorstore/_sync/strategies.py b/elasticsearch/helpers/vectorstore/_sync/strategies.py
index 928d34143..99c9baec2 100644
--- a/elasticsearch/helpers/vectorstore/_sync/strategies.py
+++ b/elasticsearch/helpers/vectorstore/_sync/strategies.py
@@ -283,10 +283,9 @@ def _hybrid(
     ) -> Dict[str, Any]:
         # Add a query to the knn query.
# RRF is used to even the score from the knn query and text query - # RRF has two optional parameters: {'rank_constant':int, 'window_size':int} + # RRF has two optional parameters: {'rank_constant':int, 'rank_window_size':int} # https://www.elastic.co/guide/en/elasticsearch/reference/current/rrf.html - query_body = { - "knn": knn, + standard_query = { "query": { "bool": { "must": [ @@ -300,14 +299,36 @@ def _hybrid( ], "filter": filter, } - }, + } } - if isinstance(self.rrf, Dict): - query_body["rank"] = {"rrf": self.rrf} - elif isinstance(self.rrf, bool) and self.rrf is True: - query_body["rank"] = {"rrf": {}} - + if self.rrf is False: + query_body = { + "knn": knn, + **standard_query, + } + else: + rrf_options = {} + if isinstance(self.rrf, Dict): + if "rank_constant" in self.rrf: + rrf_options["rank_constant"] = self.rrf["rank_constant"] + if "window_size" in self.rrf: + # 'window_size' was renamed to 'rank_window_size', but we support + # the older name for backwards compatibility + rrf_options["rank_window_size"] = self.rrf["window_size"] + if "rank_window_size" in self.rrf: + rrf_options["rank_window_size"] = self.rrf["rank_window_size"] + query_body = { + "retriever": { + "rrf": { + "retrievers": [ + {"standard": standard_query}, + {"knn": knn}, + ], + **rrf_options, + }, + }, + } return query_body def needs_inference(self) -> bool: diff --git a/test_elasticsearch/test_server/test_vectorstore/test_vectorstore.py b/test_elasticsearch/test_server/test_vectorstore/test_vectorstore.py index 820746acd..096beaef5 100644 --- a/test_elasticsearch/test_server/test_vectorstore/test_vectorstore.py +++ b/test_elasticsearch/test_server/test_vectorstore/test_vectorstore.py @@ -33,6 +33,7 @@ VectorStore, ) from elasticsearch.helpers.vectorstore._sync._utils import model_is_deployed +from test_elasticsearch.utils import es_version from . 
import ConsistentFakeEmbeddings, FakeEmbeddings @@ -337,6 +338,9 @@ def test_search_knn_with_hybrid_search( self, sync_client: Elasticsearch, index: str ) -> None: """Test end to end construction and search with metadata.""" + if es_version(sync_client) < (8, 14): + pytest.skip("This test requires Elasticsearch 8.14 or newer") + store = VectorStore( index=index, retrieval_strategy=DenseVectorStrategy(hybrid=True), @@ -349,20 +353,48 @@ def test_search_knn_with_hybrid_search( def assert_query(query_body: dict, query: Optional[str]) -> dict: assert query_body == { - "knn": { - "field": "vector_field", - "filter": [], - "k": 1, - "num_candidates": 50, - "query_vector": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - }, - "query": { - "bool": { - "filter": [], - "must": [{"match": {"text_field": {"query": "foo"}}}], + "retriever": { + "rrf": { + "retrievers": [ + { + "standard": { + "query": { + "bool": { + "filter": [], + "must": [ + { + "match": { + "text_field": {"query": "foo"} + } + } + ], + } + }, + }, + }, + { + "knn": { + "field": "vector_field", + "filter": [], + "k": 1, + "num_candidates": 50, + "query_vector": [ + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 0.0, + ], + }, + }, + ], } - }, - "rank": {"rrf": {}}, + } } return query_body @@ -373,6 +405,9 @@ def test_search_knn_with_hybrid_search_rrf( self, sync_client: Elasticsearch, index: str ) -> None: """Test end to end construction and rrf hybrid search with metadata.""" + if es_version(sync_client) < (8, 14): + pytest.skip("This test requires Elasticsearch 8.14 or newer") + texts = ["foo", "bar", "baz"] def assert_query( @@ -380,48 +415,67 @@ def assert_query( query: Optional[str], expected_rrf: Union[dict, bool], ) -> dict: - cmp_query_body = { - "knn": { - "field": "vector_field", - "filter": [], - "k": 3, - "num_candidates": 50, - "query_vector": [ - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 1.0, - 0.0, - ], - }, + standard_query = { "query": { "bool": { "filter": [], "must": [{"match": {"text_field": {"query": "foo"}}}], } - }, + } + } + knn_query = { + "field": "vector_field", + "filter": [], + "k": 3, + "num_candidates": 50, + "query_vector": [ + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 0.0, + ], } - if isinstance(expected_rrf, dict): - cmp_query_body["rank"] = {"rrf": expected_rrf} - elif isinstance(expected_rrf, bool) and expected_rrf is True: - cmp_query_body["rank"] = {"rrf": {}} + if expected_rrf is not False: + cmp_query_body = { + "retriever": { + "rrf": { + "retrievers": [ + {"standard": standard_query}, + {"knn": knn_query}, + ], + } + } + } + if isinstance(expected_rrf, dict): + cmp_query_body["retriever"]["rrf"].update(expected_rrf) + else: + cmp_query_body = { + "knn": knn_query, + **standard_query, + } assert query_body == cmp_query_body return query_body # 1. check query_body is okay - rrf_test_cases: List[Union[dict, bool]] = [ - True, - False, - {"rank_constant": 1, "window_size": 5}, - ] + if es_version(sync_client) >= (8, 14): + rrf_test_cases: List[Union[dict, bool]] = [ + True, + False, + {"rank_constant": 1, "rank_window_size": 5}, + ] + else: + # for 8.13.x and older there is no retriever query, so we can only + # run hybrid searches with rrf=False + rrf_test_cases: List[Union[dict, bool]] = [False] for rrf_test_case in rrf_test_cases: store = VectorStore( index=index, @@ -441,21 +495,47 @@ def assert_query( # 2. 
check query result is okay es_output = store.client.search( index=index, - query={ - "bool": { - "filter": [], - "must": [{"match": {"text_field": {"query": "foo"}}}], + retriever={ + "rrf": { + "retrievers": [ + { + "knn": { + "field": "vector_field", + "filter": [], + "k": 3, + "num_candidates": 50, + "query_vector": [ + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 1.0, + 0.0, + ], + }, + }, + { + "standard": { + "query": { + "bool": { + "filter": [], + "must": [ + {"match": {"text_field": {"query": "foo"}}} + ], + } + }, + }, + }, + ], + "rank_constant": 1, + "rank_window_size": 5, } }, - knn={ - "field": "vector_field", - "filter": [], - "k": 3, - "num_candidates": 50, - "query_vector": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], - }, size=3, - rank={"rrf": {"rank_constant": 1, "window_size": 5}}, ) assert [o["_source"]["text_field"] for o in output] == [ From 9669072b38e590239e249bcc9df37884d3ae6321 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sat, 9 Nov 2024 10:20:26 +0400 Subject: [PATCH 02/65] [Backport 8.x] Switch from Elasticsearch YAML tests to client tests (#2683) Co-authored-by: Quentin Pradet --- .buildkite/pipeline.yml | 2 +- .buildkite/run-repository.sh | 1 + noxfile.py | 2 +- .../test_async/test_server/conftest.py | 16 +- .../test_server/test_rest_api_spec.py | 14 +- .../test_server/test_rest_api_spec.py | 194 ++++++------------ .../test_server/test_vectorstore/__init__.py | 2 +- .../test_vectorstore/test_vectorstore.py | 11 +- test_elasticsearch/utils.py | 104 ++++++---- 9 files changed, 157 insertions(+), 189 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index ff911719e..16bf81360 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -5,7 +5,7 @@ steps: env: PYTHON_VERSION: "{{ matrix.python }}" TEST_SUITE: "platinum" - STACK_VERSION: "8.11.0-SNAPSHOT" + STACK_VERSION: "8.16.0-SNAPSHOT" PYTHON_CONNECTION_CLASS: "{{ matrix.connection }}" NOX_SESSION: "{{ matrix.nox_session }}" matrix: diff --git a/.buildkite/run-repository.sh b/.buildkite/run-repository.sh index 2f1f3c263..ce9344e8d 100755 --- a/.buildkite/run-repository.sh +++ b/.buildkite/run-repository.sh @@ -43,6 +43,7 @@ docker run \ --env "TEST_SUITE=${TEST_SUITE}" \ --env "PYTHON_CONNECTION_CLASS=${PYTHON_CONNECTION_CLASS}" \ --env "TEST_TYPE=server" \ + --env "FORCE_COLOR=1" \ --name elasticsearch-py \ --rm \ elastic/elasticsearch-py \ diff --git a/noxfile.py b/noxfile.py index 600120bb3..2f9bc3322 100644 --- a/noxfile.py +++ b/noxfile.py @@ -49,7 +49,7 @@ def pytest_argv(): def test(session): session.install(".[dev]", env=INSTALL_ENV, silent=False) - session.run(*pytest_argv()) + session.run(*pytest_argv(), *session.posargs) @nox.session(python=["3.8", "3.12"]) diff --git a/test_elasticsearch/test_async/test_server/conftest.py b/test_elasticsearch/test_async/test_server/conftest.py index fc475bc75..623646e7d 100644 --- a/test_elasticsearch/test_async/test_server/conftest.py +++ b/test_elasticsearch/test_async/test_server/conftest.py @@ -26,7 +26,7 @@ @pytest_asyncio.fixture(scope="function") -async def async_client(elasticsearch_url): +async def async_client_factory(elasticsearch_url): if not hasattr(elasticsearch, "AsyncElasticsearch"): pytest.skip("test requires 'AsyncElasticsearch' and aiohttp to be installed") @@ -36,11 +36,17 @@ async def async_client(elasticsearch_url): # event loops (one per test!) 
client = None try: - client = elasticsearch.AsyncElasticsearch( - elasticsearch_url, request_timeout=3, ca_certs=CA_CERTS - ) + client = elasticsearch.AsyncElasticsearch(elasticsearch_url, ca_certs=CA_CERTS) yield client finally: if client: - wipe_cluster(client) await client.close() + + +@pytest.fixture(scope="function") +def async_client(async_client_factory): + try: + yield async_client_factory + finally: + # Wipe the cluster clean after every test execution. + wipe_cluster(async_client_factory) diff --git a/test_elasticsearch/test_async/test_server/test_rest_api_spec.py b/test_elasticsearch/test_async/test_server/test_rest_api_spec.py index eee2364f6..c48262b61 100644 --- a/test_elasticsearch/test_async/test_server/test_rest_api_spec.py +++ b/test_elasticsearch/test_async/test_server/test_rest_api_spec.py @@ -130,7 +130,9 @@ async def run_do(self, action): headers.pop("Authorization") method, args = list(action.items())[0] - args["headers"] = headers + + if headers: + args["headers"] = headers # locate api endpoint for m in method.split("."): @@ -239,15 +241,17 @@ async def _feature_enabled(self, name): @pytest_asyncio.fixture(scope="function") -def async_runner(async_client): - return AsyncYamlRunner(async_client) +def async_runner(async_client_factory): + return AsyncYamlRunner(async_client_factory) if RUN_ASYNC_REST_API_TESTS: @pytest.mark.parametrize("test_spec", YAML_TEST_SPECS) async def test_rest_api_spec(test_spec, async_runner): - if test_spec.get("skip", False): - pytest.skip("Manually skipped in 'SKIP_TESTS'") + if test_spec.get("fail", False): + pytest.xfail("Manually marked as failing in 'FAILING_TESTS'") + elif test_spec.get("skip", False): + pytest.xfail("Manually skipped") async_runner.use_spec(test_spec) await async_runner.run() diff --git a/test_elasticsearch/test_server/test_rest_api_spec.py b/test_elasticsearch/test_server/test_rest_api_spec.py index 6ede3b753..058daa121 100644 --- a/test_elasticsearch/test_server/test_rest_api_spec.py +++ b/test_elasticsearch/test_server/test_rest_api_spec.py @@ -32,12 +32,10 @@ import urllib3 import yaml -from elasticsearch import ApiError, Elasticsearch, ElasticsearchWarning, RequestError +from elasticsearch import ApiError, ElasticsearchWarning, RequestError from elasticsearch._sync.client.utils import _base64_auth_header from elasticsearch.compat import string_types -from ..utils import CA_CERTS, es_url, parse_version - # some params had to be changed in python, keep track of them so we can rename # those in the tests accordingly PARAMS_RENAMES = {"from": "from_"} @@ -70,66 +68,37 @@ } # broken YAML tests on some releases -SKIP_TESTS = { - # Warning about date_histogram.interval deprecation is raised randomly - "search/aggregation/250_moving_fn[1]", - # body: null - "indices/simulate_index_template/10_basic[2]", - # No ML node with sufficient capacity / random ML failing - "ml/start_stop_datafeed", - "ml/post_data", - "ml/jobs_crud", - "ml/datafeeds_crud", - "ml/set_upgrade_mode", - "ml/reset_job[2]", - "ml/jobs_get_stats", - "ml/get_datafeed_stats", - "ml/get_trained_model_stats", - "ml/delete_job_force", - "ml/jobs_get_result_overall_buckets", - "ml/bucket_correlation_agg[0]", - "ml/job_groups", - "transform/transforms_stats_continuous[0]", - # Fails bad request instead of 404? - "ml/inference_crud", - # rollup/security_tests time out? - "rollup/security_tests", - # Our TLS certs are custom - "ssl/10_basic[0]", - # Our user is custom - "users/10_basic[3]", - # License warning not sent? 
- "license/30_enterprise_license[0]", - # Shards/snapshots aren't right? - "searchable_snapshots/10_usage[1]", - # flaky data streams? - "data_stream/10_basic[1]", - "data_stream/80_resolve_index_data_streams[1]", - # bad formatting? - "cat/allocation/10_basic", - "runtime_fields/10_keyword[8]", - # service account number not right? - "service_accounts/10_basic[1]", - # doesn't use 'contains' properly? - "xpack/10_basic[0]", - "privileges/40_get_user_privs[0]", - "privileges/40_get_user_privs[1]", - "features/get_features/10_basic[0]", - "features/reset_features/10_basic[0]", - # bad use of 'is_false'? - "indices/get_alias/10_basic[22]", - # unique usage of 'set' - "indices/stats/50_disk_usage[0]", - "indices/stats/60_field_usage[0]", - # actual Elasticsearch failure? - "transform/transforms_stats", - "transform/transforms_cat_apis", - "transform/transforms_update", +FAILING_TESTS = { + # ping has a custom implementation in Python and returns a boolean + "ping/ping", + # Not investigated yet + "cat/aliases", + "cat/fielddata", + "cluster/delete_voting_config_exclusions", + "cluster/voting_config_exclusions", + "entsearch/10_basic", + "indices/clone", + "indices/resolve_cluster", + "indices/settings", + "indices/split", + "indices/simulate_template_stack", + "logstash/10_basic", + "machine_learning/30_trained_model_stack", + "machine_learning/jobs_crud", + "scroll/10_basic", + "security/10_api_key_basic", + "transform/10_basic", +} +SKIPPED_TESTS = { + # Timeouts + # https://github.com/elastic/elasticsearch-serverless-python/issues/63 + "cluster/cluster_info[0]", + "inference/10_basic[0]", + "machine_learning/20_trained_model[0]", } XPACK_FEATURES = None -ES_VERSION = None RUN_ASYNC_REST_API_TESTS = os.environ.get("PYTHON_CONNECTION_CLASS") == "requests" FALSEY_VALUES = ("", None, False, 0, 0.0) @@ -173,16 +142,6 @@ def teardown(self): self.section("teardown") self.run_code(self._teardown_code) - def es_version(self): - global ES_VERSION - if ES_VERSION is None: - version_string = (self.client.info())["version"]["number"] - if "." 
not in version_string: - return () - version = version_string.strip().split(".") - ES_VERSION = tuple(int(v) if v.isdigit() else 999 for v in version) - return ES_VERSION - def section(self, name): print(("=" * 10) + " " + name + " " + ("=" * 10)) @@ -331,16 +290,6 @@ def run_skip(self, skip): continue pytest.skip(f"feature '{feature}' is not supported") - if "version" in skip: - version, reason = skip["version"], skip["reason"] - if version == "all": - pytest.skip(reason) - min_version, _, max_version = version.partition("-") - min_version = parse_version(min_version.strip()) or (0,) - max_version = parse_version(max_version.strip()) or (999,) - if min_version <= (self.es_version()) <= max_version: - pytest.skip(reason) - def run_gt(self, action): for key, value in action.items(): value = self._resolve(value) @@ -516,8 +465,9 @@ def _skip_intentional_type_errors(self, e: Exception): @pytest.fixture(scope="function") -def sync_runner(sync_client): - return YamlRunner(sync_client) +def sync_runner(sync_client_factory): + # sync_client_factory does not wipe the cluster between tests + return YamlRunner(sync_client_factory) # Source: https://stackoverflow.com/a/37958106/5763213 @@ -546,62 +496,36 @@ def remove_implicit_resolver(cls, tag_to_remove): try: # Construct the HTTP and Elasticsearch client http = urllib3.PoolManager(retries=10) - client = Elasticsearch(es_url(), request_timeout=3, ca_certs=CA_CERTS) - - # Make a request to Elasticsearch for the build hash, we'll be looking for - # an artifact with this same hash to download test specs for. - client_info = client.info() - version_number = client_info["version"]["number"] - build_hash = client_info["version"]["build_hash"] - - # Now talk to the artifacts API with the 'STACK_VERSION' environment variable - resp = http.request( - "GET", - f"https://artifacts-api.elastic.co/v1/versions/{version_number}", + + yaml_tests_url = ( + "https://api.github.com/repos/elastic/elasticsearch-clients-tests/zipball/main" ) - resp = json.loads(resp.data.decode("utf-8")) - - # Look through every build and see if one matches the commit hash - # we're looking for. If not it's okay, we'll just use the latest and - # hope for the best! - builds = resp["version"]["builds"] - for build in builds: - if build["projects"]["elasticsearch"]["commit_hash"] == build_hash: - break - else: - build = builds[0] # Use the latest - - # Now we're looking for the 'rest-api-spec--sources.jar' file - # to download and extract in-memory. - packages = build["projects"]["elasticsearch"]["packages"] - for package in packages: - if re.match(r"rest-resources-zip-.*\.zip", package): - package_url = packages[package]["url"] - break - else: - raise RuntimeError( - f"Could not find the package 'rest-resources-zip-*.zip' in build {build!r}" - ) # Download the zip and start reading YAML from the files in memory - package_zip = zipfile.ZipFile(io.BytesIO(http.request("GET", package_url).data)) + package_zip = zipfile.ZipFile(io.BytesIO(http.request("GET", yaml_tests_url).data)) + for yaml_file in package_zip.namelist(): - if not re.match(r"^rest-api-spec/test/.*\.ya?ml$", yaml_file): + if not re.match(r"^.*\/tests\/.*\.ya?ml$", yaml_file): continue yaml_tests = list( yaml.load_all(package_zip.read(yaml_file), Loader=NoDatesSafeLoader) ) - # Each file may have a "test" named 'setup' or 'teardown', - # these sets of steps should be run at the beginning and end - # of every other test within the file so we do one pass to capture those. 
- setup_steps = teardown_steps = None + # Each file has a `requires` section with `serverless` and `stack` + # boolean entries indicating whether the test should run with + # serverless, stack or both. Additionally, each file may have a section + # named 'setup' or 'teardown', these sets of steps should be run at the + # beginning and end of every other test within the file so we do one + # pass to capture those. + requires = setup_steps = teardown_steps = None test_numbers_and_steps = [] test_number = 0 for yaml_test in yaml_tests: test_name, test_step = yaml_test.popitem() - if test_name == "setup": + if test_name == "requires": + requires = test_step + elif test_name == "setup": setup_steps = test_step elif test_name == "teardown": teardown_steps = test_step @@ -609,14 +533,17 @@ def remove_implicit_resolver(cls, tag_to_remove): test_numbers_and_steps.append((test_number, test_step)) test_number += 1 + if not requires["stack"]: + continue + # Now we combine setup, teardown, and test_steps into # a set of pytest.param() instances for test_number, test_step in test_numbers_and_steps: - # Build the id from the name of the YAML file and - # the number within that file. Most important step - # is to remove most of the file path prefixes and - # the .yml suffix. - pytest_test_name = yaml_file.rpartition(".")[0].replace(".", "/") + # Build the id from the name of the YAML file and the number within + # that file. Most important step is to remove most of the file path + # prefixes and the .yml suffix. + test_path = "/".join(yaml_file.split("/")[2:]) + pytest_test_name = test_path.rpartition(".")[0].replace(".", "/") for prefix in ("rest-api-spec/", "test/", "free/", "platinum/"): if pytest_test_name.startswith(prefix): pytest_test_name = pytest_test_name[len(prefix) :] @@ -628,7 +555,9 @@ def remove_implicit_resolver(cls, tag_to_remove): "teardown": teardown_steps, } # Skip either 'test_name' or 'test_name[x]' - if pytest_test_name in SKIP_TESTS or pytest_param_id in SKIP_TESTS: + if pytest_test_name in FAILING_TESTS or pytest_param_id in FAILING_TESTS: + pytest_param["fail"] = True + elif pytest_test_name in SKIPPED_TESTS or pytest_param_id in SKIPPED_TESTS: pytest_param["skip"] = True YAML_TEST_SPECS.append(pytest.param(pytest_param, id=pytest_param_id)) @@ -645,12 +574,13 @@ def _pytest_param_sort_key(param: pytest.param) -> Tuple[Union[str, int], ...]: # Sort the tests by ID so they're grouped together nicely. 
YAML_TEST_SPECS = sorted(YAML_TEST_SPECS, key=_pytest_param_sort_key) - if not RUN_ASYNC_REST_API_TESTS: @pytest.mark.parametrize("test_spec", YAML_TEST_SPECS) def test_rest_api_spec(test_spec, sync_runner): - if test_spec.get("skip", False): - pytest.skip("Manually skipped in 'SKIP_TESTS'") + if test_spec.get("fail", False): + pytest.xfail("Manually marked as failing in 'FAILING_TESTS'") + elif test_spec.get("skip", False): + pytest.skip("Manually marked as skipped") sync_runner.use_spec(test_spec) sync_runner.run() diff --git a/test_elasticsearch/test_server/test_vectorstore/__init__.py b/test_elasticsearch/test_server/test_vectorstore/__init__.py index 87710976a..1b480978c 100644 --- a/test_elasticsearch/test_server/test_vectorstore/__init__.py +++ b/test_elasticsearch/test_server/test_vectorstore/__init__.py @@ -69,7 +69,7 @@ def embed_documents(self, texts: List[str]) -> List[List[float]]: if text not in self.known_texts: self.known_texts.append(text) vector = [float(1.0)] * (self.dimensionality - 1) + [ - float(self.known_texts.index(text)) + float(self.known_texts.index(text) + 1) ] out_vectors.append(vector) return out_vectors diff --git a/test_elasticsearch/test_server/test_vectorstore/test_vectorstore.py b/test_elasticsearch/test_server/test_vectorstore/test_vectorstore.py index 096beaef5..3e17442eb 100644 --- a/test_elasticsearch/test_server/test_vectorstore/test_vectorstore.py +++ b/test_elasticsearch/test_server/test_vectorstore/test_vectorstore.py @@ -73,7 +73,7 @@ def assert_query(query_body: dict, query: Optional[str]) -> dict: "filter": [], "k": 1, "num_candidates": 50, - "query_vector": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], + "query_vector": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0], } } return query_body @@ -81,7 +81,7 @@ def assert_query(query_body: dict, query: Optional[str]) -> dict: store = VectorStore( index=index, retrieval_strategy=DenseVectorStrategy(), - embedding_service=FakeEmbeddings(), + embedding_service=ConsistentFakeEmbeddings(), client=sync_client, ) @@ -98,7 +98,7 @@ def test_search_without_metadata_async( store = VectorStore( index=index, retrieval_strategy=DenseVectorStrategy(), - embedding_service=FakeEmbeddings(), + embedding_service=ConsistentFakeEmbeddings(), client=sync_client, ) @@ -1030,6 +1030,11 @@ def test_metadata_mapping(self, sync_client: Elasticsearch, index: str) -> None: "type": "dense_vector", "dims": 10, "index": True, + "index_options": { + "ef_construction": 100, + "m": 16, + "type": "int8_hnsw", + }, "similarity": "cosine", } diff --git a/test_elasticsearch/utils.py b/test_elasticsearch/utils.py index abff98a55..4a26aa4c0 100644 --- a/test_elasticsearch/utils.py +++ b/test_elasticsearch/utils.py @@ -26,7 +26,6 @@ ConnectionError, Elasticsearch, NotFoundError, - RequestError, ) SOURCE_DIR = Path(__file__).absolute().parent.parent @@ -235,6 +234,7 @@ def wipe_searchable_snapshot_indices(client): def wipe_xpack_templates(client): + # Delete index templates (including legacy) templates = [ x.strip() for x in client.cat.templates(h="name").split("\n") if x.strip() ] @@ -247,26 +247,15 @@ def wipe_xpack_templates(client): if f"index_template [{template}] missing" in str(e): client.indices.delete_index_template(name=template) - # Delete component templates, need to retry because sometimes - # indices aren't cleaned up in time before we issue the delete. 
+ # Delete component templates templates = client.cluster.get_component_template()["component_templates"] templates_to_delete = [ - template for template in templates if not is_xpack_template(template["name"]) + template["name"] + for template in templates + if not is_xpack_template(template["name"]) ] - for _ in range(3): - for template in list(templates_to_delete): - try: - client.cluster.delete_component_template( - name=template["name"], - ) - except RequestError: - pass - else: - templates_to_delete.remove(template) - - if not templates_to_delete: - break - time.sleep(0.01) + if templates_to_delete: + client.cluster.delete_component_template(name=",".join(templates_to_delete)) def wipe_ilm_policies(client): @@ -292,6 +281,9 @@ def wipe_ilm_policies(client): ".monitoring-8-ilm-policy", } and "-history-ilm-polcy" not in policy + and "-meta-ilm-policy" not in policy + and "-data-ilm-policy" not in policy + and "@lifecycle" not in policy ): client.ilm.delete_lifecycle(name=policy) @@ -419,38 +411,68 @@ def wait_for_cluster_state_updates_to_finish(client, timeout=30): def is_xpack_template(name): - if name.startswith(".monitoring-"): + if name.startswith("."): return True - elif name.startswith(".watch") or name.startswith(".triggered_watches"): + elif name.startswith("behavioral_analytics-events"): return True - elif name.startswith(".data-frame-"): + elif name.startswith("elastic-connectors-"): return True - elif name.startswith(".ml-"): + elif name.startswith("entities_v1_"): return True - elif name.startswith(".transform-"): + elif name.endswith("@ilm"): return True - elif name.startswith(".deprecation-"): + elif name.endswith("@template"): return True - if name in { - ".watches", - "security_audit_log", - ".slm-history", - ".async-search", - "saml-service-provider", + + return name in { + "apm-10d@lifecycle", + "apm-180d@lifecycle", + "apm-390d@lifecycle", + "apm-90d@lifecycle", + "apm@mappings", + "apm@settings", + "data-streams-mappings", + "data-streams@mappings", + "elastic-connectors", + "ecs@dynamic_templates", + "ecs@mappings", + "ilm-history-7", + "kibana-reporting@settings", "logs", - "logs-settings", + "logs-apm.error@mappings", + "logs-apm@settings", "logs-mappings", + "logs@mappings", + "logs-settings", + "logs@settings", "metrics", - "metrics-settings", + "metrics-apm@mappings", + "metrics-apm.service_destination@mappings", + "metrics-apm.service_summary@mappings", + "metrics-apm.service_transaction@mappings", + "metrics-apm@settings", + "metrics-apm.transaction@mappings", "metrics-mappings", + "metrics@mappings", + "metrics-settings", + "metrics@settings", + "metrics-tsdb-settings", + "metrics@tsdb-settings", + "search-acl-filter", "synthetics", - "synthetics-settings", "synthetics-mappings", - ".snapshot-blob-cache", - "ilm-history", - "logstash-index-template", - "security-index-template", - "data-streams-mappings", - }: - return True - return False + "synthetics@mappings", + "synthetics-settings", + "synthetics@settings", + "traces-apm@mappings", + "traces-apm.rum@mappings", + "traces@mappings", + "traces@settings", + # otel + "metrics-otel@mappings", + "semconv-resource-to-ecs@mappings", + "traces-otel@mappings", + "ecs-tsdb@mappings", + "logs-otel@mappings", + "otel@mappings", + } From 1ca29629be03f63b0741002d862975590bcafe0c Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Sun, 10 Nov 2024 18:44:18 +0100 Subject: [PATCH 03/65] Auto-generated code for 8.x (#2665) --- elasticsearch/_async/client/__init__.py | 155 +++++--- 
elasticsearch/_async/client/async_search.py | 44 +-- elasticsearch/_async/client/autoscaling.py | 8 +- elasticsearch/_async/client/cat.py | 52 +-- elasticsearch/_async/client/ccr.py | 26 +- elasticsearch/_async/client/cluster.py | 40 +-- elasticsearch/_async/client/connector.py | 130 ++++--- .../_async/client/dangling_indices.py | 39 +- elasticsearch/_async/client/enrich.py | 10 +- elasticsearch/_async/client/eql.py | 8 +- elasticsearch/_async/client/esql.py | 11 +- elasticsearch/_async/client/features.py | 4 +- elasticsearch/_async/client/fleet.py | 2 +- elasticsearch/_async/client/graph.py | 2 +- elasticsearch/_async/client/ilm.py | 28 +- elasticsearch/_async/client/indices.py | 121 +++---- elasticsearch/_async/client/inference.py | 8 +- elasticsearch/_async/client/ingest.py | 18 +- elasticsearch/_async/client/license.py | 14 +- elasticsearch/_async/client/logstash.py | 6 +- elasticsearch/_async/client/migration.py | 6 +- elasticsearch/_async/client/ml.py | 146 ++++---- elasticsearch/_async/client/monitoring.py | 2 +- elasticsearch/_async/client/nodes.py | 14 +- elasticsearch/_async/client/query_rules.py | 67 +++- elasticsearch/_async/client/rollup.py | 16 +- .../_async/client/search_application.py | 35 +- .../_async/client/searchable_snapshots.py | 8 +- elasticsearch/_async/client/security.py | 333 ++++++++++-------- elasticsearch/_async/client/slm.py | 18 +- elasticsearch/_async/client/snapshot.py | 100 +++++- elasticsearch/_async/client/sql.py | 18 +- elasticsearch/_async/client/ssl.py | 21 +- elasticsearch/_async/client/synonyms.py | 14 +- elasticsearch/_async/client/tasks.py | 6 +- elasticsearch/_async/client/text_structure.py | 4 +- elasticsearch/_async/client/transform.py | 22 +- elasticsearch/_async/client/watcher.py | 22 +- elasticsearch/_async/client/xpack.py | 8 +- elasticsearch/_sync/client/__init__.py | 155 +++++--- elasticsearch/_sync/client/async_search.py | 44 +-- elasticsearch/_sync/client/autoscaling.py | 8 +- elasticsearch/_sync/client/cat.py | 52 +-- elasticsearch/_sync/client/ccr.py | 26 +- elasticsearch/_sync/client/cluster.py | 40 +-- elasticsearch/_sync/client/connector.py | 130 ++++--- .../_sync/client/dangling_indices.py | 39 +- elasticsearch/_sync/client/enrich.py | 10 +- elasticsearch/_sync/client/eql.py | 8 +- elasticsearch/_sync/client/esql.py | 11 +- elasticsearch/_sync/client/features.py | 4 +- elasticsearch/_sync/client/fleet.py | 2 +- elasticsearch/_sync/client/graph.py | 2 +- elasticsearch/_sync/client/ilm.py | 28 +- elasticsearch/_sync/client/indices.py | 121 +++---- elasticsearch/_sync/client/inference.py | 8 +- elasticsearch/_sync/client/ingest.py | 18 +- elasticsearch/_sync/client/license.py | 14 +- elasticsearch/_sync/client/logstash.py | 6 +- elasticsearch/_sync/client/migration.py | 6 +- elasticsearch/_sync/client/ml.py | 146 ++++---- elasticsearch/_sync/client/monitoring.py | 2 +- elasticsearch/_sync/client/nodes.py | 14 +- elasticsearch/_sync/client/query_rules.py | 67 +++- elasticsearch/_sync/client/rollup.py | 16 +- .../_sync/client/search_application.py | 35 +- .../_sync/client/searchable_snapshots.py | 8 +- elasticsearch/_sync/client/security.py | 333 ++++++++++-------- elasticsearch/_sync/client/slm.py | 18 +- elasticsearch/_sync/client/snapshot.py | 100 +++++- elasticsearch/_sync/client/sql.py | 18 +- elasticsearch/_sync/client/ssl.py | 21 +- elasticsearch/_sync/client/synonyms.py | 14 +- elasticsearch/_sync/client/tasks.py | 6 +- elasticsearch/_sync/client/text_structure.py | 4 +- elasticsearch/_sync/client/transform.py | 22 +- 
elasticsearch/_sync/client/watcher.py | 22 +- elasticsearch/_sync/client/xpack.py | 8 +- 78 files changed, 1880 insertions(+), 1292 deletions(-) diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py index c07e82039..771420ced 100644 --- a/elasticsearch/_async/client/__init__.py +++ b/elasticsearch/_async/client/__init__.py @@ -644,7 +644,7 @@ async def bulk( in a single API call. This reduces overhead and can greatly increase indexing speed. - ``_ + ``_ :param operations: :param index: Name of the data stream, index, or index alias to perform bulk @@ -743,7 +743,7 @@ async def clear_scroll( """ Clears the search context and results for a scrolling search. - ``_ + ``_ :param scroll_id: Scroll IDs to clear. To clear all scroll IDs, use `_all`. """ @@ -793,7 +793,7 @@ async def close_point_in_time( """ Closes a point-in-time. - ``_ + ``_ :param id: The ID of the point-in-time. """ @@ -867,7 +867,7 @@ async def count( """ Returns number of documents matching a query. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams and indices, omit this @@ -1002,7 +1002,7 @@ async def create( and makes it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. - ``_ + ``_ :param index: Name of the data stream or index to target. If the target doesn’t exist and matches the name or wildcard (`*`) pattern of an index template @@ -1106,7 +1106,7 @@ async def delete( """ Delete a document. Removes a JSON document from the specified index. - ``_ + ``_ :param index: Name of the target index. :param id: Unique identifier for the document. @@ -1228,7 +1228,7 @@ async def delete_by_query( """ Delete documents. Deletes documents that match the specified query. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this @@ -1404,9 +1404,12 @@ async def delete_by_query_rethrottle( requests_per_second: t.Optional[float] = None, ) -> ObjectApiResponse[t.Any]: """ - Changes the number of requests per second for a particular Delete By Query operation. + Throttle a delete by query operation. Change the number of requests per second + for a particular delete by query operation. Rethrottling that speeds up the query + takes effect immediately but rethrotting that slows down the query takes effect + after completing the current batch to prevent scroll timeouts. - ``_ + ``_ :param task_id: The ID for the task. :param requests_per_second: The throttle for this request in sub-requests per @@ -1452,7 +1455,7 @@ async def delete_script( """ Delete a script or search template. Deletes a stored script or search template. - ``_ + ``_ :param id: Identifier for the stored script or search template. :param master_timeout: Period to wait for a connection to the master node. If @@ -1520,7 +1523,7 @@ async def exists( """ Check a document. Checks if a specified document exists. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases. Supports wildcards (`*`). @@ -1621,7 +1624,7 @@ async def exists_source( """ Check for a document source. Checks if a document's `_source` is stored. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases. Supports wildcards (`*`). @@ -1722,7 +1725,7 @@ async def explain( Explain a document match result. 
Returns information about why a specific document matches, or doesn’t match, a query. - ``_ + ``_ :param index: Index names used to limit the request. Only a single index name can be provided to this parameter. @@ -1844,7 +1847,7 @@ async def field_caps( like any other field. For example, a runtime field with a type of keyword is returned as any other field that belongs to the `keyword` family. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams @@ -1961,7 +1964,7 @@ async def get( Get a document by its ID. Retrieves the document with the specified ID from an index. - ``_ + ``_ :param index: Name of the index that contains the document. :param id: Unique identifier of the document. @@ -2050,7 +2053,7 @@ async def get_script( """ Get a script or search template. Retrieves a stored script or search template. - ``_ + ``_ :param id: Identifier for the stored script or search template. :param master_timeout: Specify timeout for connection to master @@ -2090,9 +2093,9 @@ async def get_script_context( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns all script contexts. + Get script contexts. Get a list of supported script contexts and their methods. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_script_context" @@ -2125,9 +2128,9 @@ async def get_script_languages( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns available script types, languages and contexts + Get script languages. Get a list of available script types, languages, and contexts. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_script_language" @@ -2182,7 +2185,7 @@ async def get_source( """ Get a document's source. Returns the source of a document. - ``_ + ``_ :param index: Name of the index that contains the document. :param id: Unique identifier of the document. @@ -2265,7 +2268,7 @@ async def health_report( """ Returns the health of the cluster. - ``_ + ``_ :param feature: A feature of the cluster, as returned by the top-level health report API. @@ -2342,7 +2345,7 @@ async def index( and makes it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. - ``_ + ``_ :param index: Name of the data stream or index to target. :param document: @@ -2451,7 +2454,7 @@ async def info( """ Get cluster info. Returns basic information about the cluster. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/" @@ -2507,7 +2510,7 @@ async def knn_search( """ Performs a kNN search. - ``_ + ``_ :param index: A comma-separated list of index names to search; use `_all` or to perform the operation on all indices @@ -2606,9 +2609,12 @@ async def mget( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to get multiple documents in one request. + Get multiple documents. Get multiple JSON documents by ID from one or more indices. + If you specify an index in the request URI, you only need to specify the document + IDs in the request body. To ensure fast responses, this multi get (mget) API + responds with partial results if one or more shards fail. - ``_ + ``_ :param index: Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index. 
@@ -2727,9 +2733,15 @@ async def msearch( typed_keys: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to execute several search operations in one request. + Run multiple searches. The format of the request is similar to the bulk API format + and makes use of the newline delimited JSON (NDJSON) format. The structure is + as follows: ``` header\\n body\\n header\\n body\\n ``` This structure is specifically + optimized to reduce parsing if a specific search ends up redirected to another + node. IMPORTANT: The final line of data must end with a newline character `\\n`. + Each newline character may be preceded by a carriage return `\\r`. When sending + requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`. - ``_ + ``_ :param searches: :param index: Comma-separated list of data streams, indices, and index aliases @@ -2861,7 +2873,7 @@ async def msearch_template( """ Runs multiple templated searches with a single request. - ``_ + ``_ :param search_templates: :param index: Comma-separated list of data streams, indices, and aliases to search. @@ -2954,9 +2966,13 @@ async def mtermvectors( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns multiple termvectors in one request. + Get multiple term vectors. You can specify existing documents by index and ID + or provide artificial documents in the body of the request. You can specify the + index in the request body or request URI. The response contains a `docs` array + with all the fetched termvectors. Each element has the structure provided by + the termvectors API. - ``_ + ``_ :param index: Name of the index that contains the documents. :param docs: Array of existing or artificial documents. @@ -3073,7 +3089,7 @@ async def open_point_in_time( then the results of those requests might not be consistent as changes happening between searches are only visible to the more recent point in time. - ``_ + ``_ :param index: A comma-separated list of index names to open point in time; use `_all` or empty string to perform the operation on all indices @@ -3155,7 +3171,7 @@ async def put_script( Create or update a script or search template. Creates or updates a stored script or search template. - ``_ + ``_ :param id: Identifier for the stored script or search template. Must be unique within the cluster. @@ -3241,7 +3257,7 @@ async def rank_eval( Enables you to evaluate the quality of ranked search results over a set of typical search queries. - ``_ + ``_ :param requests: A set of typical search requests, together with their provided ratings. @@ -3337,7 +3353,7 @@ async def reindex( can be any existing index, alias, or data stream. The destination must differ from the source. For example, you cannot reindex a data stream into itself. - ``_ + ``_ :param dest: The destination you are copying to. :param source: The source you are copying from. @@ -3431,9 +3447,10 @@ async def reindex_rethrottle( requests_per_second: t.Optional[float] = None, ) -> ObjectApiResponse[t.Any]: """ - Copies documents from a source to a destination. + Throttle a reindex operation. Change the number of requests per second for a + particular reindex operation. - ``_ + ``_ :param task_id: Identifier for the task. :param requests_per_second: The throttle for this request in sub-requests per @@ -3484,7 +3501,7 @@ async def render_search_template( """ Renders a search template as a search request body. - ``_ + ``_ :param id: ID of the search template to render. 
If no `source` is specified, this or the `id` request body parameter is required. @@ -3552,7 +3569,7 @@ async def scripts_painless_execute( """ Run a script. Runs a script and returns a result. - ``_ + ``_ :param context: The context that the script should run in. :param context_setup: Additional parameters for the `context`. @@ -3608,9 +3625,24 @@ async def scroll( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to retrieve a large numbers of results from a single search request. - - ``_ + Run a scrolling search. IMPORTANT: The scroll API is no longer recommend for + deep pagination. If you need to preserve the index state while paging through + more than 10,000 hits, use the `search_after` parameter with a point in time + (PIT). The scroll API gets large sets of results from a single scrolling search + request. To get the necessary scroll ID, submit a search API request that includes + an argument for the `scroll` query parameter. The `scroll` parameter indicates + how long Elasticsearch should retain the search context for the request. The + search response returns a scroll ID in the `_scroll_id` response body parameter. + You can then use the scroll ID with the scroll API to retrieve the next batch + of results for the request. If the Elasticsearch security features are enabled, + the access to the results of a specific scroll ID is restricted to the user or + API key that submitted the search. You can also use the scroll API to specify + a new scroll parameter that extends or shortens the retention period for the + search context. IMPORTANT: Results from a scrolling search reflect the state + of the index at the time of the initial search request. Subsequent indexing or + document changes only affect later search and scroll requests. + + ``_ :param scroll_id: Scroll ID of the search. :param rest_total_hits_as_int: If true, the API response’s hit.total property @@ -3802,7 +3834,7 @@ async def search( search queries using the `q` query string parameter or the request body. If both are specified, only the query parameter is used. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams and indices, omit this @@ -4232,7 +4264,7 @@ async def search_mvt( """ Search a vector tile. Searches a vector tile for geospatial values. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, or aliases to search :param field: Field containing geospatial data to return @@ -4387,7 +4419,7 @@ async def search_shards( Returns information about the indices and shards that a search request would be executed against. - ``_ + ``_ :param index: Returns the indices and shards that a search request would be executed against. @@ -4488,7 +4520,7 @@ async def search_template( """ Runs a search with a search template. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (*). @@ -4618,11 +4650,17 @@ async def terms_enum( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - The terms enum API can be used to discover terms in the index that begin with - the provided string. It is designed for low-latency look-ups used in auto-complete - scenarios. - - ``_ + Get terms in an index. Discover terms that match a partial string in an index. + This "terms enum" API is designed for low-latency look-ups used in auto-complete + scenarios. 
If the `complete` property in the response is false, the returned + terms set may be incomplete and should be treated as approximate. This can occur + due to a few reasons, such as a request timeout or a node error. NOTE: The terms + enum API may return terms from deleted documents. Deleted documents are initially + only marked as deleted. It is not until their segments are merged that documents + are actually deleted. Until that happens, the terms enum API will return terms + from these documents. + + ``_ :param index: Comma-separated list of data streams, indices, and index aliases to search. Wildcard (*) expressions are supported. @@ -4721,7 +4759,7 @@ async def termvectors( Get term vector information. Returns information and statistics about terms in the fields of a particular document. - ``_ + ``_ :param index: Name of the index that contains the document. :param id: Unique identifier of the document. @@ -4864,7 +4902,7 @@ async def update( Update a document. Updates a document by running a script or passing a partial document. - ``_ + ``_ :param index: The name of the index :param id: Document ID @@ -5030,7 +5068,7 @@ async def update_by_query( is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this @@ -5224,9 +5262,12 @@ async def update_by_query_rethrottle( requests_per_second: t.Optional[float] = None, ) -> ObjectApiResponse[t.Any]: """ - Changes the number of requests per second for a particular Update By Query operation. + Throttle an update by query operation. Change the number of requests per second + for a particular update by query operation. Rethrottling that speeds up the query + takes effect immediately but rethrotting that slows down the query takes effect + after completing the current batch to prevent scroll timeouts. - ``_ + ``_ :param task_id: The ID for the task. :param requests_per_second: The throttle for this request in sub-requests per diff --git a/elasticsearch/_async/client/async_search.py b/elasticsearch/_async/client/async_search.py index 74bfdc0fc..d5f4e78b8 100644 --- a/elasticsearch/_async/client/async_search.py +++ b/elasticsearch/_async/client/async_search.py @@ -36,13 +36,13 @@ async def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an async search by identifier. If the search is still running, the search - request will be cancelled. Otherwise, the saved search results are deleted. If - the Elasticsearch security features are enabled, the deletion of a specific async - search is restricted to: the authenticated user that submitted the original search - request; users that have the `cancel_task` cluster privilege. + Delete an async search. If the asynchronous search is still running, it is cancelled. + Otherwise, the saved search results are deleted. If the Elasticsearch security + features are enabled, the deletion of a specific async search is restricted to: + the authenticated user that submitted the original search request; users that + have the `cancel_task` cluster privilege. - ``_ + ``_ :param id: A unique identifier for the async search. """ @@ -85,12 +85,12 @@ async def get( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the results of a previously submitted async search request given its - identifier. 
If the Elasticsearch security features are enabled, access to the - results of a specific async search is restricted to the user or API key that + Get async search results. Retrieve the results of a previously submitted asynchronous + search request. If the Elasticsearch security features are enabled, access to + the results of a specific async search is restricted to the user or API key that submitted it. - ``_ + ``_ :param id: A unique identifier for the async search. :param keep_alive: Specifies how long the async search should be available in @@ -148,12 +148,12 @@ async def status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get async search status Retrieves the status of a previously submitted async + Get async search status. Retrieve the status of a previously submitted async search request given its identifier, without retrieving search results. If the Elasticsearch security features are enabled, use of this API is restricted to the `monitoring_user` role. - ``_ + ``_ :param id: A unique identifier for the async search. """ @@ -323,17 +323,17 @@ async def submit( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Runs a search request asynchronously. When the primary sort of the results is - an indexed field, shards get sorted based on minimum and maximum value that they - hold for that field, hence partial results become available following the sort - criteria that was requested. Warning: Async search does not support scroll nor - search requests that only include the suggest section. By default, Elasticsearch - doesn’t allow you to store an async search response larger than 10Mb and an attempt - to do this results in an error. The maximum allowed size for a stored async search - response can be set by changing the `search.max_async_search_response_size` cluster - level setting. + Run an async search. When the primary sort of the results is an indexed field, + shards get sorted based on minimum and maximum value that they hold for that + field. Partial results become available following the sort criteria that was + requested. Warning: Asynchronous search does not support scroll or search requests + that include only the suggest section. By default, Elasticsearch does not allow + you to store an async search response larger than 10Mb and an attempt to do this + results in an error. The maximum allowed size for a stored async search response + can be set by changing the `search.max_async_search_response_size` cluster level + setting. - ``_ + ``_ :param index: A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices diff --git a/elasticsearch/_async/client/autoscaling.py b/elasticsearch/_async/client/autoscaling.py index b558e94a5..f4bdd444e 100644 --- a/elasticsearch/_async/client/autoscaling.py +++ b/elasticsearch/_async/client/autoscaling.py @@ -39,7 +39,7 @@ async def delete_autoscaling_policy( Deletes an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. - ``_ + ``_ :param name: the name of the autoscaling policy """ @@ -79,7 +79,7 @@ async def get_autoscaling_capacity( Gets the current autoscaling capacity based on the configured autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_autoscaling/capacity" @@ -116,7 +116,7 @@ async def get_autoscaling_policy( Retrieves an autoscaling policy. 
Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. - ``_ + ``_ :param name: the name of the autoscaling policy """ @@ -161,7 +161,7 @@ async def put_autoscaling_policy( Creates a new autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. - ``_ + ``_ :param name: the name of the autoscaling policy :param policy: diff --git a/elasticsearch/_async/client/cat.py b/elasticsearch/_async/client/cat.py index 83a393e7a..2c8e8e055 100644 --- a/elasticsearch/_async/client/cat.py +++ b/elasticsearch/_async/client/cat.py @@ -57,7 +57,7 @@ async def aliases( not intended for use by applications. For application consumption, use the aliases API. - ``_ + ``_ :param name: A comma-separated list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. @@ -145,7 +145,7 @@ async def allocation( disk space. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. - ``_ + ``_ :param node_id: Comma-separated list of node identifiers or names used to limit the returned information. @@ -232,7 +232,7 @@ async def component_templates( for use by applications. For application consumption, use the get component template API. - ``_ + ``_ :param name: The name of the component template. Accepts wildcard expressions. If omitted, all component templates are returned. @@ -316,7 +316,7 @@ async def count( console. They are not intended for use by applications. For application consumption, use the count API. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -402,7 +402,7 @@ async def fielddata( using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes stats API. - ``_ + ``_ :param fields: Comma-separated list of fields used to limit returned information. To retrieve all fields, omit this parameter. @@ -497,7 +497,7 @@ async def health( across multiple nodes. You also can use the API to track the recovery of a large cluster over a longer period of time. - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -574,7 +574,7 @@ async def help( """ Get CAT help. Returns help for the CAT APIs. - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -672,7 +672,7 @@ async def indices( using the command line or Kibana console. They are not intended for use by applications. For application consumption, use an index endpoint. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -774,7 +774,7 @@ async def master( command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -949,7 +949,7 @@ async def ml_data_frame_analytics( For application consumption, use the get data frame analytics jobs statistics API. 
- ``_ + ``_ :param id: The ID of the data frame analytics to fetch :param allow_no_match: Whether to ignore if a wildcard expression matches no @@ -1123,7 +1123,7 @@ async def ml_datafeeds( using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get datafeed statistics API. - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. @@ -1496,7 +1496,7 @@ async def ml_jobs( for use by applications. For application consumption, use the get anomaly detection job statistics API. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param allow_no_match: Specifies what to do when the request: * Contains wildcard @@ -1685,7 +1685,7 @@ async def ml_trained_models( console or command line. They are not intended for use by applications. For application consumption, use the get trained models statistics API. - ``_ + ``_ :param model_id: A unique identifier for the trained model. :param allow_no_match: Specifies what to do when the request: contains wildcard @@ -1782,7 +1782,7 @@ async def nodeattrs( are not intended for use by applications. For application consumption, use the nodes info API. - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -1861,7 +1861,7 @@ async def nodes( are not intended for use by applications. For application consumption, use the nodes info API. - ``_ + ``_ :param bytes: The unit used to display byte values. :param format: Specifies the format to return the columnar data in, can be set @@ -1946,7 +1946,7 @@ async def pending_tasks( console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API. - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -2020,7 +2020,7 @@ async def plugins( They are not intended for use by applications. For application consumption, use the nodes info API. - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -2104,7 +2104,7 @@ async def recovery( line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API. - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -2196,7 +2196,7 @@ async def repositories( are not intended for use by applications. For application consumption, use the get snapshot repository API. - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -2275,7 +2275,7 @@ async def segments( console. They are not intended for use by applications. For application consumption, use the index segments API. - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -2364,7 +2364,7 @@ async def shards( for human consumption using the command line or Kibana console. They are not intended for use by applications. - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -2452,7 +2452,7 @@ async def snapshots( console. 
They are not intended for use by applications. For application consumption, use the get snapshot API. - ``_ + ``_ :param repository: A comma-separated list of snapshot repositories used to limit the request. Accepts wildcard expressions. `_all` returns all repositories. @@ -2542,7 +2542,7 @@ async def tasks( console. They are not intended for use by applications. For application consumption, use the task management API. - ``_ + ``_ :param actions: The task action names, which are used to limit the response. :param detailed: If `true`, the response includes detailed information about @@ -2632,7 +2632,7 @@ async def templates( console. They are not intended for use by applications. For application consumption, use the get index template API. - ``_ + ``_ :param name: The name of the template to return. Accepts wildcard expressions. If omitted, all templates are returned. @@ -2718,7 +2718,7 @@ async def thread_pool( They are not intended for use by applications. For application consumption, use the nodes info API. - ``_ + ``_ :param thread_pool_patterns: A comma-separated list of thread pool names used to limit the request. Accepts wildcard expressions. @@ -2973,7 +2973,7 @@ async def transforms( command line. They are not intended for use by applications. For application consumption, use the get transform statistics API. - ``_ + ``_ :param transform_id: A transform identifier or a wildcard expression. If you do not specify one of these options, the API returns information for all diff --git a/elasticsearch/_async/client/ccr.py b/elasticsearch/_async/client/ccr.py index c4f7b0c05..0cd6c7c4c 100644 --- a/elasticsearch/_async/client/ccr.py +++ b/elasticsearch/_async/client/ccr.py @@ -38,7 +38,7 @@ async def delete_auto_follow_pattern( """ Deletes auto-follow patterns. - ``_ + ``_ :param name: The name of the auto follow pattern. """ @@ -109,7 +109,7 @@ async def follow( """ Creates a new follower index configured to follow the referenced leader index. - ``_ + ``_ :param index: The name of the follower index :param leader_index: @@ -201,7 +201,7 @@ async def follow_info( Retrieves information about all follower indices, including parameters and status for each follower index - ``_ + ``_ :param index: A comma-separated list of index patterns; use `_all` to perform the operation on all indices @@ -243,7 +243,7 @@ async def follow_stats( Retrieves follower stats. return shard-level stats about the following tasks associated with each shard for the specified indices. - ``_ + ``_ :param index: A comma-separated list of index patterns; use `_all` to perform the operation on all indices @@ -296,7 +296,7 @@ async def forget_follower( """ Removes the follower retention leases from the leader. - ``_ + ``_ :param index: the name of the leader index for which specified follower retention leases should be removed @@ -353,7 +353,7 @@ async def get_auto_follow_pattern( Gets configured auto-follow patterns. Returns the specified auto-follow pattern collection. - ``_ + ``_ :param name: Specifies the auto-follow pattern collection that you want to retrieve. If you do not specify a name, the API returns information for all collections. @@ -397,7 +397,7 @@ async def pause_auto_follow_pattern( """ Pauses an auto-follow pattern - ``_ + ``_ :param name: The name of the auto follow pattern that should pause discovering new indices to follow. @@ -439,7 +439,7 @@ async def pause_follow( Pauses a follower index. The follower index will not fetch any additional operations from the leader index. 
- ``_ + ``_ :param index: The name of the follower index that should pause following its leader index. @@ -516,7 +516,7 @@ async def put_auto_follow_pattern( cluster. Newly created indices on the remote cluster matching any of the specified patterns will be automatically configured as follower indices. - ``_ + ``_ :param name: The name of the collection of auto-follow patterns. :param remote_cluster: The remote cluster containing the leader indices to match @@ -640,7 +640,7 @@ async def resume_auto_follow_pattern( """ Resumes an auto-follow pattern that has been paused - ``_ + ``_ :param name: The name of the auto follow pattern to resume discovering new indices to follow. @@ -705,7 +705,7 @@ async def resume_follow( """ Resumes a follower index that has been paused - ``_ + ``_ :param index: The name of the follow index to resume following. :param max_outstanding_read_requests: @@ -787,7 +787,7 @@ async def stats( """ Gets all stats related to cross-cluster replication. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ccr/stats" @@ -824,7 +824,7 @@ async def unfollow( Stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. - ``_ + ``_ :param index: The name of the follower index that should be turned into a regular index. diff --git a/elasticsearch/_async/client/cluster.py b/elasticsearch/_async/client/cluster.py index dfd44b588..c3f5ec8dc 100644 --- a/elasticsearch/_async/client/cluster.py +++ b/elasticsearch/_async/client/cluster.py @@ -46,7 +46,7 @@ async def allocation_explain( """ Provides explanations for shard allocations in the cluster. - ``_ + ``_ :param current_node: Specifies the node ID or the name of the node to only explain a shard that is currently located on the specified node. @@ -117,7 +117,7 @@ async def delete_component_template( are building blocks for constructing index templates that specify index mappings, settings, and aliases. - ``_ + ``_ :param name: Comma-separated list or wildcard expression of component template names used to limit the request. @@ -167,7 +167,7 @@ async def delete_voting_config_exclusions( """ Clears cluster voting config exclusions. - ``_ + ``_ :param wait_for_removal: Specifies whether to wait for all excluded nodes to be removed from the cluster before clearing the voting configuration exclusions @@ -215,7 +215,7 @@ async def exists_component_template( Check component templates. Returns information about whether a particular component template exists. - ``_ + ``_ :param name: Comma-separated list of component template names used to limit the request. Wildcard (*) expressions are supported. @@ -270,7 +270,7 @@ async def get_component_template( """ Get component templates. Retrieves information about component templates. - ``_ + ``_ :param name: Comma-separated list of component template names used to limit the request. Wildcard (`*`) expressions are supported. @@ -334,7 +334,7 @@ async def get_settings( Returns cluster-wide settings. By default, it returns only settings that have been explicitly defined. - ``_ + ``_ :param flat_settings: If `true`, returns settings in flat format. :param include_defaults: If `true`, returns default cluster settings from the @@ -424,7 +424,7 @@ async def health( by the worst shard status. The cluster status is controlled by the worst index status. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported. 
To target @@ -528,7 +528,7 @@ async def info( """ Get cluster info. Returns basic information about the cluster. - ``_ + ``_ :param target: Limits the information returned to the specific target. Supports a comma-separated list, such as http,ingest. @@ -577,7 +577,7 @@ async def pending_tasks( update, the activity of this task might be reported by both task api and pending cluster tasks API. - ``_ + ``_ :param local: If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. @@ -625,7 +625,7 @@ async def post_voting_config_exclusions( """ Updates the cluster voting config exclusions by node ids or node names. - ``_ + ``_ :param node_ids: A comma-separated list of the persistent ids of the nodes to exclude from the voting configuration. If specified, you may not also specify @@ -700,7 +700,7 @@ async def put_component_template( You can include comments anywhere in the request body except before the opening curly bracket. - ``_ + ``_ :param name: Name of the component template to create. Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; @@ -789,7 +789,7 @@ async def put_settings( """ Updates the cluster settings. - ``_ + ``_ :param flat_settings: Return settings in flat format (default: false) :param master_timeout: Explicit operation timeout for connection to master node @@ -845,7 +845,7 @@ async def remote_info( cluster information. It returns connection and endpoint information keyed by the configured remote cluster alias. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_remote/info" @@ -890,7 +890,7 @@ async def reroute( """ Allows to manually change the allocation of individual shards in the cluster. - ``_ + ``_ :param commands: Defines the commands to perform. :param dry_run: If true, then the request simulates the operation only and returns @@ -977,7 +977,7 @@ async def state( """ Returns a comprehensive information about the state of the cluster. - ``_ + ``_ :param metric: Limit the information returned to the specified metrics :param index: A comma-separated list of index names; use `_all` or empty string @@ -1053,8 +1053,8 @@ async def stats( node_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, - flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, + include_remotes: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: @@ -1063,11 +1063,11 @@ async def stats( size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins). - ``_ + ``_ :param node_id: Comma-separated list of node filters used to limit returned information. Defaults to all nodes in the cluster. - :param flat_settings: If `true`, returns settings in flat format. + :param include_remotes: Include remote cluster data into the response :param timeout: Period to wait for each node to respond. If a node does not respond before its timeout expires, the response does not include its stats. However, timed out nodes are included in the response’s `_nodes.failed` property. 
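The `cluster.stats` hunk above replaces `flat_settings` with the new `include_remotes` query parameter. A minimal sketch of calling it from the async Python client (the cluster URL is an assumption):

    import asyncio
    from elasticsearch import AsyncElasticsearch

    async def main() -> None:
        client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster
        # The new query parameter asks the API to fold remote cluster data into the response.
        stats = await client.cluster.stats(include_remotes=True, timeout="30s")
        print(stats["nodes"]["count"]["total"])
        await client.close()

    asyncio.run(main())
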
@@ -1085,10 +1085,10 @@ async def stats( __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path - if flat_settings is not None: - __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human + if include_remotes is not None: + __query["include_remotes"] = include_remotes if pretty is not None: __query["pretty"] = pretty if timeout is not None: diff --git a/elasticsearch/_async/client/connector.py b/elasticsearch/_async/client/connector.py index 8f3fe042d..ac2f1de31 100644 --- a/elasticsearch/_async/client/connector.py +++ b/elasticsearch/_async/client/connector.py @@ -36,9 +36,10 @@ async def check_in( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the last_seen field in the connector, and sets it to current timestamp + Check in a connector. Update the `last_seen` field in the connector and set it + to the current timestamp. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be checked in """ @@ -77,9 +78,12 @@ async def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a connector. + Delete a connector. Removes a connector and associated sync jobs. This is a destructive + action that is not recoverable. NOTE: This action doesn’t delete any API keys, + ingest pipelines, or data indices associated with the connector. These need to + be removed manually. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be deleted :param delete_sync_jobs: A flag indicating if associated sync jobs should be @@ -121,9 +125,9 @@ async def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a connector. + Get a connector. Get the details about a connector. - ``_ + ``_ :param connector_id: The unique identifier of the connector """ @@ -215,9 +219,10 @@ async def last_sync( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates last sync stats in the connector document + Update the connector last sync stats. Update the fields related to the last sync + of a connector. This action is used for analytics and monitoring. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param last_access_control_sync_error: @@ -309,9 +314,9 @@ async def list( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns existing connectors. + Get all connectors. Get information about all connectors. - ``_ + ``_ :param connector_name: A comma-separated list of connector names to fetch connector documents for @@ -383,9 +388,13 @@ async def post( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a connector. + Create a connector. Connectors are Elasticsearch integrations that bring content + from third-party data sources, which can be deployed on Elastic Cloud or hosted + on your own infrastructure. Elastic managed connectors (Native connectors) are + a managed service on Elastic Cloud. Self-managed connectors (Connector clients) + are self-managed on your infrastructure. - ``_ + ``_ :param description: :param index_name: @@ -461,9 +470,9 @@ async def put( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a connector. + Create or update a connector. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be created or updated. ID is auto-generated if not provided. 
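The connector docstrings above explain that connectors are integrations that bring content from third-party data sources into Elasticsearch. A minimal sketch of creating one with `connector.put` (the connector id, index name, and service type are illustrative assumptions):

    import asyncio
    from elasticsearch import AsyncElasticsearch

    async def main() -> None:
        client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster
        # Create (or overwrite) a self-managed connector; all field values are made up.
        await client.connector.put(
            connector_id="my-connector",
            index_name="search-my-content",
            name="My content connector",
            service_type="google_drive",
        )
        connector = await client.connector.get(connector_id="my-connector")
        print(connector["index_name"])
        await client.close()

    asyncio.run(main())
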
@@ -530,9 +539,12 @@ async def sync_job_cancel( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Cancels a connector sync job. + Cancel a connector sync job. Cancel a connector sync job, which sets the status + to cancelling and updates `cancellation_requested_at` to the current time. The + connector service is then responsible for setting the status of connector sync + jobs to cancelled. - ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job """ @@ -574,9 +586,10 @@ async def sync_job_delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a connector sync job. + Delete a connector sync job. Remove a connector sync job and its associated data. + This is a destructive action that is not recoverable. - ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job to be deleted @@ -617,9 +630,9 @@ async def sync_job_get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a connector sync job. + Get a connector sync job. - ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job """ @@ -685,9 +698,10 @@ async def sync_job_list( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Lists connector sync jobs. + Get all connector sync jobs. Get information about all stored connector sync + jobs listed by their creation date in ascending order. - ``_ + ``_ :param connector_id: A connector id to fetch connector sync jobs for :param from_: Starting offset (default: 0) @@ -746,9 +760,10 @@ async def sync_job_post( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a connector sync job. + Create a connector sync job. Create a connector sync job document in the internal + index and initialize its counters and timestamps with default values. - ``_ + ``_ :param id: The id of the associated connector :param job_type: @@ -797,9 +812,10 @@ async def update_active_filtering( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Activates the valid draft filtering for a connector. + Activate the connector draft filter. Activates the valid draft filtering for + a connector. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated """ @@ -842,9 +858,13 @@ async def update_api_key_id( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the API key id in the connector document + Update the connector API key ID. Update the `api_key_id` and `api_key_secret_id` + fields of a connector. You can specify the ID of the API key used for authorization + and the ID of the connector secret where the API key is stored. The connector + secret ID is required only for Elastic managed (native) connectors. Self-managed + connectors (connector clients) do not use this field. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param api_key_id: @@ -896,9 +916,10 @@ async def update_configuration( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the configuration field in the connector document + Update the connector configuration. Update the configuration field in the connector + document. 
- ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param configuration: @@ -949,9 +970,12 @@ async def update_error( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the filtering field in the connector document + Update the connector error field. Set the error field for the connector. If the + error provided in the request body is non-null, the connector’s status is updated + to error. Otherwise, if the error is reset to null, the connector status is updated + to connected. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param error: @@ -1003,9 +1027,12 @@ async def update_filtering( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the filtering field in the connector document + Update the connector filtering. Update the draft filtering configuration of a + connector and marks the draft validation state as edited. The filtering draft + is activated once validated by the running Elastic connector service. The filtering + property is used to configure sync rules (both basic and advanced) for a connector. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param advanced_snippet: @@ -1059,9 +1086,10 @@ async def update_filtering_validation( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the draft filtering validation info for a connector. + Update the connector draft filtering validation. Update the draft filtering validation + info for a connector. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param validation: @@ -1111,9 +1139,10 @@ async def update_index_name( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the index_name in the connector document + Update the connector index name. Update the `index_name` field of a connector, + specifying the index where the data ingested by the connector is stored. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param index_name: @@ -1164,9 +1193,9 @@ async def update_name( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the name and description fields in the connector document + Update the connector name and description. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param description: @@ -1217,9 +1246,9 @@ async def update_native( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the is_native flag in the connector document + Update the connector is_native flag. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param is_native: @@ -1269,9 +1298,10 @@ async def update_pipeline( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the pipeline field in the connector document + Update the connector pipeline. When you create a new connector, the configuration + of an ingest pipeline is populated with default settings. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param pipeline: @@ -1321,9 +1351,9 @@ async def update_scheduling( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the scheduling field in the connector document + Update the connector scheduling. 
- ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param scheduling: @@ -1373,9 +1403,9 @@ async def update_service_type( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the service type of the connector + Update the connector service type. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param service_type: @@ -1432,9 +1462,9 @@ async def update_status( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the status of the connector + Update the connector status. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param status: diff --git a/elasticsearch/_async/client/dangling_indices.py b/elasticsearch/_async/client/dangling_indices.py index b65002f44..e5b23d720 100644 --- a/elasticsearch/_async/client/dangling_indices.py +++ b/elasticsearch/_async/client/dangling_indices.py @@ -39,13 +39,17 @@ async def delete_dangling_index( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes the specified dangling index + Delete a dangling index. If Elasticsearch encounters index data that is absent + from the current cluster state, those indices are considered to be dangling. + For example, this can happen if you delete more than `cluster.indices.tombstones.size` + indices while an Elasticsearch node is offline. - ``_ + ``_ - :param index_uuid: The UUID of the dangling index - :param accept_data_loss: Must be set to true in order to delete the dangling - index + :param index_uuid: The UUID of the index to delete. Use the get dangling indices + API to find the UUID. + :param accept_data_loss: This parameter must be set to true to acknowledge that + it will no longer be possible to recove data from the dangling index. :param master_timeout: Specify timeout for connection to master :param timeout: Explicit operation timeout """ @@ -94,13 +98,20 @@ async def import_dangling_index( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Imports the specified dangling index + Import a dangling index. If Elasticsearch encounters index data that is absent + from the current cluster state, those indices are considered to be dangling. + For example, this can happen if you delete more than `cluster.indices.tombstones.size` + indices while an Elasticsearch node is offline. - ``_ + ``_ - :param index_uuid: The UUID of the dangling index - :param accept_data_loss: Must be set to true in order to import the dangling - index + :param index_uuid: The UUID of the index to import. Use the get dangling indices + API to locate the UUID. + :param accept_data_loss: This parameter must be set to true to import a dangling + index. Because Elasticsearch cannot know where the dangling index data came + from or determine which shard copies are fresh and which are stale, it cannot + guarantee that the imported data represents the latest state of the index + when it was last in the cluster. :param master_timeout: Specify timeout for connection to master :param timeout: Explicit operation timeout """ @@ -145,9 +156,13 @@ async def list_dangling_indices( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns all dangling indices. + Get the dangling indices. If Elasticsearch encounters index data that is absent + from the current cluster state, those indices are considered to be dangling. 
+ For example, this can happen if you delete more than `cluster.indices.tombstones.size` + indices while an Elasticsearch node is offline. Use this API to list dangling + indices, which you can then import or delete. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_dangling" diff --git a/elasticsearch/_async/client/enrich.py b/elasticsearch/_async/client/enrich.py index 6663826b9..6cb22bf26 100644 --- a/elasticsearch/_async/client/enrich.py +++ b/elasticsearch/_async/client/enrich.py @@ -38,7 +38,7 @@ async def delete_policy( """ Delete an enrich policy. Deletes an existing enrich policy and its enrich index. - ``_ + ``_ :param name: Enrich policy to delete. """ @@ -79,7 +79,7 @@ async def execute_policy( """ Creates the enrich index for an existing enrich policy. - ``_ + ``_ :param name: Enrich policy to execute. :param wait_for_completion: If `true`, the request blocks other enrich policy @@ -123,7 +123,7 @@ async def get_policy( """ Get an enrich policy. Returns information about an enrich policy. - ``_ + ``_ :param name: Comma-separated list of enrich policy names used to limit the request. To return information for all enrich policies, omit this parameter. @@ -173,7 +173,7 @@ async def put_policy( """ Create an enrich policy. Creates an enrich policy. - ``_ + ``_ :param name: Name of the enrich policy to create or update. :param geo_match: Matches enrich data to incoming documents based on a `geo_shape` @@ -227,7 +227,7 @@ async def stats( Get enrich stats. Returns enrich coordinator statistics and information about enrich policies that are currently executing. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_enrich/_stats" diff --git a/elasticsearch/_async/client/eql.py b/elasticsearch/_async/client/eql.py index 3944130a1..e835620fd 100644 --- a/elasticsearch/_async/client/eql.py +++ b/elasticsearch/_async/client/eql.py @@ -39,7 +39,7 @@ async def delete( Deletes an async EQL search or a stored synchronous EQL search. The API also deletes results for the search. - ``_ + ``_ :param id: Identifier for the search to delete. A search ID is provided in the EQL search API's response for an async search. A search ID is also provided @@ -86,7 +86,7 @@ async def get( Returns the current status and available results for an async EQL search or a stored synchronous EQL search. - ``_ + ``_ :param id: Identifier for the search. :param keep_alive: Period for which the search and its results are stored on @@ -137,7 +137,7 @@ async def get_status( Returns the current status for an async EQL search or a stored synchronous EQL search without returning results. - ``_ + ``_ :param id: Identifier for the search. """ @@ -225,7 +225,7 @@ async def search( """ Returns results matching a query expressed in Event Query Language (EQL) - ``_ + ``_ :param index: The name of the index to scope the operation :param query: EQL query you wish to run. 
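The enrich docstrings above cover creating a policy and building its enrich index. A minimal sketch of both steps (the policy name, source index, and field names are illustrative assumptions):

    import asyncio
    from elasticsearch import AsyncElasticsearch

    async def main() -> None:
        client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster
        # Define a match enrich policy, then execute it to build the enrich index.
        await client.enrich.put_policy(
            name="users-policy",
            match={
                "indices": "users",
                "match_field": "email",
                "enrich_fields": ["first_name", "last_name"],
            },
        )
        await client.enrich.execute_policy(name="users-policy", wait_for_completion=True)
        await client.close()

    asyncio.run(main())
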
diff --git a/elasticsearch/_async/client/esql.py b/elasticsearch/_async/client/esql.py index d0849643e..68eb37243 100644 --- a/elasticsearch/_async/client/esql.py +++ b/elasticsearch/_async/client/esql.py @@ -47,7 +47,14 @@ async def query( error_trace: t.Optional[bool] = None, filter: t.Optional[t.Mapping[str, t.Any]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, - format: t.Optional[str] = None, + format: t.Optional[ + t.Union[ + str, + t.Literal[ + "arrow", "cbor", "csv", "json", "smile", "tsv", "txt", "yaml" + ], + ] + ] = None, human: t.Optional[bool] = None, locale: t.Optional[str] = None, params: t.Optional[ @@ -63,7 +70,7 @@ async def query( """ Executes an ES|QL request - ``_ + ``_ :param query: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. diff --git a/elasticsearch/_async/client/features.py b/elasticsearch/_async/client/features.py index a6fbf4d14..62e730285 100644 --- a/elasticsearch/_async/client/features.py +++ b/elasticsearch/_async/client/features.py @@ -38,7 +38,7 @@ async def get_features( Gets a list of features which can be included in snapshots using the feature_states field when creating a snapshot - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_features" @@ -73,7 +73,7 @@ async def reset_features( """ Resets the internal state of features, usually by deleting system indices - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_features/_reset" diff --git a/elasticsearch/_async/client/fleet.py b/elasticsearch/_async/client/fleet.py index e34aa0fd9..a2ca88f00 100644 --- a/elasticsearch/_async/client/fleet.py +++ b/elasticsearch/_async/client/fleet.py @@ -43,7 +43,7 @@ async def global_checkpoints( Returns the current global checkpoints for an index. This API is design for internal use by the fleet server project. - ``_ + ``_ :param index: A single index or index alias that resolves to a single index. :param checkpoints: A comma separated list of previous global checkpoints. When diff --git a/elasticsearch/_async/client/graph.py b/elasticsearch/_async/client/graph.py index 963428a45..2ce75e7f2 100644 --- a/elasticsearch/_async/client/graph.py +++ b/elasticsearch/_async/client/graph.py @@ -48,7 +48,7 @@ async def explore( Extracts and summarizes information about the documents and terms in an Elasticsearch data stream or index. - ``_ + ``_ :param index: Name of the index. :param connections: Specifies or more fields from which you want to extract terms diff --git a/elasticsearch/_async/client/ilm.py b/elasticsearch/_async/client/ilm.py index 9a644f797..66df40190 100644 --- a/elasticsearch/_async/client/ilm.py +++ b/elasticsearch/_async/client/ilm.py @@ -42,7 +42,7 @@ async def delete_lifecycle( that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error. - ``_ + ``_ :param name: Identifier for the policy. :param master_timeout: Period to wait for a connection to the master node. If @@ -97,7 +97,7 @@ async def explain_lifecycle( currently executing phase, action, and step. Shows when the index entered each one, the definition of the running phase, and information about any failures. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to target. Supports wildcards (`*`). To target all data streams and indices, use `*` @@ -159,7 +159,7 @@ async def get_lifecycle( """ Retrieves a lifecycle policy. - ``_ + ``_ :param name: Identifier for the policy. 
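The `esql.query` hunk above narrows the `format` argument to a fixed set of values such as `"json"`. A minimal sketch of running an ES|QL query with it (the cluster URL and index name are illustrative assumptions):

    import asyncio
    from elasticsearch import AsyncElasticsearch

    async def main() -> None:
        client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster
        # With format="json" the response carries column metadata and row values.
        response = await client.esql.query(
            query="FROM my-index | STATS doc_count = COUNT(*)",
            format="json",
        )
        print(response["columns"], response["values"])
        await client.close()

    asyncio.run(main())
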
:param master_timeout: Period to wait for a connection to the master node. If @@ -210,7 +210,7 @@ async def get_status( """ Retrieves the current index lifecycle management (ILM) status. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ilm/status" @@ -254,7 +254,7 @@ async def migrate_to_data_tiers( data tiers, and optionally deletes one legacy index template.+ Using node roles enables ILM to automatically move the indices between data tiers. - ``_ + ``_ :param dry_run: If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration. This provides @@ -304,17 +304,17 @@ async def move_to_step( *, index: str, current_step: t.Optional[t.Mapping[str, t.Any]] = None, + next_step: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, - next_step: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ Manually moves an index into the specified step and executes that step. - ``_ + ``_ :param index: The name of the index whose lifecycle step is to change :param current_step: @@ -322,6 +322,10 @@ async def move_to_step( """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") + if current_step is None and body is None: + raise ValueError("Empty value passed for parameter 'current_step'") + if next_step is None and body is None: + raise ValueError("Empty value passed for parameter 'next_step'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/_ilm/move/{__path_parts["index"]}' __query: t.Dict[str, t.Any] = {} @@ -374,7 +378,7 @@ async def put_lifecycle( Creates a lifecycle policy. If the specified policy exists, the policy is replaced and the policy version is incremented. - ``_ + ``_ :param name: Identifier for the policy. :param master_timeout: Period to wait for a connection to the master node. If @@ -433,7 +437,7 @@ async def remove_policy( """ Removes the assigned lifecycle policy and stops managing the specified index - ``_ + ``_ :param index: The name of the index to remove policy on """ @@ -473,7 +477,7 @@ async def retry( """ Retries executing the policy for an index that is in the ERROR step. - ``_ + ``_ :param index: The name of the indices (comma-separated) whose failed lifecycle step is to be retry @@ -515,7 +519,7 @@ async def start( """ Start the index lifecycle management (ILM) plugin. - ``_ + ``_ :param master_timeout: :param timeout: @@ -560,7 +564,7 @@ async def stop( Halts all lifecycle management operations and stops the index lifecycle management (ILM) plugin - ``_ + ``_ :param master_timeout: :param timeout: diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py index f60e358c9..5c8c337d1 100644 --- a/elasticsearch/_async/client/indices.py +++ b/elasticsearch/_async/client/indices.py @@ -52,7 +52,7 @@ async def add_block( Add an index block. Limits the operations allowed on an index by blocking specific operation types. - ``_ + ``_ :param index: A comma separated list of indices to add a block to :param block: The block to add (one of read, write, read_only or metadata) @@ -137,9 +137,10 @@ async def analyze( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Performs analysis on a text string and returns the resulting tokens. + Get tokens from text analysis. 
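The `ilm.move_to_step` hunk above now requires both `current_step` and `next_step`. A minimal sketch of a call that satisfies the new validation (the index name and the specific phase/action/step values are illustrative assumptions and must match the index's real lifecycle state):

    import asyncio
    from elasticsearch import AsyncElasticsearch

    async def main() -> None:
        client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster
        # Both step descriptions are now mandatory; values shown here are examples only.
        await client.ilm.move_to_step(
            index="my-index-000001",
            current_step={"phase": "new", "action": "complete", "name": "complete"},
            next_step={"phase": "warm", "action": "forcemerge", "name": "forcemerge"},
        )
        await client.close()

    asyncio.run(main())
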
The analyze API performs [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) + on a text string and returns the resulting tokens. - ``_ + ``_ :param index: Index used to derive the analyzer. If specified, the `analyzer` or field parameter overrides this value. If no index is specified or the @@ -241,7 +242,7 @@ async def clear_cache( Clears the caches of one or more indices. For data streams, the API clears the caches of the stream’s backing indices. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -326,7 +327,7 @@ async def clone( """ Clones an existing index. - ``_ + ``_ :param index: Name of the source index to clone. :param target: Name of the target index to create. @@ -414,7 +415,7 @@ async def close( """ Closes an index. - ``_ + ``_ :param index: Comma-separated list or wildcard expression of index names used to limit the request. @@ -495,7 +496,7 @@ async def create( """ Create an index. Creates a new index. - ``_ + ``_ :param index: Name of the index you wish to create. :param aliases: Aliases for the index. @@ -569,7 +570,7 @@ async def create_data_stream( Create a data stream. Creates a data stream. You must have a matching index template with data stream enabled. - ``_ + ``_ :param name: Name of the data stream, which must meet the following criteria: Lowercase only; Cannot include `\\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, @@ -630,7 +631,7 @@ async def data_streams_stats( """ Get data stream stats. Retrieves statistics for one or more data streams. - ``_ + ``_ :param name: Comma-separated list of data streams used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams in a @@ -691,7 +692,7 @@ async def delete( """ Delete indices. Deletes one or more indices. - ``_ + ``_ :param index: Comma-separated list of indices to delete. You cannot specify index aliases. By default, this parameter does not support wildcards (`*`) or `_all`. @@ -761,7 +762,7 @@ async def delete_alias( """ Delete an alias. Removes a data stream or index from an alias. - ``_ + ``_ :param index: Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). @@ -826,7 +827,7 @@ async def delete_data_lifecycle( Delete data stream lifecycles. Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle. - ``_ + ``_ :param name: A comma-separated list of data streams of which the data stream lifecycle will be deleted; use `*` to get all data streams @@ -886,7 +887,7 @@ async def delete_data_stream( """ Delete data streams. Deletes one or more data streams and their backing indices. - ``_ + ``_ :param name: Comma-separated list of data streams to delete. Wildcard (`*`) expressions are supported. @@ -941,7 +942,7 @@ async def delete_index_template( then there is no wildcard support and the provided names should match completely with existing templates. - ``_ + ``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. @@ -993,7 +994,7 @@ async def delete_template( """ Deletes a legacy index template. - ``_ + ``_ :param name: The name of the legacy index template to delete. Wildcard (`*`) expressions are supported. @@ -1055,7 +1056,7 @@ async def disk_usage( """ Analyzes the disk usage of each field of an index or data stream. 
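The `indices.analyze` docstring above now describes the API as getting tokens from text analysis. A minimal sketch (the cluster URL and sample text are illustrative assumptions):

    import asyncio
    from elasticsearch import AsyncElasticsearch

    async def main() -> None:
        client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster
        # Run the standard analyzer over a sample string and print the produced tokens.
        analysis = await client.indices.analyze(
            analyzer="standard",
            text="The quick brown fox jumped over the lazy dog",
        )
        print([token["token"] for token in analysis["tokens"]])
        await client.close()

    asyncio.run(main())
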
- ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. It’s recommended to execute this API with a single @@ -1130,7 +1131,7 @@ async def downsample( (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. - ``_ + ``_ :param index: Name of the time series index to downsample. :param target_index: Name of the index to create. @@ -1199,7 +1200,7 @@ async def exists( Check indices. Checks if one or more indices, index aliases, or data streams exist. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases. Supports wildcards (`*`). @@ -1277,7 +1278,7 @@ async def exists_alias( """ Check aliases. Checks if one or more data stream or index aliases exist. - ``_ + ``_ :param name: Comma-separated list of aliases to check. Supports wildcards (`*`). :param index: Comma-separated list of data streams or indices used to limit the @@ -1347,7 +1348,7 @@ async def exists_index_template( """ Returns information about whether a particular index template exists. - ``_ + ``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. @@ -1397,7 +1398,7 @@ async def exists_template( Check existence of index templates. Returns information about whether a particular index template exists. - ``_ + ``_ :param name: The comma separated names of the index templates :param flat_settings: Return settings in flat format (default: false) @@ -1452,7 +1453,7 @@ async def explain_data_lifecycle( creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. - ``_ + ``_ :param index: The name of the index to explain :param include_defaults: indicates if the API should return the default values @@ -1515,7 +1516,7 @@ async def field_usage_stats( """ Returns field usage information for each shard and field of an index. - ``_ + ``_ :param index: Comma-separated list or wildcard expression of index names used to limit the request. @@ -1603,7 +1604,7 @@ async def flush( """ Flushes one or more data streams or indices. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to flush. Supports wildcards (`*`). To flush all data streams and indices, omit this @@ -1686,7 +1687,7 @@ async def forcemerge( """ Performs the force merge operation on one or more indices. - ``_ + ``_ :param index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices @@ -1780,7 +1781,7 @@ async def get( Get index information. Returns information about one or more indices. For data streams, the API returns information about the stream’s backing indices. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard expressions (*) are supported. @@ -1869,7 +1870,7 @@ async def get_alias( """ Get aliases. Retrieves information for one or more data stream or index aliases. - ``_ + ``_ :param index: Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). To target all data streams and indices, @@ -1952,7 +1953,7 @@ async def get_data_lifecycle( Get data stream lifecycles. Retrieves the data stream lifecycle configuration of one or more data streams. - ``_ + ``_ :param name: Comma-separated list of data streams to limit the request. Supports wildcards (`*`). 
To target all data streams, omit this parameter or use `*` @@ -2018,7 +2019,7 @@ async def get_data_stream( """ Get data streams. Retrieves information about one or more data streams. - ``_ + ``_ :param name: Comma-separated list of data stream names used to limit the request. Wildcard (`*`) expressions are supported. If omitted, all data streams are @@ -2094,7 +2095,7 @@ async def get_field_mapping( Get mapping definitions. Retrieves mapping definitions for one or more fields. For data streams, the API retrieves field mappings for the stream’s backing indices. - ``_ + ``_ :param fields: Comma-separated list or wildcard expression of fields used to limit returned information. @@ -2171,7 +2172,7 @@ async def get_index_template( """ Get index templates. Returns information about one or more index templates. - ``_ + ``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. @@ -2245,7 +2246,7 @@ async def get_mapping( Get mapping definitions. Retrieves mapping definitions for one or more indices. For data streams, the API retrieves mappings for the stream’s backing indices. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -2330,7 +2331,7 @@ async def get_settings( Get index settings. Returns setting information for one or more indices. For data streams, returns setting information for the stream’s backing indices. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -2417,7 +2418,7 @@ async def get_template( """ Get index templates. Retrieves information about one or more index templates. - ``_ + ``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (`*`) expressions are supported. To return all index templates, @@ -2483,7 +2484,7 @@ async def migrate_to_data_stream( with the same name. The indices for the alias become hidden backing indices for the stream. The write index for the alias becomes the write index for the stream. - ``_ + ``_ :param name: Name of the index alias to convert to a data stream. :param master_timeout: Period to wait for a connection to the master node. If @@ -2536,7 +2537,7 @@ async def modify_data_stream( Update data streams. Performs one or more data stream modification actions in a single atomic operation. - ``_ + ``_ :param actions: Actions to perform. """ @@ -2596,7 +2597,7 @@ async def open( """ Opens a closed index. For data streams, the API opens any closed backing indices. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). By default, you must explicitly @@ -2672,7 +2673,7 @@ async def promote_data_stream( Promotes a data stream from a replicated data stream managed by CCR to a regular data stream - ``_ + ``_ :param name: The name of the data stream :param master_timeout: Period to wait for a connection to the master node. If @@ -2734,7 +2735,7 @@ async def put_alias( """ Create or update an alias. Adds a data stream or index to an alias. - ``_ + ``_ :param index: Comma-separated list of data streams or indices to add. Supports wildcards (`*`). Wildcard patterns that match both data streams and indices @@ -2837,7 +2838,7 @@ async def put_data_lifecycle( Update data stream lifecycles. 
Update the data stream lifecycle of the specified data streams. - ``_ + ``_ :param name: Comma-separated list of data streams used to limit the request. Supports wildcards (`*`). To target all data streams use `*` or `_all`. @@ -2939,7 +2940,7 @@ async def put_index_template( Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. - ``_ + ``_ :param name: Index or template name :param allow_auto_create: This setting overrides the value of the `action.auto_create_index` @@ -3100,7 +3101,7 @@ async def put_mapping( can also use this API to change the search settings of existing fields. For data streams, these changes are applied to all backing indices by default. - ``_ + ``_ :param index: A comma-separated list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. @@ -3230,7 +3231,7 @@ async def put_settings( Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. - ``_ + ``_ :param settings: :param index: Comma-separated list of data streams, indices, and aliases used @@ -3335,7 +3336,7 @@ async def put_template( Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. - ``_ + ``_ :param name: The name of the template :param aliases: Aliases for the index. @@ -3417,7 +3418,7 @@ async def recovery( indices. For data streams, the API returns information for the stream’s backing indices. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -3481,7 +3482,7 @@ async def refresh( indices available for search. For data streams, the API runs the refresh operation on the stream’s backing indices. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -3551,7 +3552,7 @@ async def reload_search_analyzers( """ Reloads an index's search analyzers and their resources. - ``_ + ``_ :param index: A comma-separated list of index names to reload analyzers for :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves @@ -3617,7 +3618,7 @@ async def resolve_cluster( including the local cluster, if included. Multiple patterns and remote clusters are supported. - ``_ + ``_ :param name: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified @@ -3691,7 +3692,7 @@ async def resolve_index( Resolves the specified name(s) and/or index patterns for indices, aliases, and data streams. Multiple patterns and remote clusters are supported. - ``_ + ``_ :param name: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified @@ -3764,7 +3765,7 @@ async def rollover( """ Roll over to a new index. Creates a new index for a data stream or index alias. - ``_ + ``_ :param alias: Name of the data stream or index alias to roll over. :param new_index: Name of the index to create. Supports date math. Data streams @@ -3870,7 +3871,7 @@ async def segments( Returns low-level information about the Lucene segments in index shards. 
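The `indices.put_index_template` docstring above describes composable templates that apply settings, mappings, and aliases to new indices. A minimal sketch of creating one (the template name, pattern, priority, and mapping are illustrative assumptions):

    import asyncio
    from elasticsearch import AsyncElasticsearch

    async def main() -> None:
        client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster
        # A small composable template applied to any new index matching the pattern.
        await client.indices.put_index_template(
            name="my-logs-template",
            index_patterns=["my-logs-*"],
            priority=200,
            template={
                "settings": {"number_of_shards": 1},
                "mappings": {"properties": {"@timestamp": {"type": "date"}}},
            },
        )
        await client.close()

    asyncio.run(main())
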
For data streams, the API returns information about the stream’s backing indices. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -3950,7 +3951,7 @@ async def shard_stores( Retrieves store information about replica shards in one or more indices. For data streams, the API retrieves store information for the stream’s backing indices. - ``_ + ``_ :param index: List of data streams, indices, and aliases used to limit the request. :param allow_no_indices: If false, the request returns an error if any wildcard @@ -4021,7 +4022,7 @@ async def shrink( """ Shrinks an existing index into a new index with fewer primary shards. - ``_ + ``_ :param index: Name of the source index to shrink. :param target: Name of the target index to create. @@ -4097,7 +4098,7 @@ async def simulate_index_template( Simulate an index. Returns the index configuration that would be applied to the specified index from an existing index template. - ``_ + ``_ :param name: Name of the index to simulate :param include_defaults: If true, returns all relevant default configurations @@ -4175,7 +4176,7 @@ async def simulate_template( Simulate an index template. Returns the index configuration that would be applied by a particular index template. - ``_ + ``_ :param name: Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit this parameter and specify the template @@ -4306,7 +4307,7 @@ async def split( """ Splits an existing index into a new index with more primary shards. - ``_ + ``_ :param index: Name of the source index to split. :param target: Name of the target index to create. @@ -4399,7 +4400,7 @@ async def stats( Returns statistics for one or more indices. For data streams, the API retrieves statistics for the stream’s backing indices. - ``_ + ``_ :param index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices @@ -4502,7 +4503,7 @@ async def unfreeze( """ Unfreezes an index. - ``_ + ``_ :param index: Identifier for the index. :param allow_no_indices: If `false`, the request returns an error if any wildcard @@ -4576,7 +4577,7 @@ async def update_aliases( """ Create or update an alias. Adds a data stream or index to an alias. - ``_ + ``_ :param actions: Actions to perform. :param master_timeout: Period to wait for a connection to the master node. If @@ -4651,7 +4652,7 @@ async def validate_query( """ Validate a query. Validates a query without running it. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py index b7fd1b7a3..701ba6835 100644 --- a/elasticsearch/_async/client/inference.py +++ b/elasticsearch/_async/client/inference.py @@ -46,7 +46,7 @@ async def delete( """ Delete an inference endpoint - ``_ + ``_ :param inference_id: The inference Id :param task_type: The task type @@ -111,7 +111,7 @@ async def get( """ Get an inference endpoint - ``_ + ``_ :param task_type: The task type :param inference_id: The inference Id @@ -174,7 +174,7 @@ async def inference( """ Perform inference on the service - ``_ + ``_ :param inference_id: The inference Id :param input: Inference input. Either a string or an array of strings. 
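The inference hunks above cover creating an inference endpoint and running inference against it. A minimal sketch assuming the built-in ELSER service; the endpoint id and service settings are illustrative assumptions:

    import asyncio
    from elasticsearch import AsyncElasticsearch

    async def main() -> None:
        client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster
        # Create a sparse-embedding endpoint, then run inference through it.
        await client.inference.put(
            inference_id="my-elser-endpoint",
            task_type="sparse_embedding",
            inference_config={
                "service": "elser",
                "service_settings": {"num_allocations": 1, "num_threads": 1},
            },
        )
        result = await client.inference.inference(
            inference_id="my-elser-endpoint",
            input="These are not the droids you are looking for.",
        )
        print(result)
        await client.close()

    asyncio.run(main())
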
@@ -257,7 +257,7 @@ async def put( """ Create an inference endpoint - ``_ + ``_ :param inference_id: The inference Id :param inference_config: diff --git a/elasticsearch/_async/client/ingest.py b/elasticsearch/_async/client/ingest.py index c98e11b2a..0d78dc03c 100644 --- a/elasticsearch/_async/client/ingest.py +++ b/elasticsearch/_async/client/ingest.py @@ -40,7 +40,7 @@ async def delete_geoip_database( """ Deletes a geoip database configuration. - ``_ + ``_ :param id: A comma-separated list of geoip database configurations to delete :param master_timeout: Period to wait for a connection to the master node. If @@ -91,7 +91,7 @@ async def delete_pipeline( """ Deletes one or more existing ingest pipeline. - ``_ + ``_ :param id: Pipeline ID or wildcard expression of pipeline IDs used to limit the request. To delete all ingest pipelines in a cluster, use a value of `*`. @@ -140,7 +140,7 @@ async def geo_ip_stats( """ Gets download statistics for GeoIP2 databases used with the geoip processor. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ingest/geoip/stats" @@ -177,7 +177,7 @@ async def get_geoip_database( """ Returns information about one or more geoip database configurations. - ``_ + ``_ :param id: Comma-separated list of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit @@ -230,7 +230,7 @@ async def get_pipeline( Returns information about one or more ingest pipelines. This API returns a local reference of the pipeline. - ``_ + ``_ :param id: Comma-separated list of pipeline IDs to retrieve. Wildcard (`*`) expressions are supported. To get all ingest pipelines, omit this parameter or use `*`. @@ -284,7 +284,7 @@ async def processor_grok( you expect will match. A grok pattern is like a regular expression that supports aliased expressions that can be reused. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ingest/processor/grok" @@ -327,7 +327,7 @@ async def put_geoip_database( """ Returns information about one or more geoip database configurations. - ``_ + ``_ :param id: ID of the database configuration to create or update. :param maxmind: The configuration necessary to identify which IP geolocation @@ -414,7 +414,7 @@ async def put_pipeline( Creates or updates an ingest pipeline. Changes made using this API take effect immediately. - ``_ + ``_ :param id: ID of the ingest pipeline to create or update. :param deprecated: Marks this ingest pipeline as deprecated. When a deprecated @@ -506,7 +506,7 @@ async def simulate( """ Executes an ingest pipeline against a set of provided documents. - ``_ + ``_ :param docs: Sample documents to test in the pipeline. :param id: Pipeline to test. If you don’t specify a `pipeline` in the request diff --git a/elasticsearch/_async/client/license.py b/elasticsearch/_async/client/license.py index a43d7064d..fd9f0957c 100644 --- a/elasticsearch/_async/client/license.py +++ b/elasticsearch/_async/client/license.py @@ -37,7 +37,7 @@ async def delete( """ Deletes licensing information for the cluster - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_license" @@ -76,7 +76,7 @@ async def get( its type, its status, when it was issued, and when it expires. For more information about the different types of licenses, refer to [Elastic Stack subscriptions](https://www.elastic.co/subscriptions). - ``_ + ``_ :param accept_enterprise: If `true`, this parameter returns enterprise for Enterprise license types. 
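The ingest hunks above include `put_pipeline` and `simulate`. A minimal sketch that creates a small pipeline and dry-runs it against a sample document (the pipeline id, processor, and document are illustrative assumptions):

    import asyncio
    from elasticsearch import AsyncElasticsearch

    async def main() -> None:
        client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster
        # Create a tiny pipeline, then simulate it against a sample document.
        await client.ingest.put_pipeline(
            id="lowercase-user",
            description="Lowercase the user field",
            processors=[{"lowercase": {"field": "user"}}],
        )
        simulated = await client.ingest.simulate(
            id="lowercase-user",
            docs=[{"_source": {"user": "Kimchy"}}],
        )
        print(simulated["docs"][0]["doc"]["_source"]["user"])
        await client.close()

    asyncio.run(main())
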
If `false`, this parameter returns platinum for both platinum @@ -122,7 +122,7 @@ async def get_basic_status( """ Retrieves information about the status of the basic license. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_license/basic_status" @@ -157,7 +157,7 @@ async def get_trial_status( """ Retrieves information about the status of the trial license. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_license/trial_status" @@ -198,7 +198,7 @@ async def post( """ Updates the license for the cluster. - ``_ + ``_ :param acknowledge: Specifies whether you acknowledge the license changes. :param license: @@ -257,7 +257,7 @@ async def post_start_basic( acknowledge parameter set to true. To check the status of your basic license, use the following API: [Get basic status](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html). - ``_ + ``_ :param acknowledge: whether the user has acknowledged acknowledge messages (default: false) @@ -300,7 +300,7 @@ async def post_start_trial( The start trial API enables you to start a 30-day trial, which gives access to all subscription features. - ``_ + ``_ :param acknowledge: whether the user has acknowledged acknowledge messages (default: false) diff --git a/elasticsearch/_async/client/logstash.py b/elasticsearch/_async/client/logstash.py index 882a1f633..c25a79bdd 100644 --- a/elasticsearch/_async/client/logstash.py +++ b/elasticsearch/_async/client/logstash.py @@ -38,7 +38,7 @@ async def delete_pipeline( """ Deletes a pipeline used for Logstash Central Management. - ``_ + ``_ :param id: Identifier for the pipeline. """ @@ -78,7 +78,7 @@ async def get_pipeline( """ Retrieves pipelines used for Logstash Central Management. - ``_ + ``_ :param id: Comma-separated list of pipeline identifiers. """ @@ -125,7 +125,7 @@ async def put_pipeline( """ Creates or updates a pipeline used for Logstash Central Management. - ``_ + ``_ :param id: Identifier for the pipeline. :param pipeline: diff --git a/elasticsearch/_async/client/migration.py b/elasticsearch/_async/client/migration.py index cbfc2c7d2..f73dff7c3 100644 --- a/elasticsearch/_async/client/migration.py +++ b/elasticsearch/_async/client/migration.py @@ -40,7 +40,7 @@ async def deprecations( that use deprecated features that will be removed or changed in the next major version. - ``_ + ``_ :param index: Comma-separate list of data streams or indices to check. Wildcard (*) expressions are supported. @@ -83,7 +83,7 @@ async def get_feature_upgrade_status( """ Find out whether system features need to be upgraded or not - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_migration/system_features" @@ -118,7 +118,7 @@ async def post_feature_upgrade( """ Begin upgrades for system features - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_migration/system_features" diff --git a/elasticsearch/_async/client/ml.py b/elasticsearch/_async/client/ml.py index cf86f37a6..da3a23b1c 100644 --- a/elasticsearch/_async/client/ml.py +++ b/elasticsearch/_async/client/ml.py @@ -42,7 +42,7 @@ async def clear_trained_model_deployment_cache( may be cached on that individual node. Calling this API clears the caches without restarting the deployment. - ``_ + ``_ :param model_id: The unique identifier of the trained model. """ @@ -102,7 +102,7 @@ async def close_job( force parameters as the close job request. When a datafeed that has a specified end date stops, it automatically closes its associated job. 
- ``_ + ``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection @@ -164,7 +164,7 @@ async def delete_calendar( Delete a calendar. Removes all scheduled events from a calendar, then deletes it. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. """ @@ -205,7 +205,7 @@ async def delete_calendar_event( """ Delete events from a calendar. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param event_id: Identifier for the scheduled event. You can obtain this identifier @@ -253,7 +253,7 @@ async def delete_calendar_job( """ Delete anomaly jobs from a calendar. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param job_id: An identifier for the anomaly detection jobs. It can be a job @@ -302,7 +302,7 @@ async def delete_data_frame_analytics( """ Delete a data frame analytics job. - ``_ + ``_ :param id: Identifier for the data frame analytics job. :param force: If `true`, it deletes a job that is not stopped; this method is @@ -350,7 +350,7 @@ async def delete_datafeed( """ Delete a datafeed. - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -408,7 +408,7 @@ async def delete_expired_data( expired data for all anomaly detection jobs by using _all, by specifying * as the , or by omitting the . - ``_ + ``_ :param job_id: Identifier for an anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. @@ -469,7 +469,7 @@ async def delete_filter( delete the filter. You must update or delete the job before you can delete the filter. - ``_ + ``_ :param filter_id: A string that uniquely identifies a filter. """ @@ -515,7 +515,7 @@ async def delete_forecast( in the forecast jobs API. The delete forecast API enables you to delete one or more forecasts before they expire. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param forecast_id: A comma-separated list of forecast identifiers. If you do @@ -587,7 +587,7 @@ async def delete_job( delete datafeed API with the same timeout and force parameters as the delete job request. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param delete_user_annotations: Specifies whether annotations that have been @@ -643,7 +643,7 @@ async def delete_model_snapshot( that snapshot, first revert to a different one. To identify the active model snapshot, refer to the `model_snapshot_id` in the results from the get jobs API. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: Identifier for the model snapshot. @@ -691,7 +691,7 @@ async def delete_trained_model( Delete an unreferenced trained model. The request deletes a trained inference model that is not referenced by an ingest pipeline. - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param force: Forcefully deletes a trained model that is referenced by ingest @@ -738,7 +738,7 @@ async def delete_trained_model_alias( to a trained model. If the model alias is missing or refers to a model other than the one identified by the `model_id`, this API returns an error. - ``_ + ``_ :param model_id: The trained model ID to which the model alias refers. :param model_alias: The model alias to delete. @@ -795,7 +795,7 @@ async def estimate_model_memory( an anomaly detection job model. 
It is based on analysis configuration details for the job and cardinality estimates for the fields it references. - ``_ + ``_ :param analysis_config: For a list of the properties that you can specify in the `analysis_config` component of the body of this API. @@ -863,7 +863,7 @@ async def evaluate_data_frame( for use on indexes created by data frame analytics. Evaluation requires both a ground truth field and an analytics result field to be present. - ``_ + ``_ :param evaluation: Defines the type of evaluation you want to perform. :param index: Defines the `index` in which the evaluation will be performed. @@ -943,7 +943,7 @@ async def explain_data_frame_analytics( setting later on. If you have object fields or fields that are excluded via source filtering, they are not included in the explanation. - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -1050,7 +1050,7 @@ async def flush_job( and persists the model state to disk and the job must be opened again before analyzing further data. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param advance_time: Refer to the description for the `advance_time` query parameter. @@ -1121,7 +1121,7 @@ async def forecast( for a job that has an `over_field_name` in its configuration. Forcasts predict future behavior based on historical data. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. The job must be open when you create a forecast; otherwise, an error occurs. @@ -1204,7 +1204,7 @@ async def get_buckets( Get anomaly detection job results for buckets. The API presents a chronological view of the records, grouped by bucket. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param timestamp: The timestamp of a single bucket result. If you do not specify @@ -1299,7 +1299,7 @@ async def get_calendar_events( """ Get info about events in calendars. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids @@ -1365,7 +1365,7 @@ async def get_calendars( """ Get calendar configuration info. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids @@ -1438,7 +1438,7 @@ async def get_categories( """ Get anomaly detection job results for categories. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param category_id: Identifier for the category, which is unique in the job. @@ -1522,7 +1522,7 @@ async def get_data_frame_analytics( multiple data frame analytics jobs in a single API request by using a comma-separated list of data frame analytics jobs or a wildcard expression. - ``_ + ``_ :param id: Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame @@ -1594,7 +1594,7 @@ async def get_data_frame_analytics_stats( """ Get data frame analytics jobs usage info. - ``_ + ``_ :param id: Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame @@ -1664,7 +1664,7 @@ async def get_datafeed_stats( the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds. - ``_ + ``_ :param datafeed_id: Identifier for the datafeed. 
It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the @@ -1724,7 +1724,7 @@ async def get_datafeeds( `*` as the ``, or by omitting the ``. This API returns a maximum of 10,000 datafeeds. - ``_ + ``_ :param datafeed_id: Identifier for the datafeed. It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the @@ -1787,7 +1787,7 @@ async def get_filters( """ Get filters. You can get a single filter or all filters. - ``_ + ``_ :param filter_id: A string that uniquely identifies a filter. :param from_: Skips the specified number of filters. @@ -1851,7 +1851,7 @@ async def get_influencers( that have contributed to, or are to blame for, the anomalies. Influencer results are available only if an `influencer_field_name` is specified in the job configuration. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param desc: If true, the results are sorted in descending order. @@ -1932,7 +1932,7 @@ async def get_job_stats( """ Get anomaly detection jobs usage info. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, a comma-separated list of jobs, or a wildcard expression. If @@ -1993,7 +1993,7 @@ async def get_jobs( detection jobs by using `_all`, by specifying `*` as the ``, or by omitting the ``. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. If you do not specify one of these @@ -2056,7 +2056,7 @@ async def get_memory_stats( jobs and trained models are using memory, on each node, both within the JVM heap, and natively, outside of the JVM. - ``_ + ``_ :param node_id: The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or `ml:true` @@ -2111,7 +2111,7 @@ async def get_model_snapshot_upgrade_stats( """ Get anomaly detection job model snapshot upgrade usage info. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: A numerical character string that uniquely identifies the @@ -2182,7 +2182,7 @@ async def get_model_snapshots( """ Get model snapshots info. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: A numerical character string that uniquely identifies the @@ -2295,7 +2295,7 @@ async def get_overall_buckets( its default), the `overall_score` is the maximum `overall_score` of the overall buckets that have a span equal to the jobs' largest bucket span. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, a comma-separated list of jobs or groups, or a wildcard expression. @@ -2400,7 +2400,7 @@ async def get_records( found in each bucket, which relates to the number of time series being modeled and the number of detectors. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param desc: Refer to the description for the `desc` query parameter. @@ -2495,7 +2495,7 @@ async def get_trained_models( """ Get trained model configuration info. - ``_ + ``_ :param model_id: The unique identifier of the trained model or a model alias. You can get information for multiple trained models in a single API request @@ -2579,7 +2579,7 @@ async def get_trained_models_stats( models in a single API request by using a comma-separated list of model IDs or a wildcard expression. - ``_ + ``_ :param model_id: The unique identifier of the trained model or a model alias. 
It can be a comma-separated list or a wildcard expression. @@ -2642,7 +2642,7 @@ async def infer_trained_model( """ Evaluate a trained model. - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param docs: An array of objects to pass to the model for inference. The objects @@ -2704,7 +2704,7 @@ async def info( what those defaults are. It also provides information about the maximum size of machine learning jobs that could run in the current cluster configuration. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ml/info" @@ -2749,7 +2749,7 @@ async def open_job( job is ready to resume its analysis from where it left off, once new data is received. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param timeout: Refer to the description for the `timeout` query parameter. @@ -2803,7 +2803,7 @@ async def post_calendar_events( """ Add scheduled events to the calendar. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param events: A list of one of more scheduled events. The event’s start and @@ -2861,7 +2861,7 @@ async def post_data( data can be accepted from only a single connection at a time. It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. The job must have a state of open to receive and process the data. @@ -2925,7 +2925,7 @@ async def preview_data_frame_analytics( Preview features used by data frame analytics. Previews the extracted features used by a data frame analytics config. - ``_ + ``_ :param id: Identifier for the data frame analytics job. :param config: A data frame analytics config as described in create data frame @@ -2995,7 +2995,7 @@ async def preview_datafeed( that accurately reflects the behavior of the datafeed, use the appropriate credentials. You can also use secondary authorization headers to supply the credentials. - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -3071,7 +3071,7 @@ async def put_calendar( """ Create a calendar. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param description: A description of the calendar. @@ -3125,7 +3125,7 @@ async def put_calendar_job( """ Add anomaly detection job to calendar. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param job_id: An identifier for the anomaly detection jobs. It can be a job @@ -3199,7 +3199,7 @@ async def put_data_frame_analytics( that performs an analysis on the source indices and stores the outcome in a destination index. - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -3377,7 +3377,7 @@ async def put_datafeed( directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index. - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -3532,7 +3532,7 @@ async def put_filter( more anomaly detection jobs. Specifically, filters are referenced in the `custom_rules` property of detector configuration objects. - ``_ + ``_ :param filter_id: A string that uniquely identifies a filter. :param description: A description of the filter. 
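As a rough illustration of the `ml.put_filter` parameters documented in the hunk above, a usage sketch could look like this. The `items` argument is an assumption based on the ML filter API and does not appear in this hunk; the filter ID and values are placeholders.

```python
import asyncio
from elasticsearch import AsyncElasticsearch

async def main():
    async with AsyncElasticsearch("http://localhost:9200") as client:
        # Create a filter that anomaly detection jobs can reference from
        # detector `custom_rules`.
        await client.ml.put_filter(
            filter_id="safe_domains",            # placeholder identifier
            description="A list of safe domains",
            items=["*.example.com"],             # assumed parameter, not shown in this hunk
        )

asyncio.run(main())
```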
@@ -3619,7 +3619,7 @@ async def put_job( Create an anomaly detection job. If you include a `datafeed_config`, you must have read index privileges on the source index. - ``_ + ``_ :param job_id: The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and @@ -3800,7 +3800,7 @@ async def put_trained_model( Create a trained model. Enable you to supply a trained model that is not created by data frame analytics. - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param compressed_definition: The compressed (GZipped and Base64 encoded) inference @@ -3914,7 +3914,7 @@ async def put_trained_model_alias( common between the old and new trained models for the model alias, the API returns a warning. - ``_ + ``_ :param model_id: The identifier for the trained model that the alias refers to. :param model_alias: The alias to create or update. This value cannot end in numbers. @@ -3972,7 +3972,7 @@ async def put_trained_model_definition_part( """ Create part of a trained model definition. - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param part: The definition part number. When the definition is loaded for inference @@ -4051,7 +4051,7 @@ async def put_trained_model_vocabulary( processing (NLP) models. The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition. - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param vocabulary: The model vocabulary, which must not be empty. @@ -4109,7 +4109,7 @@ async def reset_job( job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma separated list. - ``_ + ``_ :param job_id: The ID of the job to reset. :param delete_user_annotations: Specifies whether annotations that have been @@ -4169,7 +4169,7 @@ async def revert_model_snapshot( For example, you might consider reverting to a saved snapshot after Black Friday or a critical system failure. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: You can specify `empty` as the . Reverting to @@ -4239,7 +4239,7 @@ async def set_upgrade_mode( the current value for the upgrade_mode setting by using the get machine learning info API. - ``_ + ``_ :param enabled: When `true`, it enables `upgrade_mode` which temporarily halts all job and datafeed tasks and prohibits new job and datafeed tasks from @@ -4294,7 +4294,7 @@ async def start_data_frame_analytics( exists, it is used as is. You can therefore set up the destination index in advance with custom settings and mappings. - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -4356,7 +4356,7 @@ async def start_datafeed( headers when you created or updated the datafeed, those credentials are used instead. - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -4426,7 +4426,7 @@ async def start_trained_model_deployment( Start a trained model deployment. It allocates the model to every machine learning node. - ``_ + ``_ :param model_id: The unique identifier of the trained model. Currently, only PyTorch models are supported. @@ -4510,7 +4510,7 @@ async def stop_data_frame_analytics( Stop data frame analytics jobs. 
A data frame analytics job can be started and stopped multiple times throughout its lifecycle. - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -4576,7 +4576,7 @@ async def stop_datafeed( Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. - ``_ + ``_ :param datafeed_id: Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a @@ -4638,7 +4638,7 @@ async def stop_trained_model_deployment( """ Stop a trained model deployment. - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param allow_no_match: Specifies what to do when the request: contains wildcard @@ -4703,7 +4703,7 @@ async def update_data_frame_analytics( """ Update a data frame analytics job. - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -4815,7 +4815,7 @@ async def update_datafeed( query using those same roles. If you provide secondary authorization headers, those credentials are used instead. - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -4979,7 +4979,7 @@ async def update_filter( Update a filter. Updates the description of a filter, adds items, or removes items from the list. - ``_ + ``_ :param filter_id: A string that uniquely identifies a filter. :param add_items: The items to add to the filter. @@ -5070,7 +5070,7 @@ async def update_job( Update an anomaly detection job. Updates certain properties of an anomaly detection job. - ``_ + ``_ :param job_id: Identifier for the job. :param allow_lazy_open: Advanced configuration option. Specifies whether this @@ -5198,7 +5198,7 @@ async def update_model_snapshot( """ Update a snapshot. Updates certain properties of a snapshot. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: Identifier for the model snapshot. @@ -5259,7 +5259,7 @@ async def update_trained_model_deployment( """ Update a trained model deployment. - ``_ + ``_ :param model_id: The unique identifier of the trained model. Currently, only PyTorch models are supported. @@ -5325,7 +5325,7 @@ async def upgrade_job_snapshot( a time and the upgraded snapshot cannot be the current snapshot of the anomaly detection job. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: A numerical character string that uniquely identifies the @@ -5401,7 +5401,7 @@ async def validate( """ Validates an anomaly detection job. - ``_ + ``_ :param analysis_config: :param analysis_limits: @@ -5471,7 +5471,7 @@ async def validate_detector( """ Validates an anomaly detection detector. - ``_ + ``_ :param detector: """ diff --git a/elasticsearch/_async/client/monitoring.py b/elasticsearch/_async/client/monitoring.py index aa8dc41fc..e5eeacc3a 100644 --- a/elasticsearch/_async/client/monitoring.py +++ b/elasticsearch/_async/client/monitoring.py @@ -44,7 +44,7 @@ async def bulk( """ Used by the monitoring features to send monitoring data. 
- ``_ + ``_ :param interval: Collection interval (e.g., '10s' or '10000ms') of the payload :param operations: diff --git a/elasticsearch/_async/client/nodes.py b/elasticsearch/_async/client/nodes.py index 3056354c4..17c0f5401 100644 --- a/elasticsearch/_async/client/nodes.py +++ b/elasticsearch/_async/client/nodes.py @@ -40,7 +40,7 @@ async def clear_repositories_metering_archive( You can use this API to clear the archived repositories metering information in the cluster. - ``_ + ``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). @@ -92,7 +92,7 @@ async def get_repositories_metering_info( compute aggregations over a period of time. Additionally, the information exposed by this API is volatile, meaning that it won’t be present after node restarts. - ``_ + ``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). @@ -146,7 +146,7 @@ async def hot_threads( This API yields a breakdown of the hot threads on each selected node in the cluster. The output is plain text with a breakdown of each node’s top hot threads. - ``_ + ``_ :param node_id: List of node IDs or names used to limit returned information. :param ignore_idle_threads: If true, known idle threads (e.g. waiting in a socket @@ -221,7 +221,7 @@ async def info( """ Returns cluster nodes information. - ``_ + ``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. @@ -290,7 +290,7 @@ async def reload_secure_settings( """ Reloads the keystore on nodes in the cluster. - ``_ + ``_ :param node_id: The names of particular nodes in the cluster to target. :param secure_settings_password: The password for the Elasticsearch keystore. @@ -361,7 +361,7 @@ async def stats( """ Returns cluster nodes statistics. - ``_ + ``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. @@ -478,7 +478,7 @@ async def usage( """ Returns information on the usage of features. - ``_ + ``_ :param node_id: A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting diff --git a/elasticsearch/_async/client/query_rules.py b/elasticsearch/_async/client/query_rules.py index 5f9a931ee..cbf39e121 100644 --- a/elasticsearch/_async/client/query_rules.py +++ b/elasticsearch/_async/client/query_rules.py @@ -39,7 +39,7 @@ async def delete_rule( """ Deletes a query rule within a query ruleset. - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset containing the rule to delete @@ -87,7 +87,7 @@ async def delete_ruleset( """ Deletes a query ruleset. - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset to delete """ @@ -128,7 +128,7 @@ async def get_rule( """ Returns the details about a query rule within a query ruleset - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset containing the rule to retrieve @@ -176,7 +176,7 @@ async def get_ruleset( """ Returns the details about a query ruleset - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset """ @@ -219,7 +219,7 @@ async def list_rulesets( """ Returns summarized information about existing query rulesets. 
- ``_ + ``_ :param from_: Starting offset (default: 0) :param size: specifies a max number of results to get @@ -272,7 +272,7 @@ async def put_rule( """ Creates or updates a query rule within a query ruleset. - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset containing the rule to be created or updated @@ -347,7 +347,7 @@ async def put_ruleset( """ Creates or updates a query ruleset. - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset to be created or updated @@ -382,3 +382,56 @@ async def put_ruleset( endpoint_id="query_rules.put_ruleset", path_parts=__path_parts, ) + + @_rewrite_parameters( + body_fields=("match_criteria",), + ) + async def test( + self, + *, + ruleset_id: str, + match_criteria: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Creates or updates a query ruleset. + + ``_ + + :param ruleset_id: The unique identifier of the query ruleset to be created or + updated + :param match_criteria: + """ + if ruleset_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'ruleset_id'") + if match_criteria is None and body is None: + raise ValueError("Empty value passed for parameter 'match_criteria'") + __path_parts: t.Dict[str, str] = {"ruleset_id": _quote(ruleset_id)} + __path = f'/_query_rules/{__path_parts["ruleset_id"]}/_test' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if match_criteria is not None: + __body["match_criteria"] = match_criteria + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="query_rules.test", + path_parts=__path_parts, + ) diff --git a/elasticsearch/_async/client/rollup.py b/elasticsearch/_async/client/rollup.py index 5c6c157f0..6ccba08ab 100644 --- a/elasticsearch/_async/client/rollup.py +++ b/elasticsearch/_async/client/rollup.py @@ -38,7 +38,7 @@ async def delete_job( """ Deletes an existing rollup job. - ``_ + ``_ :param id: Identifier for the job. """ @@ -78,7 +78,7 @@ async def get_jobs( """ Retrieves the configuration, stats, and status of rollup jobs. - ``_ + ``_ :param id: Identifier for the rollup job. If it is `_all` or omitted, the API returns all rollup jobs. @@ -123,7 +123,7 @@ async def get_rollup_caps( Returns the capabilities of any rollup jobs that have been configured for a specific index or index pattern. - ``_ + ``_ :param id: Index, indices or index-pattern to return rollup capabilities for. `_all` may be used to fetch rollup capabilities from all jobs. @@ -168,7 +168,7 @@ async def get_rollup_index_caps( Returns the rollup capabilities of all jobs inside of a rollup index (for example, the index where rollup data is stored). - ``_ + ``_ :param index: Data stream or index to check for rollup capabilities. Wildcard (`*`) expressions are supported. @@ -230,7 +230,7 @@ async def put_job( """ Creates a rollup job. 
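The `query_rules.test` method added above posts `match_criteria` to `/_query_rules/{ruleset_id}/_test` to check which rules in a ruleset would match. A minimal usage sketch, assuming a running cluster and an existing ruleset named "my-ruleset" (both placeholders), could look like this; the `ruleset_id` and `match_criteria` parameters are exactly those defined in the new method.

```python
import asyncio
from elasticsearch import AsyncElasticsearch

async def main():
    async with AsyncElasticsearch("http://localhost:9200") as client:
        # Evaluate the ruleset against a set of match criteria; the criteria
        # keys and values here are illustrative only.
        resp = await client.query_rules.test(
            ruleset_id="my-ruleset",
            match_criteria={"query_string": "puggles"},
        )
        print(resp)

asyncio.run(main())
```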
- ``_ + ``_ :param id: Identifier for the rollup job. This can be any alphanumeric string and uniquely identifies the data that is associated with the rollup job. @@ -346,7 +346,7 @@ async def rollup_search( """ Enables searching rolled-up data using the standard Query DSL. - ``_ + ``_ :param index: Enables searching rolled-up data using the standard Query DSL. :param aggregations: Specifies aggregations. @@ -409,7 +409,7 @@ async def start_job( """ Starts an existing, stopped rollup job. - ``_ + ``_ :param id: Identifier for the rollup job. """ @@ -451,7 +451,7 @@ async def stop_job( """ Stops an existing, started rollup job. - ``_ + ``_ :param id: Identifier for the rollup job. :param timeout: If `wait_for_completion` is `true`, the API blocks for (at maximum) diff --git a/elasticsearch/_async/client/search_application.py b/elasticsearch/_async/client/search_application.py index 73fc36897..e4d0838e5 100644 --- a/elasticsearch/_async/client/search_application.py +++ b/elasticsearch/_async/client/search_application.py @@ -36,9 +36,10 @@ async def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a search application. + Delete a search application. Remove a search application and its associated alias. + Indices attached to the search application are not removed. - ``_ + ``_ :param name: The name of the search application to delete """ @@ -76,9 +77,10 @@ async def delete_behavioral_analytics( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a behavioral analytics collection. + Delete a behavioral analytics collection. The associated data stream is also + deleted. - ``_ + ``_ :param name: The name of the analytics collection to be deleted """ @@ -116,9 +118,9 @@ async def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the details about a search application + Get search application details. - ``_ + ``_ :param name: The name of the search application """ @@ -156,9 +158,9 @@ async def get_behavioral_analytics( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the existing behavioral analytics collections. + Get behavioral analytics collections. - ``_ + ``_ :param name: A list of analytics collections to limit the returned information """ @@ -205,7 +207,7 @@ async def list( """ Returns the existing search applications. - ``_ + ``_ :param from_: Starting offset. :param q: Query in the Lucene query string syntax. @@ -254,9 +256,9 @@ async def put( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a search application. + Create or update a search application. - ``_ + ``_ :param name: The name of the search application to be created or updated. :param search_application: @@ -307,9 +309,9 @@ async def put_behavioral_analytics( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a behavioral analytics collection. + Create a behavioral analytics collection. - ``_ + ``_ :param name: The name of the analytics collection to be created or updated. """ @@ -353,9 +355,12 @@ async def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Perform a search against a search application. + Run a search application search. Generate and run an Elasticsearch query that + uses the specified query parameteter and the search template associated with + the search application or default template. Unspecified template parameters are + assigned their default values if applicable. 
- ``_ + ``_ :param name: The name of the search application to be searched. :param params: Query parameters specific to this request, which will override diff --git a/elasticsearch/_async/client/searchable_snapshots.py b/elasticsearch/_async/client/searchable_snapshots.py index b6a405991..5d5b9a8a3 100644 --- a/elasticsearch/_async/client/searchable_snapshots.py +++ b/elasticsearch/_async/client/searchable_snapshots.py @@ -39,7 +39,7 @@ async def cache_stats( """ Retrieve node-level cache statistics about searchable snapshots. - ``_ + ``_ :param node_id: A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting @@ -97,7 +97,7 @@ async def clear_cache( """ Clear the cache of searchable snapshots. - ``_ + ``_ :param index: A comma-separated list of index names :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves @@ -169,7 +169,7 @@ async def mount( """ Mount a snapshot as a searchable index. - ``_ + ``_ :param repository: The name of the repository containing the snapshot of the index to mount @@ -249,7 +249,7 @@ async def stats( """ Retrieve shard-level statistics about searchable snapshots. - ``_ + ``_ :param index: A comma-separated list of index names :param level: Return stats aggregated at cluster, index or shard level diff --git a/elasticsearch/_async/client/security.py b/elasticsearch/_async/client/security.py index e8edd8f44..b28da2d6f 100644 --- a/elasticsearch/_async/client/security.py +++ b/elasticsearch/_async/client/security.py @@ -44,9 +44,10 @@ async def activate_user_profile( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a user profile on behalf of another user. + Activate a user profile. Create or update a user profile on behalf of another + user. - ``_ + ``_ :param grant_type: :param access_token: @@ -104,7 +105,7 @@ async def authenticate( and information about the realms that authenticated and authorized the user. If the user cannot be authenticated, this API returns a 401 status code. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/_authenticate" @@ -144,11 +145,11 @@ async def bulk_delete_role( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - The role management APIs are generally the preferred way to manage roles, rather - than using file-based role management. The bulk delete roles API cannot delete - roles that are defined in roles files. + Bulk delete roles. The role management APIs are generally the preferred way to + manage roles, rather than using file-based role management. The bulk delete roles + API cannot delete roles that are defined in roles files. - ``_ + ``_ :param names: An array of role names to delete :param refresh: If `true` (the default) then refresh the affected shards to make @@ -202,11 +203,11 @@ async def bulk_put_role( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - The role management APIs are generally the preferred way to manage roles, rather - than using file-based role management. The bulk create or update roles API cannot - update roles that are defined in roles files. + Bulk create or update roles. The role management APIs are generally the preferred + way to manage roles, rather than using file-based role management. The bulk create + or update roles API cannot update roles that are defined in roles files. 
- ``_ + ``_ :param roles: A dictionary of role name to RoleDescriptor objects to add or update :param refresh: If `true` (the default) then refresh the affected shards to make @@ -262,9 +263,10 @@ async def change_password( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Changes the passwords of users in the native realm and built-in users. + Change passwords. Change the passwords of users in the native realm and built-in + users. - ``_ + ``_ :param username: The user whose password you want to change. If you do not specify this parameter, the password is changed for the current user. @@ -324,10 +326,10 @@ async def clear_api_key_cache( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Evicts a subset of all entries from the API key cache. The cache is also automatically - cleared on state changes of the security index. + Clear the API key cache. Evict a subset of all entries from the API key cache. + The cache is also automatically cleared on state changes of the security index. - ``_ + ``_ :param ids: Comma-separated list of API key IDs to evict from the API key cache. To evict all API keys, use `*`. Does not support other wildcard patterns. @@ -366,9 +368,11 @@ async def clear_cached_privileges( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Evicts application privileges from the native application privileges cache. + Clear the privileges cache. Evict privileges from the native application privilege + cache. The cache is also automatically cleared for applications that have their + privileges updated. - ``_ + ``_ :param application: A comma-separated list of application names """ @@ -407,10 +411,10 @@ async def clear_cached_realms( usernames: t.Optional[t.Sequence[str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Evicts users from the user cache. Can completely clear the cache or evict specific - users. + Clear the user cache. Evict users from the user cache. You can completely clear + the cache or evict specific users. - ``_ + ``_ :param realms: Comma-separated list of realms to clear :param usernames: Comma-separated list of usernames to clear from the cache @@ -451,9 +455,9 @@ async def clear_cached_roles( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Evicts roles from the native role cache. + Clear the roles cache. Evict roles from the native role cache. - ``_ + ``_ :param name: Role name """ @@ -493,9 +497,10 @@ async def clear_cached_service_tokens( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Evicts tokens from the service account token caches. + Clear service account token caches. Evict a subset of all entries from the service + account token caches. - ``_ + ``_ :param namespace: An identifier for the namespace :param service: An identifier for the service name @@ -552,13 +557,13 @@ async def create_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create an API key. Creates an API key for access without requiring basic authentication. + Create an API key. Create an API key for access without requiring basic authentication. A successful request returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys. - ``_ + ``_ :param expiration: Expiration time for the API key. By default, API keys never expire. 
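To make the `create_api_key` behavior described above concrete, here is a usage sketch. Only `expiration` appears in the hunk; `name` and `role_descriptors` are assumed parameters of the API, and all identifiers and index patterns are placeholders.

```python
import asyncio
from elasticsearch import AsyncElasticsearch

async def main():
    async with AsyncElasticsearch("http://localhost:9200") as client:
        # Create a key that expires after seven days instead of never expiring.
        resp = await client.security.create_api_key(
            name="ingest-key",                   # assumed parameter
            expiration="7d",
            role_descriptors={                   # assumed parameter
                "writer": {
                    "indices": [{"names": ["logs-*"], "privileges": ["write"]}]
                }
            },
        )
        # The response includes an "encoded" value usable in an Authorization header.
        print(resp["encoded"])

asyncio.run(main())
```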
@@ -628,9 +633,10 @@ async def create_service_token( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a service accounts token for access without requiring basic authentication. + Create a service account token. Create a service accounts token for access without + requiring basic authentication. - ``_ + ``_ :param namespace: An identifier for the namespace :param service: An identifier for the service name @@ -698,9 +704,9 @@ async def delete_privileges( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes application privileges. + Delete application privileges. - ``_ + ``_ :param application: Application name :param name: Privilege name @@ -754,9 +760,9 @@ async def delete_role( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes roles in the native realm. + Delete roles. Delete roles in the native realm. - ``_ + ``_ :param name: Role name :param refresh: If `true` (the default) then refresh the affected shards to make @@ -802,9 +808,9 @@ async def delete_role_mapping( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes role mappings. + Delete role mappings. - ``_ + ``_ :param name: Role-mapping name :param refresh: If `true` (the default) then refresh the affected shards to make @@ -852,9 +858,10 @@ async def delete_service_token( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a service account token. + Delete service account tokens. Delete service account tokens for a service in + a specified namespace. - ``_ + ``_ :param namespace: An identifier for the namespace :param service: An identifier for the service name @@ -910,9 +917,9 @@ async def delete_user( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes users from the native realm. + Delete users. Delete users from the native realm. - ``_ + ``_ :param username: username :param refresh: If `true` (the default) then refresh the affected shards to make @@ -958,9 +965,9 @@ async def disable_user( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Disables users in the native realm. + Disable users. Disable users in the native realm. - ``_ + ``_ :param username: The username of the user to disable :param refresh: If `true` (the default) then refresh the affected shards to make @@ -1006,9 +1013,10 @@ async def disable_user_profile( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Disables a user profile so it's not visible in user profile searches. + Disable a user profile. Disable user profiles so that they are not visible in + user profile searches. - ``_ + ``_ :param uid: Unique identifier for the user profile. :param refresh: If 'true', Elasticsearch refreshes the affected shards to make @@ -1054,9 +1062,9 @@ async def enable_user( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables users in the native realm. + Enable users. Enable users in the native realm. - ``_ + ``_ :param username: The username of the user to enable :param refresh: If `true` (the default) then refresh the affected shards to make @@ -1102,9 +1110,10 @@ async def enable_user_profile( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables a user profile so it's visible in user profile searches. + Enable a user profile. Enable user profiles to make them visible in user profile + searches. - ``_ + ``_ :param uid: Unique identifier for the user profile. 
:param refresh: If 'true', Elasticsearch refreshes the affected shards to make @@ -1146,10 +1155,10 @@ async def enroll_kibana( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables a Kibana instance to configure itself for communication with a secured - Elasticsearch cluster. + Enroll Kibana. Enable a Kibana instance to configure itself for communication + with a secured Elasticsearch cluster. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/enroll/kibana" @@ -1182,9 +1191,10 @@ async def enroll_node( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows a new node to join an existing cluster with security features enabled. + Enroll a node. Enroll a new node to allow it to join an existing cluster with + security features enabled. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/enroll/node" @@ -1231,7 +1241,7 @@ async def get_api_key( privileges (including `manage_security`), this API returns all API keys regardless of ownership. - ``_ + ``_ :param active_only: A boolean flag that can be used to query API keys that are currently active. An API key is considered active if it is neither invalidated, @@ -1303,10 +1313,10 @@ async def get_builtin_privileges( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the list of cluster privileges and index privileges that are available - in this version of Elasticsearch. + Get builtin privileges. Get the list of cluster privileges and index privileges + that are available in this version of Elasticsearch. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/privilege/_builtin" @@ -1341,9 +1351,9 @@ async def get_privileges( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves application privileges. + Get application privileges. - ``_ + ``_ :param application: Application name :param name: Privilege name @@ -1388,11 +1398,11 @@ async def get_role( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - The role management APIs are generally the preferred way to manage roles, rather - than using file-based role management. The get roles API cannot retrieve roles - that are defined in roles files. + Get roles. Get roles in the native realm. The role management APIs are generally + the preferred way to manage roles, rather than using file-based role management. + The get roles API cannot retrieve roles that are defined in roles files. - ``_ + ``_ :param name: The name of the role. You can specify multiple roles as a comma-separated list. If you do not specify this parameter, the API returns information about @@ -1435,9 +1445,12 @@ async def get_role_mapping( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves role mappings. + Get role mappings. Role mappings define which roles are assigned to each user. + The role mapping APIs are generally the preferred way to manage role mappings + rather than using role mapping files. The get role mappings API cannot retrieve + role mappings that are defined in role mapping files. - ``_ + ``_ :param name: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does @@ -1483,9 +1496,10 @@ async def get_service_accounts( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - This API returns a list of service accounts that match the provided path parameter(s). + Get service accounts. 
Get a list of service accounts that match the provided + path parameters. - ``_ + ``_ :param namespace: Name of the namespace. Omit this parameter to retrieve information about all service accounts. If you omit this parameter, you must also omit @@ -1534,9 +1548,9 @@ async def get_service_credentials( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information of all service credentials for a service account. + Get service account credentials. - ``_ + ``_ :param namespace: Name of the namespace. :param service: Name of the service name. @@ -1602,9 +1616,9 @@ async def get_token( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a bearer token for access without requiring basic authentication. + Get a token. Create a bearer token for access without requiring basic authentication. - ``_ + ``_ :param grant_type: :param kerberos_ticket: @@ -1661,9 +1675,9 @@ async def get_user( with_profile_uid: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about users in the native realm and built-in users. + Get users. Get information about users in the native realm and built-in users. - ``_ + ``_ :param username: An identifier for the user. You can specify multiple usernames as a comma-separated list. If you omit this parameter, the API retrieves @@ -1712,9 +1726,9 @@ async def get_user_privileges( username: t.Optional[t.Union[None, str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves security privileges for the logged in user. + Get user privileges. - ``_ + ``_ :param application: The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, @@ -1762,9 +1776,9 @@ async def get_user_profile( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a user's profile using the unique profile ID. + Get a user profile. Get a user's profile using the unique profile ID. - ``_ + ``_ :param uid: A unique identifier for the user profile. :param data: List of filters for the `data` field of the profile document. To @@ -1826,23 +1840,23 @@ async def grant_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates an API key on behalf of another user. This API is similar to Create API - keys, however it creates the API key for a user that is different than the user - that runs the API. The caller must have authentication credentials (either an - access token, or a username and password) for the user on whose behalf the API - key will be created. It is not possible to use this API to create an API key - without that user’s credentials. The user, for whom the authentication credentials - is provided, can optionally "run as" (impersonate) another user. In this case, - the API key will be created on behalf of the impersonated user. This API is intended - be used by applications that need to create and manage API keys for end users, - but cannot guarantee that those users have permission to create API keys on their - own behalf. A successful grant API key API call returns a JSON structure that - contains the API key, its unique id, and its name. If applicable, it also returns - expiration information for the API key in milliseconds. By default, API keys - never expire. You can specify expiration information when you create the API - keys. - - ``_ + Grant an API key. Create an API key on behalf of another user. 
This API is similar + to the create API keys API, however it creates the API key for a user that is + different than the user that runs the API. The caller must have authentication + credentials (either an access token, or a username and password) for the user + on whose behalf the API key will be created. It is not possible to use this API + to create an API key without that user’s credentials. The user, for whom the + authentication credentials is provided, can optionally "run as" (impersonate) + another user. In this case, the API key will be created on behalf of the impersonated + user. This API is intended be used by applications that need to create and manage + API keys for end users, but cannot guarantee that those users have permission + to create API keys on their own behalf. A successful grant API key API call returns + a JSON structure that contains the API key, its unique id, and its name. If applicable, + it also returns expiration information for the API key in milliseconds. By default, + API keys never expire. You can specify expiration information when you create + the API keys. + + ``_ :param api_key: Defines the API key. :param grant_type: The type of grant. Supported grant types are: `access_token`, @@ -1980,10 +1994,10 @@ async def has_privileges( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Check user privileges. Determines whether the specified user has a specified - list of privileges. + Check user privileges. Determine whether the specified user has a specified list + of privileges. - ``_ + ``_ :param user: Username :param application: @@ -2040,10 +2054,10 @@ async def has_privileges_user_profile( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Determines whether the users associated with the specified profile IDs have all - the requested privileges. + Check user profile privileges. Determine whether the users associated with the + specified user profile IDs have all the requested privileges. - ``_ + ``_ :param privileges: :param uids: A list of profile IDs. The privileges are checked for associated @@ -2100,15 +2114,19 @@ async def invalidate_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Invalidate API keys. Invalidates one or more API keys. The `manage_api_key` privilege - allows deleting any API keys. The `manage_own_api_key` only allows deleting API - keys that are owned by the user. In addition, with the `manage_own_api_key` privilege, - an invalidation request must be issued in one of the three formats: - Set the - parameter `owner=true`. - Or, set both `username` and `realm_name` to match the - user’s identity. - Or, if the request is issued by an API key, i.e. an API key - invalidates itself, specify its ID in the `ids` field. + Invalidate API keys. This API invalidates API keys created by the create API + key or grant API key APIs. Invalidated API keys fail authentication, but they + can still be viewed using the get API key information and query API key information + APIs, for at least the configured retention period, until they are automatically + deleted. The `manage_api_key` privilege allows deleting any API keys. The `manage_own_api_key` + only allows deleting API keys that are owned by the user. In addition, with the + `manage_own_api_key` privilege, an invalidation request must be issued in one + of the three formats: - Set the parameter `owner=true`. - Or, set both `username` + and `realm_name` to match the user’s identity. 
- Or, if the request is issued + by an API key, that is to say an API key invalidates itself, specify its ID in + the `ids` field. - ``_ + ``_ :param id: :param ids: A list of API key ids. This parameter cannot be used with any of @@ -2177,9 +2195,14 @@ async def invalidate_token( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Invalidates one or more access tokens or refresh tokens. + Invalidate a token. The access tokens returned by the get token API have a finite + period of time for which they are valid. After that time period, they can no + longer be used. The time period is defined by the `xpack.security.authc.token.timeout` + setting. The refresh tokens returned by the get token API are only valid for + 24 hours. They can also be used exactly once. If you want to invalidate one or + more access or refresh tokens immediately, use this invalidate token API. - ``_ + ``_ :param realm_name: :param refresh_token: @@ -2237,9 +2260,9 @@ async def put_privileges( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Adds or updates application privileges. + Create or update application privileges. - ``_ + ``_ :param privileges: :param refresh: If `true` (the default) then refresh the affected shards to make @@ -2285,6 +2308,7 @@ async def put_privileges( "global_", "indices", "metadata", + "remote_indices", "run_as", "transient_metadata", ), @@ -2373,16 +2397,18 @@ async def put_role( refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, + remote_indices: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, run_as: t.Optional[t.Sequence[str]] = None, transient_metadata: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - The role management APIs are generally the preferred way to manage roles, rather - than using file-based role management. The create or update roles API cannot - update roles that are defined in roles files. + Create or update roles. The role management APIs are generally the preferred + way to manage roles in the native realm, rather than using file-based role management. + The create or update roles API cannot update roles that are defined in roles + files. File-based role management is not available in Elastic Serverless. - ``_ + ``_ :param name: The name of the role. :param applications: A list of application privilege entries. @@ -2398,6 +2424,7 @@ async def put_role( :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + :param remote_indices: A list of remote indices permissions entries. :param run_as: A list of users that the owners of this role can impersonate. *Note*: in Serverless, the run-as feature is disabled. For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will @@ -2438,6 +2465,8 @@ async def put_role( __body["indices"] = indices if metadata is not None: __body["metadata"] = metadata + if remote_indices is not None: + __body["remote_indices"] = remote_indices if run_as is not None: __body["run_as"] = run_as if transient_metadata is not None: @@ -2483,9 +2512,16 @@ async def put_role_mapping( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates and updates role mappings. + Create or update role mappings. Role mappings define which roles are assigned + to each user. 
Each mapping has rules that identify users and a list of roles + that are granted to those users. The role mapping APIs are generally the preferred + way to manage role mappings rather than using role mapping files. The create + or update role mappings API cannot update role mappings that are defined in role + mapping files. This API does not create roles. Rather, it maps users to existing + roles. Roles can be created by using the create or update roles API or roles + files. - ``_ + ``_ :param name: Role-mapping name :param enabled: @@ -2570,10 +2606,11 @@ async def put_user( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Adds and updates users in the native realm. These users are commonly referred - to as native users. + Create or update users. A password is required for adding a new user but is optional + when updating an existing user. To change a user’s password without updating + any other fields, use the change password API. - ``_ + ``_ :param username: The username of the User :param email: @@ -2668,10 +2705,10 @@ async def query_api_keys( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Query API keys. Retrieves a paginated list of API keys and their information. + Find API keys with a query. Get a paginated list of API keys and their information. You can optionally filter the results with a query. - ``_ + ``_ :param aggregations: Any aggregations to run over the corpus of returned API keys. Aggregations and queries work together. Aggregations are computed only @@ -2795,10 +2832,10 @@ async def query_role( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves roles in a paginated manner. You can optionally filter the results - with a query. + Find roles with a query. Get roles in a paginated manner. You can optionally + filter the results with a query. - ``_ + ``_ :param from_: Starting document offset. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more @@ -2881,10 +2918,10 @@ async def query_user( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information for Users in a paginated manner. You can optionally filter - the results with a query. + Find users with a query. Get information for users in a paginated manner. You + can optionally filter the results with a query. - ``_ + ``_ :param from_: Starting document offset. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more @@ -2960,9 +2997,9 @@ async def saml_authenticate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Submits a SAML Response message to Elasticsearch for consumption. + Authenticate SAML. Submits a SAML response message to Elasticsearch for consumption. - ``_ + ``_ :param content: The SAML response as it was sent by the user’s browser, usually a Base64 encoded XML document. @@ -3022,9 +3059,9 @@ async def saml_complete_logout( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Verifies the logout response sent from the SAML IdP. + Logout of SAML completely. Verifies the logout response sent from the SAML IdP. - ``_ + ``_ :param ids: A json array with all the valid SAML Request Ids that the caller of the API has for the current user. 
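As a usage illustration of the query-style security APIs whose docstrings are rewritten above (not part of the patch itself), the sketch below finds API keys with a query and then invalidates the matches by ID. The connection details and the key-name prefix are placeholders.

```python
from elasticsearch import Elasticsearch

# Placeholder connection details; point this at a cluster with security enabled.
client = Elasticsearch("https://localhost:9200", api_key="<admin-api-key>")

# Find API keys with a query: paginated, filtered by a hypothetical name prefix.
resp = client.security.query_api_keys(
    query={"prefix": {"name": "ingest-"}},
    size=50,
)
key_ids = [key["id"] for key in resp["api_keys"]]

# Invalidate the matching keys by ID (one of the formats the docstring lists).
if key_ids:
    client.security.invalidate_api_key(ids=key_ids)
```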
@@ -3088,9 +3125,9 @@ async def saml_invalidate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Submits a SAML LogoutRequest message to Elasticsearch for consumption. + Invalidate SAML. Submits a SAML LogoutRequest message to Elasticsearch for consumption. - ``_ + ``_ :param query_string: The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout. This query should include @@ -3155,9 +3192,9 @@ async def saml_logout( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Submits a request to invalidate an access token and refresh token. + Logout of SAML. Submits a request to invalidate an access token and refresh token. - ``_ + ``_ :param token: The access token that was returned as a response to calling the SAML authenticate API. Alternatively, the most recent token that was received @@ -3212,10 +3249,10 @@ async def saml_prepare_authentication( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a SAML authentication request () as a URL string, based - on the configuration of the respective SAML realm in Elasticsearch. + Prepare SAML authentication. Creates a SAML authentication request (``) + as a URL string, based on the configuration of the respective SAML realm in Elasticsearch. - ``_ + ``_ :param acs: The Assertion Consumer Service URL that matches the one of the SAML realms in Elasticsearch. The realm is used to generate the authentication @@ -3268,9 +3305,10 @@ async def saml_service_provider_metadata( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Generate SAML metadata for a SAML 2.0 Service Provider. + Create SAML service provider metadata. Generate SAML metadata for a SAML 2.0 + Service Provider. - ``_ + ``_ :param realm_name: The name of the SAML realm in Elasticsearch. """ @@ -3314,9 +3352,10 @@ async def suggest_user_profiles( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get suggestions for user profiles that match specified search criteria. + Suggest a user profile. Get suggestions for user profiles that match specified + search criteria. - ``_ + ``_ :param data: List of filters for the `data` field of the profile document. To return all content use `data=*`. To return a subset of content use `data=` @@ -3400,7 +3439,7 @@ async def update_api_key( not possible to use an API key as the authentication credential for this API. To update an API key, the owner user’s credentials are required. - ``_ + ``_ :param id: The ID of the API key to update. :param expiration: Expiration time for the API key. @@ -3473,10 +3512,10 @@ async def update_user_profile_data( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates specific data for the user profile that's associated with the specified - unique ID. + Update user profile data. Update specific data for the user profile that is associated + with a unique ID. - ``_ + ``_ :param uid: A unique identifier for the user profile. :param data: Non-searchable data that you want to associate with the user profile. diff --git a/elasticsearch/_async/client/slm.py b/elasticsearch/_async/client/slm.py index 831774cc2..bbf64654b 100644 --- a/elasticsearch/_async/client/slm.py +++ b/elasticsearch/_async/client/slm.py @@ -38,7 +38,7 @@ async def delete_lifecycle( """ Deletes an existing snapshot lifecycle policy. 
- ``_ + ``_ :param policy_id: The id of the snapshot lifecycle policy to remove """ @@ -79,7 +79,7 @@ async def execute_lifecycle( Immediately creates a snapshot according to the lifecycle policy, without waiting for the scheduled time. - ``_ + ``_ :param policy_id: The id of the snapshot lifecycle policy to be executed """ @@ -118,7 +118,7 @@ async def execute_retention( """ Deletes any snapshots that are expired according to the policy's retention rules. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_slm/_execute_retention" @@ -155,7 +155,7 @@ async def get_lifecycle( Retrieves one or more snapshot lifecycle policy definitions and information about the latest snapshot attempts. - ``_ + ``_ :param policy_id: Comma-separated list of snapshot lifecycle policies to retrieve """ @@ -198,7 +198,7 @@ async def get_stats( Returns global and policy-level statistics about actions taken by snapshot lifecycle management. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_slm/stats" @@ -233,7 +233,7 @@ async def get_status( """ Retrieves the status of snapshot lifecycle management (SLM). - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_slm/status" @@ -279,7 +279,7 @@ async def put_lifecycle( """ Creates or updates a snapshot lifecycle policy. - ``_ + ``_ :param policy_id: ID for the snapshot lifecycle policy you want to create or update. @@ -356,7 +356,7 @@ async def start( """ Turns on snapshot lifecycle management (SLM). - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_slm/start" @@ -391,7 +391,7 @@ async def stop( """ Turns off snapshot lifecycle management (SLM). - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_slm/stop" diff --git a/elasticsearch/_async/client/snapshot.py b/elasticsearch/_async/client/snapshot.py index 6f2d6bcbf..2223ca978 100644 --- a/elasticsearch/_async/client/snapshot.py +++ b/elasticsearch/_async/client/snapshot.py @@ -41,7 +41,7 @@ async def cleanup_repository( Triggers the review of a snapshot repository’s contents and deletes any stale data not referenced by existing snapshots. - ``_ + ``_ :param name: Snapshot repository to clean up. :param master_timeout: Period to wait for a connection to the master node. @@ -95,7 +95,7 @@ async def clone( """ Clones indices from one snapshot into another snapshot in the same repository. - ``_ + ``_ :param repository: A repository name :param snapshot: The name of the snapshot to clone from @@ -178,7 +178,7 @@ async def create( """ Creates a snapshot in a repository. - ``_ + ``_ :param repository: Repository for the snapshot. :param snapshot: Name of the snapshot. Must be unique in the repository. @@ -282,7 +282,7 @@ async def create_repository( """ Creates a repository. - ``_ + ``_ :param name: A repository name :param repository: @@ -342,7 +342,7 @@ async def delete( """ Deletes one or more snapshots. - ``_ + ``_ :param repository: A repository name :param snapshot: A comma-separated list of snapshot names @@ -393,7 +393,7 @@ async def delete_repository( """ Deletes a repository. - ``_ + ``_ :param name: Name of the snapshot repository to unregister. Wildcard (`*`) patterns are supported. @@ -467,7 +467,7 @@ async def get( """ Returns information about a snapshot. - ``_ + ``_ :param repository: Comma-separated list of snapshot repository names used to limit the request. Wildcard (*) expressions are supported. @@ -579,7 +579,7 @@ async def get_repository( """ Returns information about a repository. 
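The SLM hunks above are mostly link updates, but a brief sketch of the policy lifecycle may help readers unfamiliar with the API. The policy ID, schedule, and repository name are invented for illustration, and the synchronous client is used for brevity; the async client in this file exposes the same methods as coroutines.

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200")  # placeholder endpoint

# Create or update a snapshot lifecycle policy (all values are illustrative).
client.slm.put_lifecycle(
    policy_id="nightly-snapshots",
    schedule="0 30 1 * * ?",            # run at 01:30 every day
    name="<nightly-{now/d}>",
    repository="my_backup_repository",  # must already be registered
    config={"indices": ["*"], "include_global_state": False},
    retention={"expire_after": "30d", "min_count": 5, "max_count": 50},
)

# Trigger the policy immediately instead of waiting for the schedule.
client.slm.execute_lifecycle(policy_id="nightly-snapshots")

# Inspect the policy definition and the latest snapshot attempts.
print(client.slm.get_lifecycle(policy_id="nightly-snapshots"))
```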
- ``_ + ``_ :param name: A comma-separated list of repository names :param local: Return local information, do not retrieve the state from master @@ -616,6 +616,84 @@ async def get_repository( path_parts=__path_parts, ) + @_rewrite_parameters() + async def repository_verify_integrity( + self, + *, + name: t.Union[str, t.Sequence[str]], + blob_thread_pool_concurrency: t.Optional[int] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + index_snapshot_verification_concurrency: t.Optional[int] = None, + index_verification_concurrency: t.Optional[int] = None, + max_bytes_per_sec: t.Optional[str] = None, + max_failed_shard_snapshots: t.Optional[int] = None, + meta_thread_pool_concurrency: t.Optional[int] = None, + pretty: t.Optional[bool] = None, + snapshot_verification_concurrency: t.Optional[int] = None, + verify_blob_contents: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Verifies the integrity of the contents of a snapshot repository + + ``_ + + :param name: A repository name + :param blob_thread_pool_concurrency: Number of threads to use for reading blob + contents + :param index_snapshot_verification_concurrency: Number of snapshots to verify + concurrently within each index + :param index_verification_concurrency: Number of indices to verify concurrently + :param max_bytes_per_sec: Rate limit for individual blob verification + :param max_failed_shard_snapshots: Maximum permitted number of failed shard snapshots + :param meta_thread_pool_concurrency: Number of threads to use for reading metadata + :param snapshot_verification_concurrency: Number of snapshots to verify concurrently + :param verify_blob_contents: Whether to verify the contents of individual blobs + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'name'") + __path_parts: t.Dict[str, str] = {"repository": _quote(name)} + __path = f'/_snapshot/{__path_parts["repository"]}/_verify_integrity' + __query: t.Dict[str, t.Any] = {} + if blob_thread_pool_concurrency is not None: + __query["blob_thread_pool_concurrency"] = blob_thread_pool_concurrency + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if index_snapshot_verification_concurrency is not None: + __query["index_snapshot_verification_concurrency"] = ( + index_snapshot_verification_concurrency + ) + if index_verification_concurrency is not None: + __query["index_verification_concurrency"] = index_verification_concurrency + if max_bytes_per_sec is not None: + __query["max_bytes_per_sec"] = max_bytes_per_sec + if max_failed_shard_snapshots is not None: + __query["max_failed_shard_snapshots"] = max_failed_shard_snapshots + if meta_thread_pool_concurrency is not None: + __query["meta_thread_pool_concurrency"] = meta_thread_pool_concurrency + if pretty is not None: + __query["pretty"] = pretty + if snapshot_verification_concurrency is not None: + __query["snapshot_verification_concurrency"] = ( + snapshot_verification_concurrency + ) + if verify_blob_contents is not None: + __query["verify_blob_contents"] = verify_blob_contents + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + endpoint_id="snapshot.repository_verify_integrity", + path_parts=__path_parts, + ) + 
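The newly added `repository_verify_integrity` helper above maps each keyword argument onto a query parameter of the `_verify_integrity` endpoint. A minimal call sketch follows, using the async client the method is defined on; the repository name is a placeholder and the tuning arguments are optional.

```python
import asyncio
from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("https://localhost:9200")  # placeholder endpoint
    try:
        # Kick off an integrity check of the repository contents. Only `name` is
        # required; the concurrency and verification flags are optional knobs.
        resp = await client.snapshot.repository_verify_integrity(
            name="my_backup_repository",        # hypothetical repository
            snapshot_verification_concurrency=5,
            verify_blob_contents=False,         # True enables a much deeper, slower check
        )
        print(resp)
    finally:
        await client.close()


asyncio.run(main())
```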
@_rewrite_parameters( body_fields=( "feature_states", @@ -656,7 +734,7 @@ async def restore( """ Restores a snapshot. - ``_ + ``_ :param repository: A repository name :param snapshot: A snapshot name @@ -749,7 +827,7 @@ async def status( """ Returns information about the status of a snapshot. - ``_ + ``_ :param repository: A repository name :param snapshot: A comma-separated list of snapshot names @@ -808,7 +886,7 @@ async def verify_repository( """ Verifies a repository. - ``_ + ``_ :param name: A repository name :param master_timeout: Explicit operation timeout for connection to master node diff --git a/elasticsearch/_async/client/sql.py b/elasticsearch/_async/client/sql.py index c089ed495..c4b2f4335 100644 --- a/elasticsearch/_async/client/sql.py +++ b/elasticsearch/_async/client/sql.py @@ -41,7 +41,7 @@ async def clear_cursor( """ Clears the SQL cursor - ``_ + ``_ :param cursor: Cursor to clear. """ @@ -87,7 +87,7 @@ async def delete_async( Deletes an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it. - ``_ + ``_ :param id: Identifier for the search. """ @@ -134,7 +134,7 @@ async def get_async( Returns the current status and available results for an async SQL search or stored synchronous SQL search - ``_ + ``_ :param id: Identifier for the search. :param delimiter: Separator for CSV results. The API only supports this parameter @@ -192,7 +192,7 @@ async def get_async_status( Returns the current status of an async SQL search or a stored synchronous SQL search - ``_ + ``_ :param id: Identifier for the search. """ @@ -251,7 +251,11 @@ async def query( field_multi_value_leniency: t.Optional[bool] = None, filter: t.Optional[t.Mapping[str, t.Any]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, - format: t.Optional[str] = None, + format: t.Optional[ + t.Union[ + str, t.Literal["cbor", "csv", "json", "smile", "tsv", "txt", "yaml"] + ] + ] = None, human: t.Optional[bool] = None, index_using_frozen: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, @@ -271,7 +275,7 @@ async def query( """ Executes a SQL request - ``_ + ``_ :param catalog: Default catalog (cluster) for queries. If unspecified, the queries execute on the data in the local cluster only. @@ -381,7 +385,7 @@ async def translate( """ Translates SQL into Elasticsearch queries - ``_ + ``_ :param query: SQL query to run. :param fetch_size: The maximum number of rows (or entries) to return in one response. diff --git a/elasticsearch/_async/client/ssl.py b/elasticsearch/_async/client/ssl.py index 5b9397893..75f423927 100644 --- a/elasticsearch/_async/client/ssl.py +++ b/elasticsearch/_async/client/ssl.py @@ -35,10 +35,25 @@ async def certificates( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the X.509 certificates used to encrypt communications - in the cluster. + Get SSL certificates. Get information about the X.509 certificates that are used + to encrypt communications in the cluster. The API returns a list that includes + certificates from all TLS contexts including: - Settings for transport and HTTP + interfaces - TLS settings that are used within authentication realms - TLS settings + for remote monitoring exporters The list includes certificates that are used + for configuring trust, such as those configured in the `xpack.security.transport.ssl.truststore` + and `xpack.security.transport.ssl.certificate_authorities` settings. 
It also + includes certificates that are used for configuring server identity, such as + `xpack.security.http.ssl.keystore` and `xpack.security.http.ssl.certificate settings`. + The list does not include certificates that are sourced from the default SSL + context of the Java Runtime Environment (JRE), even if those certificates are + in use within Elasticsearch. NOTE: When a PKCS#11 token is configured as the + truststore of the JRE, the API returns all the certificates that are included + in the PKCS#11 token irrespective of whether these are used in the Elasticsearch + TLS configuration. If Elasticsearch is configured to use a keystore or truststore, + the API output includes all certificates in that store, even though some of the + certificates might not be in active use within the cluster. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ssl/certificates" diff --git a/elasticsearch/_async/client/synonyms.py b/elasticsearch/_async/client/synonyms.py index 153c552af..fac176a30 100644 --- a/elasticsearch/_async/client/synonyms.py +++ b/elasticsearch/_async/client/synonyms.py @@ -38,7 +38,7 @@ async def delete_synonym( """ Deletes a synonym set - ``_ + ``_ :param id: The id of the synonyms set to be deleted """ @@ -79,7 +79,7 @@ async def delete_synonym_rule( """ Deletes a synonym rule in a synonym set - ``_ + ``_ :param set_id: The id of the synonym set to be updated :param rule_id: The id of the synonym rule to be deleted @@ -129,7 +129,7 @@ async def get_synonym( """ Retrieves a synonym set - ``_ + ``_ :param id: "The id of the synonyms set to be retrieved :param from_: Starting offset for query rules to be retrieved @@ -176,7 +176,7 @@ async def get_synonym_rule( """ Retrieves a synonym rule from a synonym set - ``_ + ``_ :param set_id: The id of the synonym set to retrieve the synonym rule from :param rule_id: The id of the synonym rule to retrieve @@ -225,7 +225,7 @@ async def get_synonyms_sets( """ Retrieves a summary of all defined synonym sets - ``_ + ``_ :param from_: Starting offset :param size: specifies a max number of results to get @@ -274,7 +274,7 @@ async def put_synonym( """ Creates or updates a synonym set. - ``_ + ``_ :param id: The id of the synonyms set to be created or updated :param synonyms_set: The synonym set information to update @@ -327,7 +327,7 @@ async def put_synonym_rule( """ Creates or updates a synonym rule in a synonym set - ``_ + ``_ :param set_id: The id of the synonym set to be updated with the synonym rule :param rule_id: The id of the synonym rule to be updated or created diff --git a/elasticsearch/_async/client/tasks.py b/elasticsearch/_async/client/tasks.py index 0acbfef31..fcfe84185 100644 --- a/elasticsearch/_async/client/tasks.py +++ b/elasticsearch/_async/client/tasks.py @@ -42,7 +42,7 @@ async def cancel( """ Cancels a task, if it can be cancelled through an API. - ``_ + ``_ :param task_id: ID of the task. :param actions: Comma-separated list or wildcard expression of actions used to @@ -102,7 +102,7 @@ async def get( Get task information. Returns information about the tasks currently executing in the cluster. - ``_ + ``_ :param task_id: ID of the task. :param timeout: Period to wait for a response. If no response is received before @@ -160,7 +160,7 @@ async def list( The task management API returns information about tasks currently executing on one or more nodes in the cluster. - ``_ + ``_ :param actions: Comma-separated list or wildcard expression of actions used to limit the request. 
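To illustrate the certificates API whose description is expanded above, the sketch below lists the certificates Elasticsearch reports for a TLS-enabled cluster. The field names follow the documented response shape (`path`, `alias`, `expiry`), the endpoint is a placeholder, and the synchronous client is used for brevity.

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="<placeholder>")

# Each entry describes one X.509 certificate used somewhere in the TLS configuration.
resp = client.ssl.certificates()
for cert in resp.body:
    print(cert["path"], cert.get("alias"), cert["expiry"])
```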
diff --git a/elasticsearch/_async/client/text_structure.py b/elasticsearch/_async/client/text_structure.py index 8da71dafb..406592946 100644 --- a/elasticsearch/_async/client/text_structure.py +++ b/elasticsearch/_async/client/text_structure.py @@ -53,7 +53,7 @@ async def find_structure( Finds the structure of a text file. The text file must contain data that is suitable to be ingested into Elasticsearch. - ``_ + ``_ :param text_files: :param charset: The text’s character set. It must be a character set that is @@ -193,7 +193,7 @@ async def test_grok_pattern( """ Tests a Grok pattern on some text. - ``_ + ``_ :param grok_pattern: Grok pattern to run on the text. :param text: Lines of text to run the Grok pattern on. diff --git a/elasticsearch/_async/client/transform.py b/elasticsearch/_async/client/transform.py index e1fa776c7..fb12e6a04 100644 --- a/elasticsearch/_async/client/transform.py +++ b/elasticsearch/_async/client/transform.py @@ -41,7 +41,7 @@ async def delete_transform( """ Delete a transform. Deletes a transform. - ``_ + ``_ :param transform_id: Identifier for the transform. :param delete_dest_index: If this value is true, the destination index is deleted @@ -101,7 +101,7 @@ async def get_transform( """ Get transforms. Retrieves configuration information for transforms. - ``_ + ``_ :param transform_id: Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using @@ -170,7 +170,7 @@ async def get_transform_stats( """ Get transform stats. Retrieves usage information for transforms. - ``_ + ``_ :param transform_id: Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using @@ -256,7 +256,7 @@ async def preview_transform( These values are determined based on the field types of the source index and the transform aggregations. - ``_ + ``_ :param transform_id: Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform configuration details in @@ -393,7 +393,7 @@ async def put_transform( If you used transforms prior to 7.5, also do not give users any privileges on `.data-frame-internal*` indices. - ``_ + ``_ :param transform_id: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -495,7 +495,7 @@ async def reset_transform( it; alternatively, use the `force` query parameter. If the destination index was created by the transform, it is deleted. - ``_ + ``_ :param transform_id: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -547,7 +547,7 @@ async def schedule_now_transform( the transform will be processed again at now + frequency unless _schedule_now API is called again in the meantime. - ``_ + ``_ :param transform_id: Identifier for the transform. :param timeout: Controls the time to wait for the scheduling to take place @@ -611,7 +611,7 @@ async def start_transform( privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. - ``_ + ``_ :param transform_id: Identifier for the transform. :param from_: Restricts the set of transformed entities to those changed after @@ -665,7 +665,7 @@ async def stop_transform( """ Stop transforms. Stops one or more transforms. - ``_ + ``_ :param transform_id: Identifier for the transform. 
To stop multiple transforms, use a comma-separated list or a wildcard expression. To stop all transforms, @@ -765,7 +765,7 @@ async def update_transform( which roles the user who updated it had at the time of update and runs with those privileges. - ``_ + ``_ :param transform_id: Identifier for the transform. :param defer_validation: When true, deferrable validations are not run. This @@ -852,7 +852,7 @@ async def upgrade_transforms( the role used to read source data and write to the destination index remains unchanged. - ``_ + ``_ :param dry_run: When true, the request checks for updates but does not run them. :param timeout: Period to wait for a response. If no response is received before diff --git a/elasticsearch/_async/client/watcher.py b/elasticsearch/_async/client/watcher.py index 387c90355..7b63b0cac 100644 --- a/elasticsearch/_async/client/watcher.py +++ b/elasticsearch/_async/client/watcher.py @@ -39,7 +39,7 @@ async def ack_watch( """ Acknowledges a watch, manually throttling the execution of the watch's actions. - ``_ + ``_ :param watch_id: Watch ID :param action_id: A comma-separated list of the action ids to be acked @@ -90,7 +90,7 @@ async def activate_watch( """ Activates a currently inactive watch. - ``_ + ``_ :param watch_id: Watch ID """ @@ -130,7 +130,7 @@ async def deactivate_watch( """ Deactivates a currently active watch. - ``_ + ``_ :param watch_id: Watch ID """ @@ -170,7 +170,7 @@ async def delete_watch( """ Removes a watch from Watcher. - ``_ + ``_ :param id: Watch ID """ @@ -245,7 +245,7 @@ async def execute_watch( and control whether a watch record would be written to the watch history after execution. - ``_ + ``_ :param id: Identifier for the watch. :param action_modes: Determines how to handle the watch actions as part of the @@ -328,7 +328,7 @@ async def get_watch( """ Retrieves a watch by its ID. - ``_ + ``_ :param id: Watch ID """ @@ -390,7 +390,7 @@ async def put_watch( """ Creates a new watch, or updates an existing one. - ``_ + ``_ :param id: Watch ID :param actions: @@ -487,7 +487,7 @@ async def query_watches( """ Retrieves stored watches. - ``_ + ``_ :param from_: The offset from the first result to fetch. Needs to be non-negative. :param query: Optional, query filter watches to be returned. @@ -557,7 +557,7 @@ async def start( """ Starts Watcher if it is not already running. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_watcher/_start" @@ -614,7 +614,7 @@ async def stats( """ Retrieves the current Watcher metrics. - ``_ + ``_ :param metric: Defines which additional metrics are included in the response. :param emit_stacktraces: Defines whether stack traces are generated for each @@ -660,7 +660,7 @@ async def stop( """ Stops Watcher if it is running. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_watcher/_stop" diff --git a/elasticsearch/_async/client/xpack.py b/elasticsearch/_async/client/xpack.py index a0c5042fe..08ca0f37d 100644 --- a/elasticsearch/_async/client/xpack.py +++ b/elasticsearch/_async/client/xpack.py @@ -34,7 +34,9 @@ async def info( self, *, accept_enterprise: t.Optional[bool] = None, - categories: t.Optional[t.Sequence[str]] = None, + categories: t.Optional[ + t.Sequence[t.Union[str, t.Literal["build", "features", "license"]]] + ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, @@ -43,7 +45,7 @@ async def info( """ Provides general information about the installed X-Pack features. 
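Since the hunk above narrows the `categories` argument of `xpack.info` to the literals `build`, `features`, and `license`, a small call sketch may be useful; the endpoint is a placeholder and the synchronous client is shown.

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200")  # placeholder endpoint

# Ask only for the build and license sections of the X-Pack info response.
info = client.xpack.info(categories=["build", "license"])
print(info["build"]["date"], info["license"]["type"])
```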
- ``_ + ``_ :param accept_enterprise: If this param is used it must be set to true :param categories: A comma-separated list of the information categories to include @@ -88,7 +90,7 @@ async def usage( This API provides information about which features are currently enabled and available under the current license and some usage statistics. - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py index 24c88d49a..4f1498e66 100644 --- a/elasticsearch/_sync/client/__init__.py +++ b/elasticsearch/_sync/client/__init__.py @@ -642,7 +642,7 @@ def bulk( in a single API call. This reduces overhead and can greatly increase indexing speed. - ``_ + ``_ :param operations: :param index: Name of the data stream, index, or index alias to perform bulk @@ -741,7 +741,7 @@ def clear_scroll( """ Clears the search context and results for a scrolling search. - ``_ + ``_ :param scroll_id: Scroll IDs to clear. To clear all scroll IDs, use `_all`. """ @@ -791,7 +791,7 @@ def close_point_in_time( """ Closes a point-in-time. - ``_ + ``_ :param id: The ID of the point-in-time. """ @@ -865,7 +865,7 @@ def count( """ Returns number of documents matching a query. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams and indices, omit this @@ -1000,7 +1000,7 @@ def create( and makes it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. - ``_ + ``_ :param index: Name of the data stream or index to target. If the target doesn’t exist and matches the name or wildcard (`*`) pattern of an index template @@ -1104,7 +1104,7 @@ def delete( """ Delete a document. Removes a JSON document from the specified index. - ``_ + ``_ :param index: Name of the target index. :param id: Unique identifier for the document. @@ -1226,7 +1226,7 @@ def delete_by_query( """ Delete documents. Deletes documents that match the specified query. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this @@ -1402,9 +1402,12 @@ def delete_by_query_rethrottle( requests_per_second: t.Optional[float] = None, ) -> ObjectApiResponse[t.Any]: """ - Changes the number of requests per second for a particular Delete By Query operation. + Throttle a delete by query operation. Change the number of requests per second + for a particular delete by query operation. Rethrottling that speeds up the query + takes effect immediately but rethrotting that slows down the query takes effect + after completing the current batch to prevent scroll timeouts. - ``_ + ``_ :param task_id: The ID for the task. :param requests_per_second: The throttle for this request in sub-requests per @@ -1450,7 +1453,7 @@ def delete_script( """ Delete a script or search template. Deletes a stored script or search template. - ``_ + ``_ :param id: Identifier for the stored script or search template. :param master_timeout: Period to wait for a connection to the master node. If @@ -1518,7 +1521,7 @@ def exists( """ Check a document. Checks if a specified document exists. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases. Supports wildcards (`*`). 
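The rethrottle wording added above can be exercised as below. The index name is hypothetical, the task ID comes back from an asynchronous delete-by-query, and `-1` removes the throttle entirely; this is an illustration, not part of the patch.

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200")  # placeholder endpoint

# Start a throttled delete-by-query asynchronously; the response carries a task ID.
task = client.delete_by_query(
    index="my-index",                    # hypothetical index
    query={"range": {"@timestamp": {"lt": "now-90d"}}},
    requests_per_second=100,
    wait_for_completion=False,
)["task"]

# Speed it up later: -1 disables throttling; a positive float sets a new rate.
client.delete_by_query_rethrottle(task_id=task, requests_per_second=-1)
```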
@@ -1619,7 +1622,7 @@ def exists_source( """ Check for a document source. Checks if a document's `_source` is stored. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases. Supports wildcards (`*`). @@ -1720,7 +1723,7 @@ def explain( Explain a document match result. Returns information about why a specific document matches, or doesn’t match, a query. - ``_ + ``_ :param index: Index names used to limit the request. Only a single index name can be provided to this parameter. @@ -1842,7 +1845,7 @@ def field_caps( like any other field. For example, a runtime field with a type of keyword is returned as any other field that belongs to the `keyword` family. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams @@ -1959,7 +1962,7 @@ def get( Get a document by its ID. Retrieves the document with the specified ID from an index. - ``_ + ``_ :param index: Name of the index that contains the document. :param id: Unique identifier of the document. @@ -2048,7 +2051,7 @@ def get_script( """ Get a script or search template. Retrieves a stored script or search template. - ``_ + ``_ :param id: Identifier for the stored script or search template. :param master_timeout: Specify timeout for connection to master @@ -2088,9 +2091,9 @@ def get_script_context( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns all script contexts. + Get script contexts. Get a list of supported script contexts and their methods. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_script_context" @@ -2123,9 +2126,9 @@ def get_script_languages( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns available script types, languages and contexts + Get script languages. Get a list of available script types, languages, and contexts. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_script_language" @@ -2180,7 +2183,7 @@ def get_source( """ Get a document's source. Returns the source of a document. - ``_ + ``_ :param index: Name of the index that contains the document. :param id: Unique identifier of the document. @@ -2263,7 +2266,7 @@ def health_report( """ Returns the health of the cluster. - ``_ + ``_ :param feature: A feature of the cluster, as returned by the top-level health report API. @@ -2340,7 +2343,7 @@ def index( and makes it searchable. If the target is an index and the document already exists, the request updates the document and increments its version. - ``_ + ``_ :param index: Name of the data stream or index to target. :param document: @@ -2449,7 +2452,7 @@ def info( """ Get cluster info. Returns basic information about the cluster. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/" @@ -2505,7 +2508,7 @@ def knn_search( """ Performs a kNN search. - ``_ + ``_ :param index: A comma-separated list of index names to search; use `_all` or to perform the operation on all indices @@ -2604,9 +2607,12 @@ def mget( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to get multiple documents in one request. + Get multiple documents. Get multiple JSON documents by ID from one or more indices. + If you specify an index in the request URI, you only need to specify the document + IDs in the request body. To ensure fast responses, this multi get (mget) API + responds with partial results if one or more shards fail. 
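For the multi-get behaviour described above, a short sketch; the index and document IDs are placeholders and the synchronous client is used.

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200")  # placeholder endpoint

# Fetch several documents by ID from one index in a single round trip.
resp = client.mget(index="my-index", ids=["1", "2", "42"])

for doc in resp["docs"]:
    if doc.get("found"):
        print(doc["_id"], doc["_source"])
    else:
        print(doc["_id"], "not found or shard failure")
```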
- ``_ + ``_ :param index: Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index. @@ -2725,9 +2731,15 @@ def msearch( typed_keys: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to execute several search operations in one request. + Run multiple searches. The format of the request is similar to the bulk API format + and makes use of the newline delimited JSON (NDJSON) format. The structure is + as follows: ``` header\\n body\\n header\\n body\\n ``` This structure is specifically + optimized to reduce parsing if a specific search ends up redirected to another + node. IMPORTANT: The final line of data must end with a newline character `\\n`. + Each newline character may be preceded by a carriage return `\\r`. When sending + requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`. - ``_ + ``_ :param searches: :param index: Comma-separated list of data streams, indices, and index aliases @@ -2859,7 +2871,7 @@ def msearch_template( """ Runs multiple templated searches with a single request. - ``_ + ``_ :param search_templates: :param index: Comma-separated list of data streams, indices, and aliases to search. @@ -2952,9 +2964,13 @@ def mtermvectors( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns multiple termvectors in one request. + Get multiple term vectors. You can specify existing documents by index and ID + or provide artificial documents in the body of the request. You can specify the + index in the request body or request URI. The response contains a `docs` array + with all the fetched termvectors. Each element has the structure provided by + the termvectors API. - ``_ + ``_ :param index: Name of the index that contains the documents. :param docs: Array of existing or artificial documents. @@ -3071,7 +3087,7 @@ def open_point_in_time( then the results of those requests might not be consistent as changes happening between searches are only visible to the more recent point in time. - ``_ + ``_ :param index: A comma-separated list of index names to open point in time; use `_all` or empty string to perform the operation on all indices @@ -3153,7 +3169,7 @@ def put_script( Create or update a script or search template. Creates or updates a stored script or search template. - ``_ + ``_ :param id: Identifier for the stored script or search template. Must be unique within the cluster. @@ -3239,7 +3255,7 @@ def rank_eval( Enables you to evaluate the quality of ranked search results over a set of typical search queries. - ``_ + ``_ :param requests: A set of typical search requests, together with their provided ratings. @@ -3335,7 +3351,7 @@ def reindex( can be any existing index, alias, or data stream. The destination must differ from the source. For example, you cannot reindex a data stream into itself. - ``_ + ``_ :param dest: The destination you are copying to. :param source: The source you are copying from. @@ -3429,9 +3445,10 @@ def reindex_rethrottle( requests_per_second: t.Optional[float] = None, ) -> ObjectApiResponse[t.Any]: """ - Copies documents from a source to a destination. + Throttle a reindex operation. Change the number of requests per second for a + particular reindex operation. - ``_ + ``_ :param task_id: Identifier for the task. 
:param requests_per_second: The throttle for this request in sub-requests per @@ -3482,7 +3499,7 @@ def render_search_template( """ Renders a search template as a search request body. - ``_ + ``_ :param id: ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required. @@ -3550,7 +3567,7 @@ def scripts_painless_execute( """ Run a script. Runs a script and returns a result. - ``_ + ``_ :param context: The context that the script should run in. :param context_setup: Additional parameters for the `context`. @@ -3606,9 +3623,24 @@ def scroll( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to retrieve a large numbers of results from a single search request. - - ``_ + Run a scrolling search. IMPORTANT: The scroll API is no longer recommend for + deep pagination. If you need to preserve the index state while paging through + more than 10,000 hits, use the `search_after` parameter with a point in time + (PIT). The scroll API gets large sets of results from a single scrolling search + request. To get the necessary scroll ID, submit a search API request that includes + an argument for the `scroll` query parameter. The `scroll` parameter indicates + how long Elasticsearch should retain the search context for the request. The + search response returns a scroll ID in the `_scroll_id` response body parameter. + You can then use the scroll ID with the scroll API to retrieve the next batch + of results for the request. If the Elasticsearch security features are enabled, + the access to the results of a specific scroll ID is restricted to the user or + API key that submitted the search. You can also use the scroll API to specify + a new scroll parameter that extends or shortens the retention period for the + search context. IMPORTANT: Results from a scrolling search reflect the state + of the index at the time of the initial search request. Subsequent indexing or + document changes only affect later search and scroll requests. + + ``_ :param scroll_id: Scroll ID of the search. :param rest_total_hits_as_int: If true, the API response’s hit.total property @@ -3800,7 +3832,7 @@ def search( search queries using the `q` query string parameter or the request body. If both are specified, only the query parameter is used. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams and indices, omit this @@ -4230,7 +4262,7 @@ def search_mvt( """ Search a vector tile. Searches a vector tile for geospatial values. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, or aliases to search :param field: Field containing geospatial data to return @@ -4385,7 +4417,7 @@ def search_shards( Returns information about the indices and shards that a search request would be executed against. - ``_ + ``_ :param index: Returns the indices and shards that a search request would be executed against. @@ -4486,7 +4518,7 @@ def search_template( """ Runs a search with a search template. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (*). @@ -4616,11 +4648,17 @@ def terms_enum( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - The terms enum API can be used to discover terms in the index that begin with - the provided string. It is designed for low-latency look-ups used in auto-complete - scenarios. - - ``_ + Get terms in an index. 
Discover terms that match a partial string in an index. + This "terms enum" API is designed for low-latency look-ups used in auto-complete + scenarios. If the `complete` property in the response is false, the returned + terms set may be incomplete and should be treated as approximate. This can occur + due to a few reasons, such as a request timeout or a node error. NOTE: The terms + enum API may return terms from deleted documents. Deleted documents are initially + only marked as deleted. It is not until their segments are merged that documents + are actually deleted. Until that happens, the terms enum API will return terms + from these documents. + + ``_ :param index: Comma-separated list of data streams, indices, and index aliases to search. Wildcard (*) expressions are supported. @@ -4719,7 +4757,7 @@ def termvectors( Get term vector information. Returns information and statistics about terms in the fields of a particular document. - ``_ + ``_ :param index: Name of the index that contains the document. :param id: Unique identifier of the document. @@ -4862,7 +4900,7 @@ def update( Update a document. Updates a document by running a script or passing a partial document. - ``_ + ``_ :param index: The name of the index :param id: Document ID @@ -5028,7 +5066,7 @@ def update_by_query( is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this @@ -5222,9 +5260,12 @@ def update_by_query_rethrottle( requests_per_second: t.Optional[float] = None, ) -> ObjectApiResponse[t.Any]: """ - Changes the number of requests per second for a particular Update By Query operation. + Throttle an update by query operation. Change the number of requests per second + for a particular update by query operation. Rethrottling that speeds up the query + takes effect immediately but rethrotting that slows down the query takes effect + after completing the current batch to prevent scroll timeouts. - ``_ + ``_ :param task_id: The ID for the task. :param requests_per_second: The throttle for this request in sub-requests per diff --git a/elasticsearch/_sync/client/async_search.py b/elasticsearch/_sync/client/async_search.py index 8fbf3188d..b0b2bdaf9 100644 --- a/elasticsearch/_sync/client/async_search.py +++ b/elasticsearch/_sync/client/async_search.py @@ -36,13 +36,13 @@ def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an async search by identifier. If the search is still running, the search - request will be cancelled. Otherwise, the saved search results are deleted. If - the Elasticsearch security features are enabled, the deletion of a specific async - search is restricted to: the authenticated user that submitted the original search - request; users that have the `cancel_task` cluster privilege. + Delete an async search. If the asynchronous search is still running, it is cancelled. + Otherwise, the saved search results are deleted. If the Elasticsearch security + features are enabled, the deletion of a specific async search is restricted to: + the authenticated user that submitted the original search request; users that + have the `cancel_task` cluster privilege. - ``_ + ``_ :param id: A unique identifier for the async search. 
""" @@ -85,12 +85,12 @@ def get( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the results of a previously submitted async search request given its - identifier. If the Elasticsearch security features are enabled, access to the - results of a specific async search is restricted to the user or API key that + Get async search results. Retrieve the results of a previously submitted asynchronous + search request. If the Elasticsearch security features are enabled, access to + the results of a specific async search is restricted to the user or API key that submitted it. - ``_ + ``_ :param id: A unique identifier for the async search. :param keep_alive: Specifies how long the async search should be available in @@ -148,12 +148,12 @@ def status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get async search status Retrieves the status of a previously submitted async + Get async search status. Retrieve the status of a previously submitted async search request given its identifier, without retrieving search results. If the Elasticsearch security features are enabled, use of this API is restricted to the `monitoring_user` role. - ``_ + ``_ :param id: A unique identifier for the async search. """ @@ -323,17 +323,17 @@ def submit( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Runs a search request asynchronously. When the primary sort of the results is - an indexed field, shards get sorted based on minimum and maximum value that they - hold for that field, hence partial results become available following the sort - criteria that was requested. Warning: Async search does not support scroll nor - search requests that only include the suggest section. By default, Elasticsearch - doesn’t allow you to store an async search response larger than 10Mb and an attempt - to do this results in an error. The maximum allowed size for a stored async search - response can be set by changing the `search.max_async_search_response_size` cluster - level setting. + Run an async search. When the primary sort of the results is an indexed field, + shards get sorted based on minimum and maximum value that they hold for that + field. Partial results become available following the sort criteria that was + requested. Warning: Asynchronous search does not support scroll or search requests + that include only the suggest section. By default, Elasticsearch does not allow + you to store an async search response larger than 10Mb and an attempt to do this + results in an error. The maximum allowed size for a stored async search response + can be set by changing the `search.max_async_search_response_size` cluster level + setting. - ``_ + ``_ :param index: A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices diff --git a/elasticsearch/_sync/client/autoscaling.py b/elasticsearch/_sync/client/autoscaling.py index 2b2a23fc2..a39e1ba35 100644 --- a/elasticsearch/_sync/client/autoscaling.py +++ b/elasticsearch/_sync/client/autoscaling.py @@ -39,7 +39,7 @@ def delete_autoscaling_policy( Deletes an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. - ``_ + ``_ :param name: the name of the autoscaling policy """ @@ -79,7 +79,7 @@ def get_autoscaling_capacity( Gets the current autoscaling capacity based on the configured autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. 
- ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_autoscaling/capacity" @@ -116,7 +116,7 @@ def get_autoscaling_policy( Retrieves an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. - ``_ + ``_ :param name: the name of the autoscaling policy """ @@ -161,7 +161,7 @@ def put_autoscaling_policy( Creates a new autoscaling policy. Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. - ``_ + ``_ :param name: the name of the autoscaling policy :param policy: diff --git a/elasticsearch/_sync/client/cat.py b/elasticsearch/_sync/client/cat.py index 067ededb6..0048c8eb5 100644 --- a/elasticsearch/_sync/client/cat.py +++ b/elasticsearch/_sync/client/cat.py @@ -57,7 +57,7 @@ def aliases( not intended for use by applications. For application consumption, use the aliases API. - ``_ + ``_ :param name: A comma-separated list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. @@ -145,7 +145,7 @@ def allocation( disk space. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. - ``_ + ``_ :param node_id: Comma-separated list of node identifiers or names used to limit the returned information. @@ -232,7 +232,7 @@ def component_templates( for use by applications. For application consumption, use the get component template API. - ``_ + ``_ :param name: The name of the component template. Accepts wildcard expressions. If omitted, all component templates are returned. @@ -316,7 +316,7 @@ def count( console. They are not intended for use by applications. For application consumption, use the count API. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -402,7 +402,7 @@ def fielddata( using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes stats API. - ``_ + ``_ :param fields: Comma-separated list of fields used to limit returned information. To retrieve all fields, omit this parameter. @@ -497,7 +497,7 @@ def health( across multiple nodes. You also can use the API to track the recovery of a large cluster over a longer period of time. - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -574,7 +574,7 @@ def help( """ Get CAT help. Returns help for the CAT APIs. - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -672,7 +672,7 @@ def indices( using the command line or Kibana console. They are not intended for use by applications. For application consumption, use an index endpoint. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -774,7 +774,7 @@ def master( command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API. - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -949,7 +949,7 @@ def ml_data_frame_analytics( For application consumption, use the get data frame analytics jobs statistics API. 
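As the cat docstrings above stress, these endpoints target human consumption; when scripting them anyway, `format="json"` plus an explicit column list keeps the output parseable. The sketch assumes the synchronous client and placeholder connection details.

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200")  # placeholder endpoint

# Columnar index overview: h= selects columns, s= sorts, format="json" for parsing.
rows = client.cat.indices(
    format="json",
    h="index,health,docs.count,store.size",
    s="index",
).body

for row in rows:
    print(row["index"], row["health"], row["docs.count"])
```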
- ``_ + ``_ :param id: The ID of the data frame analytics to fetch :param allow_no_match: Whether to ignore if a wildcard expression matches no @@ -1123,7 +1123,7 @@ def ml_datafeeds( using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get datafeed statistics API. - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. @@ -1496,7 +1496,7 @@ def ml_jobs( for use by applications. For application consumption, use the get anomaly detection job statistics API. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param allow_no_match: Specifies what to do when the request: * Contains wildcard @@ -1685,7 +1685,7 @@ def ml_trained_models( console or command line. They are not intended for use by applications. For application consumption, use the get trained models statistics API. - ``_ + ``_ :param model_id: A unique identifier for the trained model. :param allow_no_match: Specifies what to do when the request: contains wildcard @@ -1782,7 +1782,7 @@ def nodeattrs( are not intended for use by applications. For application consumption, use the nodes info API. - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -1861,7 +1861,7 @@ def nodes( are not intended for use by applications. For application consumption, use the nodes info API. - ``_ + ``_ :param bytes: The unit used to display byte values. :param format: Specifies the format to return the columnar data in, can be set @@ -1946,7 +1946,7 @@ def pending_tasks( console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API. - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -2020,7 +2020,7 @@ def plugins( They are not intended for use by applications. For application consumption, use the nodes info API. - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -2104,7 +2104,7 @@ def recovery( line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API. - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -2196,7 +2196,7 @@ def repositories( are not intended for use by applications. For application consumption, use the get snapshot repository API. - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -2275,7 +2275,7 @@ def segments( console. They are not intended for use by applications. For application consumption, use the index segments API. - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -2364,7 +2364,7 @@ def shards( for human consumption using the command line or Kibana console. They are not intended for use by applications. - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -2452,7 +2452,7 @@ def snapshots( console. They are not intended for use by applications. 
For application consumption, use the get snapshot API. - ``_ + ``_ :param repository: A comma-separated list of snapshot repositories used to limit the request. Accepts wildcard expressions. `_all` returns all repositories. @@ -2542,7 +2542,7 @@ def tasks( console. They are not intended for use by applications. For application consumption, use the task management API. - ``_ + ``_ :param actions: The task action names, which are used to limit the response. :param detailed: If `true`, the response includes detailed information about @@ -2632,7 +2632,7 @@ def templates( console. They are not intended for use by applications. For application consumption, use the get index template API. - ``_ + ``_ :param name: The name of the template to return. Accepts wildcard expressions. If omitted, all templates are returned. @@ -2718,7 +2718,7 @@ def thread_pool( They are not intended for use by applications. For application consumption, use the nodes info API. - ``_ + ``_ :param thread_pool_patterns: A comma-separated list of thread pool names used to limit the request. Accepts wildcard expressions. @@ -2973,7 +2973,7 @@ def transforms( command line. They are not intended for use by applications. For application consumption, use the get transform statistics API. - ``_ + ``_ :param transform_id: A transform identifier or a wildcard expression. If you do not specify one of these options, the API returns information for all diff --git a/elasticsearch/_sync/client/ccr.py b/elasticsearch/_sync/client/ccr.py index 79683d2a7..f3b54acbd 100644 --- a/elasticsearch/_sync/client/ccr.py +++ b/elasticsearch/_sync/client/ccr.py @@ -38,7 +38,7 @@ def delete_auto_follow_pattern( """ Deletes auto-follow patterns. - ``_ + ``_ :param name: The name of the auto follow pattern. """ @@ -109,7 +109,7 @@ def follow( """ Creates a new follower index configured to follow the referenced leader index. - ``_ + ``_ :param index: The name of the follower index :param leader_index: @@ -201,7 +201,7 @@ def follow_info( Retrieves information about all follower indices, including parameters and status for each follower index - ``_ + ``_ :param index: A comma-separated list of index patterns; use `_all` to perform the operation on all indices @@ -243,7 +243,7 @@ def follow_stats( Retrieves follower stats. return shard-level stats about the following tasks associated with each shard for the specified indices. - ``_ + ``_ :param index: A comma-separated list of index patterns; use `_all` to perform the operation on all indices @@ -296,7 +296,7 @@ def forget_follower( """ Removes the follower retention leases from the leader. - ``_ + ``_ :param index: the name of the leader index for which specified follower retention leases should be removed @@ -353,7 +353,7 @@ def get_auto_follow_pattern( Gets configured auto-follow patterns. Returns the specified auto-follow pattern collection. - ``_ + ``_ :param name: Specifies the auto-follow pattern collection that you want to retrieve. If you do not specify a name, the API returns information for all collections. @@ -397,7 +397,7 @@ def pause_auto_follow_pattern( """ Pauses an auto-follow pattern - ``_ + ``_ :param name: The name of the auto follow pattern that should pause discovering new indices to follow. @@ -439,7 +439,7 @@ def pause_follow( Pauses a follower index. The follower index will not fetch any additional operations from the leader index. - ``_ + ``_ :param index: The name of the follower index that should pause following its leader index. 
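For the CCR follower lifecycle covered above, a compact sketch; the follower index name is hypothetical, and the synchronous client mirrors the async methods shown in this file.

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200")  # placeholder endpoint

follower = "follower-logs"  # hypothetical follower index

# Inspect the follower configuration and shard-level stats, then pause replication.
print(client.ccr.follow_info(index=follower))
print(client.ccr.follow_stats(index=follower))
client.ccr.pause_follow(index=follower)
```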
@@ -516,7 +516,7 @@ def put_auto_follow_pattern( cluster. Newly created indices on the remote cluster matching any of the specified patterns will be automatically configured as follower indices. - ``_ + ``_ :param name: The name of the collection of auto-follow patterns. :param remote_cluster: The remote cluster containing the leader indices to match @@ -640,7 +640,7 @@ def resume_auto_follow_pattern( """ Resumes an auto-follow pattern that has been paused - ``_ + ``_ :param name: The name of the auto follow pattern to resume discovering new indices to follow. @@ -705,7 +705,7 @@ def resume_follow( """ Resumes a follower index that has been paused - ``_ + ``_ :param index: The name of the follow index to resume following. :param max_outstanding_read_requests: @@ -787,7 +787,7 @@ def stats( """ Gets all stats related to cross-cluster replication. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ccr/stats" @@ -824,7 +824,7 @@ def unfollow( Stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. - ``_ + ``_ :param index: The name of the follower index that should be turned into a regular index. diff --git a/elasticsearch/_sync/client/cluster.py b/elasticsearch/_sync/client/cluster.py index 19dc76b99..d7f60e889 100644 --- a/elasticsearch/_sync/client/cluster.py +++ b/elasticsearch/_sync/client/cluster.py @@ -46,7 +46,7 @@ def allocation_explain( """ Provides explanations for shard allocations in the cluster. - ``_ + ``_ :param current_node: Specifies the node ID or the name of the node to only explain a shard that is currently located on the specified node. @@ -117,7 +117,7 @@ def delete_component_template( are building blocks for constructing index templates that specify index mappings, settings, and aliases. - ``_ + ``_ :param name: Comma-separated list or wildcard expression of component template names used to limit the request. @@ -167,7 +167,7 @@ def delete_voting_config_exclusions( """ Clears cluster voting config exclusions. - ``_ + ``_ :param wait_for_removal: Specifies whether to wait for all excluded nodes to be removed from the cluster before clearing the voting configuration exclusions @@ -215,7 +215,7 @@ def exists_component_template( Check component templates. Returns information about whether a particular component template exists. - ``_ + ``_ :param name: Comma-separated list of component template names used to limit the request. Wildcard (*) expressions are supported. @@ -270,7 +270,7 @@ def get_component_template( """ Get component templates. Retrieves information about component templates. - ``_ + ``_ :param name: Comma-separated list of component template names used to limit the request. Wildcard (`*`) expressions are supported. @@ -334,7 +334,7 @@ def get_settings( Returns cluster-wide settings. By default, it returns only settings that have been explicitly defined. - ``_ + ``_ :param flat_settings: If `true`, returns settings in flat format. :param include_defaults: If `true`, returns default cluster settings from the @@ -424,7 +424,7 @@ def health( by the worst shard status. The cluster status is controlled by the worst index status. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported. To target @@ -528,7 +528,7 @@ def info( """ Get cluster info. Returns basic information about the cluster. 
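A small usage sketch for the cluster APIs above: read cluster health, then the cluster-wide settings including built-in defaults, which is what the `include_defaults` parameter documents (the connection URL is an assumption)::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Overall cluster status (green/yellow/red).
    health = client.cluster.health()
    print(health["status"])

    # Explicitly defined settings plus defaults, returned in flat key format.
    settings = client.cluster.get_settings(include_defaults=True, flat_settings=True)
    print(list(settings["defaults"])[:5])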
- ``_ + ``_ :param target: Limits the information returned to the specific target. Supports a comma-separated list, such as http,ingest. @@ -577,7 +577,7 @@ def pending_tasks( update, the activity of this task might be reported by both task api and pending cluster tasks API. - ``_ + ``_ :param local: If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. @@ -625,7 +625,7 @@ def post_voting_config_exclusions( """ Updates the cluster voting config exclusions by node ids or node names. - ``_ + ``_ :param node_ids: A comma-separated list of the persistent ids of the nodes to exclude from the voting configuration. If specified, you may not also specify @@ -700,7 +700,7 @@ def put_component_template( You can include comments anywhere in the request body except before the opening curly bracket. - ``_ + ``_ :param name: Name of the component template to create. Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; @@ -789,7 +789,7 @@ def put_settings( """ Updates the cluster settings. - ``_ + ``_ :param flat_settings: Return settings in flat format (default: false) :param master_timeout: Explicit operation timeout for connection to master node @@ -845,7 +845,7 @@ def remote_info( cluster information. It returns connection and endpoint information keyed by the configured remote cluster alias. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_remote/info" @@ -890,7 +890,7 @@ def reroute( """ Allows to manually change the allocation of individual shards in the cluster. - ``_ + ``_ :param commands: Defines the commands to perform. :param dry_run: If true, then the request simulates the operation only and returns @@ -977,7 +977,7 @@ def state( """ Returns a comprehensive information about the state of the cluster. - ``_ + ``_ :param metric: Limit the information returned to the specified metrics :param index: A comma-separated list of index names; use `_all` or empty string @@ -1053,8 +1053,8 @@ def stats( node_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, - flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, + include_remotes: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: @@ -1063,11 +1063,11 @@ def stats( size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins). - ``_ + ``_ :param node_id: Comma-separated list of node filters used to limit returned information. Defaults to all nodes in the cluster. - :param flat_settings: If `true`, returns settings in flat format. + :param include_remotes: Include remote cluster data into the response :param timeout: Period to wait for each node to respond. If a node does not respond before its timeout expires, the response does not include its stats. However, timed out nodes are included in the response’s `_nodes.failed` property. 
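The hunk above replaces `flat_settings` with the new `include_remotes` flag on the cluster stats API; a minimal sketch of the new call against a cluster that supports it (the URL is an assumption)::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Ask the cluster stats API to also include remote cluster data,
    # using the parameter introduced in this change.
    stats = client.cluster.stats(include_remotes=True)
    print(stats["nodes"]["count"])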
@@ -1085,10 +1085,10 @@ def stats( __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path - if flat_settings is not None: - __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human + if include_remotes is not None: + __query["include_remotes"] = include_remotes if pretty is not None: __query["pretty"] = pretty if timeout is not None: diff --git a/elasticsearch/_sync/client/connector.py b/elasticsearch/_sync/client/connector.py index d097da30b..2c64c556d 100644 --- a/elasticsearch/_sync/client/connector.py +++ b/elasticsearch/_sync/client/connector.py @@ -36,9 +36,10 @@ def check_in( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the last_seen field in the connector, and sets it to current timestamp + Check in a connector. Update the `last_seen` field in the connector and set it + to the current timestamp. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be checked in """ @@ -77,9 +78,12 @@ def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a connector. + Delete a connector. Removes a connector and associated sync jobs. This is a destructive + action that is not recoverable. NOTE: This action doesn’t delete any API keys, + ingest pipelines, or data indices associated with the connector. These need to + be removed manually. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be deleted :param delete_sync_jobs: A flag indicating if associated sync jobs should be @@ -121,9 +125,9 @@ def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a connector. + Get a connector. Get the details about a connector. - ``_ + ``_ :param connector_id: The unique identifier of the connector """ @@ -215,9 +219,10 @@ def last_sync( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates last sync stats in the connector document + Update the connector last sync stats. Update the fields related to the last sync + of a connector. This action is used for analytics and monitoring. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param last_access_control_sync_error: @@ -309,9 +314,9 @@ def list( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns existing connectors. + Get all connectors. Get information about all connectors. - ``_ + ``_ :param connector_name: A comma-separated list of connector names to fetch connector documents for @@ -383,9 +388,13 @@ def post( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a connector. + Create a connector. Connectors are Elasticsearch integrations that bring content + from third-party data sources, which can be deployed on Elastic Cloud or hosted + on your own infrastructure. Elastic managed connectors (Native connectors) are + a managed service on Elastic Cloud. Self-managed connectors (Connector clients) + are self-managed on your infrastructure. - ``_ + ``_ :param description: :param index_name: @@ -461,9 +470,9 @@ def put( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a connector. + Create or update a connector. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be created or updated. ID is auto-generated if not provided. 
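For context on the connector docstrings above, a hedged sketch that creates (or updates) a connector and reads it back; the connector ID and index name are illustrative, and a self-managed connector also needs a running connector service, which is outside this sketch::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Create or update a connector document; the ID is chosen by the caller here.
    client.connector.put(
        connector_id="my-connector",      # assumed identifier
        index_name="search-my-content",   # index where ingested data lands (assumed)
    )

    # Fetch the connector document back.
    resp = client.connector.get(connector_id="my-connector")
    print(resp["index_name"])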
@@ -530,9 +539,12 @@ def sync_job_cancel( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Cancels a connector sync job. + Cancel a connector sync job. Cancel a connector sync job, which sets the status + to cancelling and updates `cancellation_requested_at` to the current time. The + connector service is then responsible for setting the status of connector sync + jobs to cancelled. - ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job """ @@ -574,9 +586,10 @@ def sync_job_delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a connector sync job. + Delete a connector sync job. Remove a connector sync job and its associated data. + This is a destructive action that is not recoverable. - ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job to be deleted @@ -617,9 +630,9 @@ def sync_job_get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a connector sync job. + Get a connector sync job. - ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job """ @@ -685,9 +698,10 @@ def sync_job_list( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Lists connector sync jobs. + Get all connector sync jobs. Get information about all stored connector sync + jobs listed by their creation date in ascending order. - ``_ + ``_ :param connector_id: A connector id to fetch connector sync jobs for :param from_: Starting offset (default: 0) @@ -746,9 +760,10 @@ def sync_job_post( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a connector sync job. + Create a connector sync job. Create a connector sync job document in the internal + index and initialize its counters and timestamps with default values. - ``_ + ``_ :param id: The id of the associated connector :param job_type: @@ -797,9 +812,10 @@ def update_active_filtering( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Activates the valid draft filtering for a connector. + Activate the connector draft filter. Activates the valid draft filtering for + a connector. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated """ @@ -842,9 +858,13 @@ def update_api_key_id( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the API key id in the connector document + Update the connector API key ID. Update the `api_key_id` and `api_key_secret_id` + fields of a connector. You can specify the ID of the API key used for authorization + and the ID of the connector secret where the API key is stored. The connector + secret ID is required only for Elastic managed (native) connectors. Self-managed + connectors (connector clients) do not use this field. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param api_key_id: @@ -896,9 +916,10 @@ def update_configuration( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the configuration field in the connector document + Update the connector configuration. Update the configuration field in the connector + document. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param configuration: @@ -949,9 +970,12 @@ def update_error( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the filtering field in the connector document + Update the connector error field. 
Set the error field for the connector. If the + error provided in the request body is non-null, the connector’s status is updated + to error. Otherwise, if the error is reset to null, the connector status is updated + to connected. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param error: @@ -1003,9 +1027,12 @@ def update_filtering( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the filtering field in the connector document + Update the connector filtering. Update the draft filtering configuration of a + connector and marks the draft validation state as edited. The filtering draft + is activated once validated by the running Elastic connector service. The filtering + property is used to configure sync rules (both basic and advanced) for a connector. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param advanced_snippet: @@ -1059,9 +1086,10 @@ def update_filtering_validation( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the draft filtering validation info for a connector. + Update the connector draft filtering validation. Update the draft filtering validation + info for a connector. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param validation: @@ -1111,9 +1139,10 @@ def update_index_name( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the index_name in the connector document + Update the connector index name. Update the `index_name` field of a connector, + specifying the index where the data ingested by the connector is stored. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param index_name: @@ -1164,9 +1193,9 @@ def update_name( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the name and description fields in the connector document + Update the connector name and description. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param description: @@ -1217,9 +1246,9 @@ def update_native( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the is_native flag in the connector document + Update the connector is_native flag. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param is_native: @@ -1269,9 +1298,10 @@ def update_pipeline( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the pipeline field in the connector document + Update the connector pipeline. When you create a new connector, the configuration + of an ingest pipeline is populated with default settings. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param pipeline: @@ -1321,9 +1351,9 @@ def update_scheduling( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the scheduling field in the connector document + Update the connector scheduling. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param scheduling: @@ -1373,9 +1403,9 @@ def update_service_type( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the service type of the connector + Update the connector service type. 
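A short sketch exercising two of the connector operations documented above: queue a sync job and repoint the connector's target index. The job type value and all names are assumptions::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Create a sync job document for an existing connector.
    client.connector.sync_job_post(
        id="my-connector",  # the associated connector id (assumed)
        job_type="full",    # assumed job type
    )

    # Point the connector at a different index for future syncs.
    client.connector.update_index_name(
        connector_id="my-connector",
        index_name="search-my-content-v2",  # assumed index name
    )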
- ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param service_type: @@ -1432,9 +1462,9 @@ def update_status( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the status of the connector + Update the connector status. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param status: diff --git a/elasticsearch/_sync/client/dangling_indices.py b/elasticsearch/_sync/client/dangling_indices.py index 4cf0ec024..d5d869a65 100644 --- a/elasticsearch/_sync/client/dangling_indices.py +++ b/elasticsearch/_sync/client/dangling_indices.py @@ -39,13 +39,17 @@ def delete_dangling_index( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes the specified dangling index + Delete a dangling index. If Elasticsearch encounters index data that is absent + from the current cluster state, those indices are considered to be dangling. + For example, this can happen if you delete more than `cluster.indices.tombstones.size` + indices while an Elasticsearch node is offline. - ``_ + ``_ - :param index_uuid: The UUID of the dangling index - :param accept_data_loss: Must be set to true in order to delete the dangling - index + :param index_uuid: The UUID of the index to delete. Use the get dangling indices + API to find the UUID. + :param accept_data_loss: This parameter must be set to true to acknowledge that + it will no longer be possible to recove data from the dangling index. :param master_timeout: Specify timeout for connection to master :param timeout: Explicit operation timeout """ @@ -94,13 +98,20 @@ def import_dangling_index( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Imports the specified dangling index + Import a dangling index. If Elasticsearch encounters index data that is absent + from the current cluster state, those indices are considered to be dangling. + For example, this can happen if you delete more than `cluster.indices.tombstones.size` + indices while an Elasticsearch node is offline. - ``_ + ``_ - :param index_uuid: The UUID of the dangling index - :param accept_data_loss: Must be set to true in order to import the dangling - index + :param index_uuid: The UUID of the index to import. Use the get dangling indices + API to locate the UUID. + :param accept_data_loss: This parameter must be set to true to import a dangling + index. Because Elasticsearch cannot know where the dangling index data came + from or determine which shard copies are fresh and which are stale, it cannot + guarantee that the imported data represents the latest state of the index + when it was last in the cluster. :param master_timeout: Specify timeout for connection to master :param timeout: Explicit operation timeout """ @@ -145,9 +156,13 @@ def list_dangling_indices( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns all dangling indices. + Get the dangling indices. If Elasticsearch encounters index data that is absent + from the current cluster state, those indices are considered to be dangling. + For example, this can happen if you delete more than `cluster.indices.tombstones.size` + indices while an Elasticsearch node is offline. Use this API to list dangling + indices, which you can then import or delete. 
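Following the reworked dangling-index docstrings above, a hedged sketch that lists dangling indices and imports one by UUID; the response field names follow the public API and the cluster URL is an assumption::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # List indices that exist on disk but are absent from the cluster state.
    resp = client.dangling_indices.list_dangling_indices()
    for dangling in resp["dangling_indices"]:
        print(dangling["index_name"], dangling["index_uuid"])

    # Import one of them back, explicitly accepting possible data loss.
    if resp["dangling_indices"]:
        client.dangling_indices.import_dangling_index(
            index_uuid=resp["dangling_indices"][0]["index_uuid"],
            accept_data_loss=True,
        )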
- ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_dangling" diff --git a/elasticsearch/_sync/client/enrich.py b/elasticsearch/_sync/client/enrich.py index 6a855c402..099cbf2dd 100644 --- a/elasticsearch/_sync/client/enrich.py +++ b/elasticsearch/_sync/client/enrich.py @@ -38,7 +38,7 @@ def delete_policy( """ Delete an enrich policy. Deletes an existing enrich policy and its enrich index. - ``_ + ``_ :param name: Enrich policy to delete. """ @@ -79,7 +79,7 @@ def execute_policy( """ Creates the enrich index for an existing enrich policy. - ``_ + ``_ :param name: Enrich policy to execute. :param wait_for_completion: If `true`, the request blocks other enrich policy @@ -123,7 +123,7 @@ def get_policy( """ Get an enrich policy. Returns information about an enrich policy. - ``_ + ``_ :param name: Comma-separated list of enrich policy names used to limit the request. To return information for all enrich policies, omit this parameter. @@ -173,7 +173,7 @@ def put_policy( """ Create an enrich policy. Creates an enrich policy. - ``_ + ``_ :param name: Name of the enrich policy to create or update. :param geo_match: Matches enrich data to incoming documents based on a `geo_shape` @@ -227,7 +227,7 @@ def stats( Get enrich stats. Returns enrich coordinator statistics and information about enrich policies that are currently executing. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_enrich/_stats" diff --git a/elasticsearch/_sync/client/eql.py b/elasticsearch/_sync/client/eql.py index ce3e515f3..39af43fb0 100644 --- a/elasticsearch/_sync/client/eql.py +++ b/elasticsearch/_sync/client/eql.py @@ -39,7 +39,7 @@ def delete( Deletes an async EQL search or a stored synchronous EQL search. The API also deletes results for the search. - ``_ + ``_ :param id: Identifier for the search to delete. A search ID is provided in the EQL search API's response for an async search. A search ID is also provided @@ -86,7 +86,7 @@ def get( Returns the current status and available results for an async EQL search or a stored synchronous EQL search. - ``_ + ``_ :param id: Identifier for the search. :param keep_alive: Period for which the search and its results are stored on @@ -137,7 +137,7 @@ def get_status( Returns the current status for an async EQL search or a stored synchronous EQL search without returning results. - ``_ + ``_ :param id: Identifier for the search. """ @@ -225,7 +225,7 @@ def search( """ Returns results matching a query expressed in Event Query Language (EQL) - ``_ + ``_ :param index: The name of the index to scope the operation :param query: EQL query you wish to run. diff --git a/elasticsearch/_sync/client/esql.py b/elasticsearch/_sync/client/esql.py index 844223895..fc9fd2a7e 100644 --- a/elasticsearch/_sync/client/esql.py +++ b/elasticsearch/_sync/client/esql.py @@ -47,7 +47,14 @@ def query( error_trace: t.Optional[bool] = None, filter: t.Optional[t.Mapping[str, t.Any]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, - format: t.Optional[str] = None, + format: t.Optional[ + t.Union[ + str, + t.Literal[ + "arrow", "cbor", "csv", "json", "smile", "tsv", "txt", "yaml" + ], + ] + ] = None, human: t.Optional[bool] = None, locale: t.Optional[str] = None, params: t.Optional[ @@ -63,7 +70,7 @@ def query( """ Executes an ES|QL request - ``_ + ``_ :param query: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. 
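The `format` parameter above is narrowed to a literal of supported output formats; a minimal ES|QL sketch using one of them (the index name and query are illustrative)::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Run an ES|QL query and request CSV output, one of the documented formats.
    resp = client.esql.query(
        query="FROM my-index | LIMIT 5",  # assumed index name
        format="csv",
    )
    print(resp)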
diff --git a/elasticsearch/_sync/client/features.py b/elasticsearch/_sync/client/features.py index 5d95b3440..96748493f 100644 --- a/elasticsearch/_sync/client/features.py +++ b/elasticsearch/_sync/client/features.py @@ -38,7 +38,7 @@ def get_features( Gets a list of features which can be included in snapshots using the feature_states field when creating a snapshot - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_features" @@ -73,7 +73,7 @@ def reset_features( """ Resets the internal state of features, usually by deleting system indices - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_features/_reset" diff --git a/elasticsearch/_sync/client/fleet.py b/elasticsearch/_sync/client/fleet.py index 001bcba36..098981d07 100644 --- a/elasticsearch/_sync/client/fleet.py +++ b/elasticsearch/_sync/client/fleet.py @@ -43,7 +43,7 @@ def global_checkpoints( Returns the current global checkpoints for an index. This API is design for internal use by the fleet server project. - ``_ + ``_ :param index: A single index or index alias that resolves to a single index. :param checkpoints: A comma separated list of previous global checkpoints. When diff --git a/elasticsearch/_sync/client/graph.py b/elasticsearch/_sync/client/graph.py index f411f3242..5a29add0b 100644 --- a/elasticsearch/_sync/client/graph.py +++ b/elasticsearch/_sync/client/graph.py @@ -48,7 +48,7 @@ def explore( Extracts and summarizes information about the documents and terms in an Elasticsearch data stream or index. - ``_ + ``_ :param index: Name of the index. :param connections: Specifies or more fields from which you want to extract terms diff --git a/elasticsearch/_sync/client/ilm.py b/elasticsearch/_sync/client/ilm.py index 48a68fa2a..4f8196869 100644 --- a/elasticsearch/_sync/client/ilm.py +++ b/elasticsearch/_sync/client/ilm.py @@ -42,7 +42,7 @@ def delete_lifecycle( that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error. - ``_ + ``_ :param name: Identifier for the policy. :param master_timeout: Period to wait for a connection to the master node. If @@ -97,7 +97,7 @@ def explain_lifecycle( currently executing phase, action, and step. Shows when the index entered each one, the definition of the running phase, and information about any failures. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to target. Supports wildcards (`*`). To target all data streams and indices, use `*` @@ -159,7 +159,7 @@ def get_lifecycle( """ Retrieves a lifecycle policy. - ``_ + ``_ :param name: Identifier for the policy. :param master_timeout: Period to wait for a connection to the master node. If @@ -210,7 +210,7 @@ def get_status( """ Retrieves the current index lifecycle management (ILM) status. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ilm/status" @@ -254,7 +254,7 @@ def migrate_to_data_tiers( data tiers, and optionally deletes one legacy index template.+ Using node roles enables ILM to automatically move the indices between data tiers. - ``_ + ``_ :param dry_run: If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration. 
This provides @@ -304,17 +304,17 @@ def move_to_step( *, index: str, current_step: t.Optional[t.Mapping[str, t.Any]] = None, + next_step: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, - next_step: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ Manually moves an index into the specified step and executes that step. - ``_ + ``_ :param index: The name of the index whose lifecycle step is to change :param current_step: @@ -322,6 +322,10 @@ def move_to_step( """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") + if current_step is None and body is None: + raise ValueError("Empty value passed for parameter 'current_step'") + if next_step is None and body is None: + raise ValueError("Empty value passed for parameter 'next_step'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/_ilm/move/{__path_parts["index"]}' __query: t.Dict[str, t.Any] = {} @@ -374,7 +378,7 @@ def put_lifecycle( Creates a lifecycle policy. If the specified policy exists, the policy is replaced and the policy version is incremented. - ``_ + ``_ :param name: Identifier for the policy. :param master_timeout: Period to wait for a connection to the master node. If @@ -433,7 +437,7 @@ def remove_policy( """ Removes the assigned lifecycle policy and stops managing the specified index - ``_ + ``_ :param index: The name of the index to remove policy on """ @@ -473,7 +477,7 @@ def retry( """ Retries executing the policy for an index that is in the ERROR step. - ``_ + ``_ :param index: The name of the indices (comma-separated) whose failed lifecycle step is to be retry @@ -515,7 +519,7 @@ def start( """ Start the index lifecycle management (ILM) plugin. - ``_ + ``_ :param master_timeout: :param timeout: @@ -560,7 +564,7 @@ def stop( Halts all lifecycle management operations and stops the index lifecycle management (ILM) plugin - ``_ + ``_ :param master_timeout: :param timeout: diff --git a/elasticsearch/_sync/client/indices.py b/elasticsearch/_sync/client/indices.py index 7b59bb318..bcd064b1c 100644 --- a/elasticsearch/_sync/client/indices.py +++ b/elasticsearch/_sync/client/indices.py @@ -52,7 +52,7 @@ def add_block( Add an index block. Limits the operations allowed on an index by blocking specific operation types. - ``_ + ``_ :param index: A comma separated list of indices to add a block to :param block: The block to add (one of read, write, read_only or metadata) @@ -137,9 +137,10 @@ def analyze( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Performs analysis on a text string and returns the resulting tokens. + Get tokens from text analysis. The analyze API performs [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) + on a text string and returns the resulting tokens. - ``_ + ``_ :param index: Index used to derive the analyzer. If specified, the `analyzer` or field parameter overrides this value. If no index is specified or the @@ -241,7 +242,7 @@ def clear_cache( Clears the caches of one or more indices. For data streams, the API clears the caches of the stream’s backing indices. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). 
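The `move_to_step` hunk above now requires both `current_step` and `next_step` in the request body; a hedged sketch of a valid call. The index name and step definitions are illustrative and must match the index's actual ILM state for the request to succeed::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Manually advance an index from the step it is currently on to a later step.
    client.ilm.move_to_step(
        index="my-index",  # assumed index managed by an ILM policy
        current_step={"phase": "new", "action": "complete", "name": "complete"},
        next_step={"phase": "warm", "action": "forcemerge", "name": "forcemerge"},
    )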
To target all data streams @@ -326,7 +327,7 @@ def clone( """ Clones an existing index. - ``_ + ``_ :param index: Name of the source index to clone. :param target: Name of the target index to create. @@ -414,7 +415,7 @@ def close( """ Closes an index. - ``_ + ``_ :param index: Comma-separated list or wildcard expression of index names used to limit the request. @@ -495,7 +496,7 @@ def create( """ Create an index. Creates a new index. - ``_ + ``_ :param index: Name of the index you wish to create. :param aliases: Aliases for the index. @@ -569,7 +570,7 @@ def create_data_stream( Create a data stream. Creates a data stream. You must have a matching index template with data stream enabled. - ``_ + ``_ :param name: Name of the data stream, which must meet the following criteria: Lowercase only; Cannot include `\\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, @@ -630,7 +631,7 @@ def data_streams_stats( """ Get data stream stats. Retrieves statistics for one or more data streams. - ``_ + ``_ :param name: Comma-separated list of data streams used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams in a @@ -691,7 +692,7 @@ def delete( """ Delete indices. Deletes one or more indices. - ``_ + ``_ :param index: Comma-separated list of indices to delete. You cannot specify index aliases. By default, this parameter does not support wildcards (`*`) or `_all`. @@ -761,7 +762,7 @@ def delete_alias( """ Delete an alias. Removes a data stream or index from an alias. - ``_ + ``_ :param index: Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). @@ -826,7 +827,7 @@ def delete_data_lifecycle( Delete data stream lifecycles. Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle. - ``_ + ``_ :param name: A comma-separated list of data streams of which the data stream lifecycle will be deleted; use `*` to get all data streams @@ -886,7 +887,7 @@ def delete_data_stream( """ Delete data streams. Deletes one or more data streams and their backing indices. - ``_ + ``_ :param name: Comma-separated list of data streams to delete. Wildcard (`*`) expressions are supported. @@ -941,7 +942,7 @@ def delete_index_template( then there is no wildcard support and the provided names should match completely with existing templates. - ``_ + ``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. @@ -993,7 +994,7 @@ def delete_template( """ Deletes a legacy index template. - ``_ + ``_ :param name: The name of the legacy index template to delete. Wildcard (`*`) expressions are supported. @@ -1055,7 +1056,7 @@ def disk_usage( """ Analyzes the disk usage of each field of an index or data stream. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. It’s recommended to execute this API with a single @@ -1130,7 +1131,7 @@ def downsample( (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped by a configured time interval. - ``_ + ``_ :param index: Name of the time series index to downsample. :param target_index: Name of the index to create. @@ -1199,7 +1200,7 @@ def exists( Check indices. Checks if one or more indices, index aliases, or data streams exist. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases. Supports wildcards (`*`). @@ -1277,7 +1278,7 @@ def exists_alias( """ Check aliases. 
Checks if one or more data stream or index aliases exist. - ``_ + ``_ :param name: Comma-separated list of aliases to check. Supports wildcards (`*`). :param index: Comma-separated list of data streams or indices used to limit the @@ -1347,7 +1348,7 @@ def exists_index_template( """ Returns information about whether a particular index template exists. - ``_ + ``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. @@ -1397,7 +1398,7 @@ def exists_template( Check existence of index templates. Returns information about whether a particular index template exists. - ``_ + ``_ :param name: The comma separated names of the index templates :param flat_settings: Return settings in flat format (default: false) @@ -1452,7 +1453,7 @@ def explain_data_lifecycle( creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. - ``_ + ``_ :param index: The name of the index to explain :param include_defaults: indicates if the API should return the default values @@ -1515,7 +1516,7 @@ def field_usage_stats( """ Returns field usage information for each shard and field of an index. - ``_ + ``_ :param index: Comma-separated list or wildcard expression of index names used to limit the request. @@ -1603,7 +1604,7 @@ def flush( """ Flushes one or more data streams or indices. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to flush. Supports wildcards (`*`). To flush all data streams and indices, omit this @@ -1686,7 +1687,7 @@ def forcemerge( """ Performs the force merge operation on one or more indices. - ``_ + ``_ :param index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices @@ -1780,7 +1781,7 @@ def get( Get index information. Returns information about one or more indices. For data streams, the API returns information about the stream’s backing indices. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard expressions (*) are supported. @@ -1869,7 +1870,7 @@ def get_alias( """ Get aliases. Retrieves information for one or more data stream or index aliases. - ``_ + ``_ :param index: Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). To target all data streams and indices, @@ -1952,7 +1953,7 @@ def get_data_lifecycle( Get data stream lifecycles. Retrieves the data stream lifecycle configuration of one or more data streams. - ``_ + ``_ :param name: Comma-separated list of data streams to limit the request. Supports wildcards (`*`). To target all data streams, omit this parameter or use `*` @@ -2018,7 +2019,7 @@ def get_data_stream( """ Get data streams. Retrieves information about one or more data streams. - ``_ + ``_ :param name: Comma-separated list of data stream names used to limit the request. Wildcard (`*`) expressions are supported. If omitted, all data streams are @@ -2094,7 +2095,7 @@ def get_field_mapping( Get mapping definitions. Retrieves mapping definitions for one or more fields. For data streams, the API retrieves field mappings for the stream’s backing indices. - ``_ + ``_ :param fields: Comma-separated list or wildcard expression of fields used to limit returned information. @@ -2171,7 +2172,7 @@ def get_index_template( """ Get index templates. Returns information about one or more index templates. 
- ``_ + ``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. @@ -2245,7 +2246,7 @@ def get_mapping( Get mapping definitions. Retrieves mapping definitions for one or more indices. For data streams, the API retrieves mappings for the stream’s backing indices. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -2330,7 +2331,7 @@ def get_settings( Get index settings. Returns setting information for one or more indices. For data streams, returns setting information for the stream’s backing indices. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -2417,7 +2418,7 @@ def get_template( """ Get index templates. Retrieves information about one or more index templates. - ``_ + ``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (`*`) expressions are supported. To return all index templates, @@ -2483,7 +2484,7 @@ def migrate_to_data_stream( with the same name. The indices for the alias become hidden backing indices for the stream. The write index for the alias becomes the write index for the stream. - ``_ + ``_ :param name: Name of the index alias to convert to a data stream. :param master_timeout: Period to wait for a connection to the master node. If @@ -2536,7 +2537,7 @@ def modify_data_stream( Update data streams. Performs one or more data stream modification actions in a single atomic operation. - ``_ + ``_ :param actions: Actions to perform. """ @@ -2596,7 +2597,7 @@ def open( """ Opens a closed index. For data streams, the API opens any closed backing indices. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). By default, you must explicitly @@ -2672,7 +2673,7 @@ def promote_data_stream( Promotes a data stream from a replicated data stream managed by CCR to a regular data stream - ``_ + ``_ :param name: The name of the data stream :param master_timeout: Period to wait for a connection to the master node. If @@ -2734,7 +2735,7 @@ def put_alias( """ Create or update an alias. Adds a data stream or index to an alias. - ``_ + ``_ :param index: Comma-separated list of data streams or indices to add. Supports wildcards (`*`). Wildcard patterns that match both data streams and indices @@ -2837,7 +2838,7 @@ def put_data_lifecycle( Update data stream lifecycles. Update the data stream lifecycle of the specified data streams. - ``_ + ``_ :param name: Comma-separated list of data streams used to limit the request. Supports wildcards (`*`). To target all data streams use `*` or `_all`. @@ -2939,7 +2940,7 @@ def put_index_template( Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. - ``_ + ``_ :param name: Index or template name :param allow_auto_create: This setting overrides the value of the `action.auto_create_index` @@ -3100,7 +3101,7 @@ def put_mapping( can also use this API to change the search settings of existing fields. For data streams, these changes are applied to all backing indices by default. - ``_ + ``_ :param index: A comma-separated list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. 
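A small sketch for the mapping update API documented above: add fields to an existing index's mapping (index and field names are assumptions)::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Add new fields (or adjust search settings of existing ones) on an index.
    client.indices.put_mapping(
        index="my-index",  # assumed existing index
        properties={
            "title": {"type": "text"},
            "published_at": {"type": "date"},
        },
    )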
@@ -3230,7 +3231,7 @@ def put_settings( Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. - ``_ + ``_ :param settings: :param index: Comma-separated list of data streams, indices, and aliases used @@ -3335,7 +3336,7 @@ def put_template( Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. - ``_ + ``_ :param name: The name of the template :param aliases: Aliases for the index. @@ -3417,7 +3418,7 @@ def recovery( indices. For data streams, the API returns information for the stream’s backing indices. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -3481,7 +3482,7 @@ def refresh( indices available for search. For data streams, the API runs the refresh operation on the stream’s backing indices. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -3551,7 +3552,7 @@ def reload_search_analyzers( """ Reloads an index's search analyzers and their resources. - ``_ + ``_ :param index: A comma-separated list of index names to reload analyzers for :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves @@ -3617,7 +3618,7 @@ def resolve_cluster( including the local cluster, if included. Multiple patterns and remote clusters are supported. - ``_ + ``_ :param name: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified @@ -3691,7 +3692,7 @@ def resolve_index( Resolves the specified name(s) and/or index patterns for indices, aliases, and data streams. Multiple patterns and remote clusters are supported. - ``_ + ``_ :param name: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified @@ -3764,7 +3765,7 @@ def rollover( """ Roll over to a new index. Creates a new index for a data stream or index alias. - ``_ + ``_ :param alias: Name of the data stream or index alias to roll over. :param new_index: Name of the index to create. Supports date math. Data streams @@ -3870,7 +3871,7 @@ def segments( Returns low-level information about the Lucene segments in index shards. For data streams, the API returns information about the stream’s backing indices. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -3950,7 +3951,7 @@ def shard_stores( Retrieves store information about replica shards in one or more indices. For data streams, the API retrieves store information for the stream’s backing indices. - ``_ + ``_ :param index: List of data streams, indices, and aliases used to limit the request. :param allow_no_indices: If false, the request returns an error if any wildcard @@ -4021,7 +4022,7 @@ def shrink( """ Shrinks an existing index into a new index with fewer primary shards. - ``_ + ``_ :param index: Name of the source index to shrink. :param target: Name of the target index to create. @@ -4097,7 +4098,7 @@ def simulate_index_template( Simulate an index. Returns the index configuration that would be applied to the specified index from an existing index template. 
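For the dynamic settings and rollover docstrings above, a hedged sketch that bumps a replica count and then rolls an alias over to a fresh write index (index and alias names are assumptions)::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Change a dynamic index setting in place.
    client.indices.put_settings(
        index="my-index",  # assumed existing index
        settings={"index": {"number_of_replicas": 2}},
    )

    # Roll over a data stream or alias to a new write index.
    client.indices.rollover(alias="logs-alias")  # assumed alias name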
- ``_ + ``_ :param name: Name of the index to simulate :param include_defaults: If true, returns all relevant default configurations @@ -4175,7 +4176,7 @@ def simulate_template( Simulate an index template. Returns the index configuration that would be applied by a particular index template. - ``_ + ``_ :param name: Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit this parameter and specify the template @@ -4306,7 +4307,7 @@ def split( """ Splits an existing index into a new index with more primary shards. - ``_ + ``_ :param index: Name of the source index to split. :param target: Name of the target index to create. @@ -4399,7 +4400,7 @@ def stats( Returns statistics for one or more indices. For data streams, the API retrieves statistics for the stream’s backing indices. - ``_ + ``_ :param index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices @@ -4502,7 +4503,7 @@ def unfreeze( """ Unfreezes an index. - ``_ + ``_ :param index: Identifier for the index. :param allow_no_indices: If `false`, the request returns an error if any wildcard @@ -4576,7 +4577,7 @@ def update_aliases( """ Create or update an alias. Adds a data stream or index to an alias. - ``_ + ``_ :param actions: Actions to perform. :param master_timeout: Period to wait for a connection to the master node. If @@ -4651,7 +4652,7 @@ def validate_query( """ Validate a query. Validates a query without running it. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py index 2fc2a8de6..08f9da4aa 100644 --- a/elasticsearch/_sync/client/inference.py +++ b/elasticsearch/_sync/client/inference.py @@ -46,7 +46,7 @@ def delete( """ Delete an inference endpoint - ``_ + ``_ :param inference_id: The inference Id :param task_type: The task type @@ -111,7 +111,7 @@ def get( """ Get an inference endpoint - ``_ + ``_ :param task_type: The task type :param inference_id: The inference Id @@ -174,7 +174,7 @@ def inference( """ Perform inference on the service - ``_ + ``_ :param inference_id: The inference Id :param input: Inference input. Either a string or an array of strings. @@ -257,7 +257,7 @@ def put( """ Create an inference endpoint - ``_ + ``_ :param inference_id: The inference Id :param inference_config: diff --git a/elasticsearch/_sync/client/ingest.py b/elasticsearch/_sync/client/ingest.py index d88d12f97..e244e91a3 100644 --- a/elasticsearch/_sync/client/ingest.py +++ b/elasticsearch/_sync/client/ingest.py @@ -40,7 +40,7 @@ def delete_geoip_database( """ Deletes a geoip database configuration. - ``_ + ``_ :param id: A comma-separated list of geoip database configurations to delete :param master_timeout: Period to wait for a connection to the master node. If @@ -91,7 +91,7 @@ def delete_pipeline( """ Deletes one or more existing ingest pipeline. - ``_ + ``_ :param id: Pipeline ID or wildcard expression of pipeline IDs used to limit the request. To delete all ingest pipelines in a cluster, use a value of `*`. @@ -140,7 +140,7 @@ def geo_ip_stats( """ Gets download statistics for GeoIP2 databases used with the geoip processor. 
- ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ingest/geoip/stats" @@ -177,7 +177,7 @@ def get_geoip_database( """ Returns information about one or more geoip database configurations. - ``_ + ``_ :param id: Comma-separated list of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit @@ -230,7 +230,7 @@ def get_pipeline( Returns information about one or more ingest pipelines. This API returns a local reference of the pipeline. - ``_ + ``_ :param id: Comma-separated list of pipeline IDs to retrieve. Wildcard (`*`) expressions are supported. To get all ingest pipelines, omit this parameter or use `*`. @@ -284,7 +284,7 @@ def processor_grok( you expect will match. A grok pattern is like a regular expression that supports aliased expressions that can be reused. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ingest/processor/grok" @@ -327,7 +327,7 @@ def put_geoip_database( """ Returns information about one or more geoip database configurations. - ``_ + ``_ :param id: ID of the database configuration to create or update. :param maxmind: The configuration necessary to identify which IP geolocation @@ -414,7 +414,7 @@ def put_pipeline( Creates or updates an ingest pipeline. Changes made using this API take effect immediately. - ``_ + ``_ :param id: ID of the ingest pipeline to create or update. :param deprecated: Marks this ingest pipeline as deprecated. When a deprecated @@ -506,7 +506,7 @@ def simulate( """ Executes an ingest pipeline against a set of provided documents. - ``_ + ``_ :param docs: Sample documents to test in the pipeline. :param id: Pipeline to test. If you don’t specify a `pipeline` in the request diff --git a/elasticsearch/_sync/client/license.py b/elasticsearch/_sync/client/license.py index 43135d5a7..28c51de20 100644 --- a/elasticsearch/_sync/client/license.py +++ b/elasticsearch/_sync/client/license.py @@ -37,7 +37,7 @@ def delete( """ Deletes licensing information for the cluster - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_license" @@ -76,7 +76,7 @@ def get( its type, its status, when it was issued, and when it expires. For more information about the different types of licenses, refer to [Elastic Stack subscriptions](https://www.elastic.co/subscriptions). - ``_ + ``_ :param accept_enterprise: If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum @@ -122,7 +122,7 @@ def get_basic_status( """ Retrieves information about the status of the basic license. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_license/basic_status" @@ -157,7 +157,7 @@ def get_trial_status( """ Retrieves information about the status of the trial license. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_license/trial_status" @@ -198,7 +198,7 @@ def post( """ Updates the license for the cluster. - ``_ + ``_ :param acknowledge: Specifies whether you acknowledge the license changes. :param license: @@ -257,7 +257,7 @@ def post_start_basic( acknowledge parameter set to true. To check the status of your basic license, use the following API: [Get basic status](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html). 
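A minimal sketch tying together the ingest pipeline APIs above: create a pipeline and run a sample document through it with the simulate endpoint (pipeline ID, processor, and document are illustrative)::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Create or update an ingest pipeline with a single set processor.
    client.ingest.put_pipeline(
        id="my-pipeline",  # assumed pipeline id
        description="Tag documents with an environment field",
        processors=[{"set": {"field": "env", "value": "prod"}}],
    )

    # Dry-run the pipeline against a sample document without indexing it.
    resp = client.ingest.simulate(
        id="my-pipeline",
        docs=[{"_source": {"message": "hello"}}],
    )
    print(resp)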
- ``_ + ``_ :param acknowledge: whether the user has acknowledged acknowledge messages (default: false) @@ -300,7 +300,7 @@ def post_start_trial( The start trial API enables you to start a 30-day trial, which gives access to all subscription features. - ``_ + ``_ :param acknowledge: whether the user has acknowledged acknowledge messages (default: false) diff --git a/elasticsearch/_sync/client/logstash.py b/elasticsearch/_sync/client/logstash.py index 040d5e030..bba034538 100644 --- a/elasticsearch/_sync/client/logstash.py +++ b/elasticsearch/_sync/client/logstash.py @@ -38,7 +38,7 @@ def delete_pipeline( """ Deletes a pipeline used for Logstash Central Management. - ``_ + ``_ :param id: Identifier for the pipeline. """ @@ -78,7 +78,7 @@ def get_pipeline( """ Retrieves pipelines used for Logstash Central Management. - ``_ + ``_ :param id: Comma-separated list of pipeline identifiers. """ @@ -125,7 +125,7 @@ def put_pipeline( """ Creates or updates a pipeline used for Logstash Central Management. - ``_ + ``_ :param id: Identifier for the pipeline. :param pipeline: diff --git a/elasticsearch/_sync/client/migration.py b/elasticsearch/_sync/client/migration.py index b0bfb2f01..bb6f718d6 100644 --- a/elasticsearch/_sync/client/migration.py +++ b/elasticsearch/_sync/client/migration.py @@ -40,7 +40,7 @@ def deprecations( that use deprecated features that will be removed or changed in the next major version. - ``_ + ``_ :param index: Comma-separate list of data streams or indices to check. Wildcard (*) expressions are supported. @@ -83,7 +83,7 @@ def get_feature_upgrade_status( """ Find out whether system features need to be upgraded or not - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_migration/system_features" @@ -118,7 +118,7 @@ def post_feature_upgrade( """ Begin upgrades for system features - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_migration/system_features" diff --git a/elasticsearch/_sync/client/ml.py b/elasticsearch/_sync/client/ml.py index b157260ae..ebf72ef18 100644 --- a/elasticsearch/_sync/client/ml.py +++ b/elasticsearch/_sync/client/ml.py @@ -42,7 +42,7 @@ def clear_trained_model_deployment_cache( may be cached on that individual node. Calling this API clears the caches without restarting the deployment. - ``_ + ``_ :param model_id: The unique identifier of the trained model. """ @@ -102,7 +102,7 @@ def close_job( force parameters as the close job request. When a datafeed that has a specified end date stops, it automatically closes its associated job. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection @@ -164,7 +164,7 @@ def delete_calendar( Delete a calendar. Removes all scheduled events from a calendar, then deletes it. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. """ @@ -205,7 +205,7 @@ def delete_calendar_event( """ Delete events from a calendar. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param event_id: Identifier for the scheduled event. You can obtain this identifier @@ -253,7 +253,7 @@ def delete_calendar_job( """ Delete anomaly jobs from a calendar. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param job_id: An identifier for the anomaly detection jobs. It can be a job @@ -302,7 +302,7 @@ def delete_data_frame_analytics( """ Delete a data frame analytics job. 
- ``_ + ``_ :param id: Identifier for the data frame analytics job. :param force: If `true`, it deletes a job that is not stopped; this method is @@ -350,7 +350,7 @@ def delete_datafeed( """ Delete a datafeed. - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -408,7 +408,7 @@ def delete_expired_data( expired data for all anomaly detection jobs by using _all, by specifying * as the , or by omitting the . - ``_ + ``_ :param job_id: Identifier for an anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. @@ -469,7 +469,7 @@ def delete_filter( delete the filter. You must update or delete the job before you can delete the filter. - ``_ + ``_ :param filter_id: A string that uniquely identifies a filter. """ @@ -515,7 +515,7 @@ def delete_forecast( in the forecast jobs API. The delete forecast API enables you to delete one or more forecasts before they expire. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param forecast_id: A comma-separated list of forecast identifiers. If you do @@ -587,7 +587,7 @@ def delete_job( delete datafeed API with the same timeout and force parameters as the delete job request. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param delete_user_annotations: Specifies whether annotations that have been @@ -643,7 +643,7 @@ def delete_model_snapshot( that snapshot, first revert to a different one. To identify the active model snapshot, refer to the `model_snapshot_id` in the results from the get jobs API. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: Identifier for the model snapshot. @@ -691,7 +691,7 @@ def delete_trained_model( Delete an unreferenced trained model. The request deletes a trained inference model that is not referenced by an ingest pipeline. - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param force: Forcefully deletes a trained model that is referenced by ingest @@ -738,7 +738,7 @@ def delete_trained_model_alias( to a trained model. If the model alias is missing or refers to a model other than the one identified by the `model_id`, this API returns an error. - ``_ + ``_ :param model_id: The trained model ID to which the model alias refers. :param model_alias: The model alias to delete. @@ -795,7 +795,7 @@ def estimate_model_memory( an anomaly detection job model. It is based on analysis configuration details for the job and cardinality estimates for the fields it references. - ``_ + ``_ :param analysis_config: For a list of the properties that you can specify in the `analysis_config` component of the body of this API. @@ -863,7 +863,7 @@ def evaluate_data_frame( for use on indexes created by data frame analytics. Evaluation requires both a ground truth field and an analytics result field to be present. - ``_ + ``_ :param evaluation: Defines the type of evaluation you want to perform. :param index: Defines the `index` in which the evaluation will be performed. @@ -943,7 +943,7 @@ def explain_data_frame_analytics( setting later on. If you have object fields or fields that are excluded via source filtering, they are not included in the explanation. - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. 
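Finally, a hedged sketch of the anomaly detection job teardown flow the ML docstrings above describe: close the job, then delete it together with any user annotations (the job ID is an assumption)::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # A job must normally be closed before it can be deleted.
    client.ml.close_job(job_id="my-anomaly-job")  # assumed job id

    # Delete the job and any annotations users added to its results.
    client.ml.delete_job(job_id="my-anomaly-job", delete_user_annotations=True)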
@@ -1050,7 +1050,7 @@ def flush_job( and persists the model state to disk and the job must be opened again before analyzing further data. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param advance_time: Refer to the description for the `advance_time` query parameter. @@ -1121,7 +1121,7 @@ def forecast( for a job that has an `over_field_name` in its configuration. Forcasts predict future behavior based on historical data. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. The job must be open when you create a forecast; otherwise, an error occurs. @@ -1204,7 +1204,7 @@ def get_buckets( Get anomaly detection job results for buckets. The API presents a chronological view of the records, grouped by bucket. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param timestamp: The timestamp of a single bucket result. If you do not specify @@ -1299,7 +1299,7 @@ def get_calendar_events( """ Get info about events in calendars. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids @@ -1365,7 +1365,7 @@ def get_calendars( """ Get calendar configuration info. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids @@ -1438,7 +1438,7 @@ def get_categories( """ Get anomaly detection job results for categories. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param category_id: Identifier for the category, which is unique in the job. @@ -1522,7 +1522,7 @@ def get_data_frame_analytics( multiple data frame analytics jobs in a single API request by using a comma-separated list of data frame analytics jobs or a wildcard expression. - ``_ + ``_ :param id: Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame @@ -1594,7 +1594,7 @@ def get_data_frame_analytics_stats( """ Get data frame analytics jobs usage info. - ``_ + ``_ :param id: Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame @@ -1664,7 +1664,7 @@ def get_datafeed_stats( the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds. - ``_ + ``_ :param datafeed_id: Identifier for the datafeed. It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the @@ -1724,7 +1724,7 @@ def get_datafeeds( `*` as the ``, or by omitting the ``. This API returns a maximum of 10,000 datafeeds. - ``_ + ``_ :param datafeed_id: Identifier for the datafeed. It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the @@ -1787,7 +1787,7 @@ def get_filters( """ Get filters. You can get a single filter or all filters. - ``_ + ``_ :param filter_id: A string that uniquely identifies a filter. :param from_: Skips the specified number of filters. @@ -1851,7 +1851,7 @@ def get_influencers( that have contributed to, or are to blame for, the anomalies. Influencer results are available only if an `influencer_field_name` is specified in the job configuration. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param desc: If true, the results are sorted in descending order. @@ -1932,7 +1932,7 @@ def get_job_stats( """ Get anomaly detection jobs usage info. 
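Since the forecast and bucket-results endpoints above change only their reference links here, a short hedged sketch of how they are commonly combined may be useful; the job identifier, score threshold and durations below are invented for illustration.

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

    # The anomaly detection job must already exist and be open; "kpi-job" is made up.
    forecast = client.ml.forecast(job_id="kpi-job", duration="3d", expires_in="7d")
    print(forecast["forecast_id"])

    # Read the most anomalous buckets the job has produced so far.
    buckets = client.ml.get_buckets(
        job_id="kpi-job", anomaly_score=75.0, sort="anomaly_score", desc=True
    )
    for bucket in buckets["buckets"]:
        print(bucket["timestamp"], bucket["anomaly_score"])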
- ``_ + ``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, a comma-separated list of jobs, or a wildcard expression. If @@ -1993,7 +1993,7 @@ def get_jobs( detection jobs by using `_all`, by specifying `*` as the ``, or by omitting the ``. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. If you do not specify one of these @@ -2056,7 +2056,7 @@ def get_memory_stats( jobs and trained models are using memory, on each node, both within the JVM heap, and natively, outside of the JVM. - ``_ + ``_ :param node_id: The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or `ml:true` @@ -2111,7 +2111,7 @@ def get_model_snapshot_upgrade_stats( """ Get anomaly detection job model snapshot upgrade usage info. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: A numerical character string that uniquely identifies the @@ -2182,7 +2182,7 @@ def get_model_snapshots( """ Get model snapshots info. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: A numerical character string that uniquely identifies the @@ -2295,7 +2295,7 @@ def get_overall_buckets( its default), the `overall_score` is the maximum `overall_score` of the overall buckets that have a span equal to the jobs' largest bucket span. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, a comma-separated list of jobs or groups, or a wildcard expression. @@ -2400,7 +2400,7 @@ def get_records( found in each bucket, which relates to the number of time series being modeled and the number of detectors. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param desc: Refer to the description for the `desc` query parameter. @@ -2495,7 +2495,7 @@ def get_trained_models( """ Get trained model configuration info. - ``_ + ``_ :param model_id: The unique identifier of the trained model or a model alias. You can get information for multiple trained models in a single API request @@ -2579,7 +2579,7 @@ def get_trained_models_stats( models in a single API request by using a comma-separated list of model IDs or a wildcard expression. - ``_ + ``_ :param model_id: The unique identifier of the trained model or a model alias. It can be a comma-separated list or a wildcard expression. @@ -2642,7 +2642,7 @@ def infer_trained_model( """ Evaluate a trained model. - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param docs: An array of objects to pass to the model for inference. The objects @@ -2704,7 +2704,7 @@ def info( what those defaults are. It also provides information about the maximum size of machine learning jobs that could run in the current cluster configuration. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ml/info" @@ -2749,7 +2749,7 @@ def open_job( job is ready to resume its analysis from where it left off, once new data is received. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param timeout: Refer to the description for the `timeout` query parameter. @@ -2803,7 +2803,7 @@ def post_calendar_events( """ Add scheduled events to the calendar. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param events: A list of one of more scheduled events. 
The event’s start and @@ -2861,7 +2861,7 @@ def post_data( data can be accepted from only a single connection at a time. It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. The job must have a state of open to receive and process the data. @@ -2925,7 +2925,7 @@ def preview_data_frame_analytics( Preview features used by data frame analytics. Previews the extracted features used by a data frame analytics config. - ``_ + ``_ :param id: Identifier for the data frame analytics job. :param config: A data frame analytics config as described in create data frame @@ -2995,7 +2995,7 @@ def preview_datafeed( that accurately reflects the behavior of the datafeed, use the appropriate credentials. You can also use secondary authorization headers to supply the credentials. - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -3071,7 +3071,7 @@ def put_calendar( """ Create a calendar. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param description: A description of the calendar. @@ -3125,7 +3125,7 @@ def put_calendar_job( """ Add anomaly detection job to calendar. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param job_id: An identifier for the anomaly detection jobs. It can be a job @@ -3199,7 +3199,7 @@ def put_data_frame_analytics( that performs an analysis on the source indices and stores the outcome in a destination index. - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -3377,7 +3377,7 @@ def put_datafeed( directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index. - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -3532,7 +3532,7 @@ def put_filter( more anomaly detection jobs. Specifically, filters are referenced in the `custom_rules` property of detector configuration objects. - ``_ + ``_ :param filter_id: A string that uniquely identifies a filter. :param description: A description of the filter. @@ -3619,7 +3619,7 @@ def put_job( Create an anomaly detection job. If you include a `datafeed_config`, you must have read index privileges on the source index. - ``_ + ``_ :param job_id: The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and @@ -3800,7 +3800,7 @@ def put_trained_model( Create a trained model. Enable you to supply a trained model that is not created by data frame analytics. - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param compressed_definition: The compressed (GZipped and Base64 encoded) inference @@ -3914,7 +3914,7 @@ def put_trained_model_alias( common between the old and new trained models for the model alias, the API returns a warning. - ``_ + ``_ :param model_id: The identifier for the trained model that the alias refers to. :param model_alias: The alias to create or update. This value cannot end in numbers. @@ -3972,7 +3972,7 @@ def put_trained_model_definition_part( """ Create part of a trained model definition. - ``_ + ``_ :param model_id: The unique identifier of the trained model. 
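The create-style machine learning endpoints documented above (put_job, put_datafeed and friends) are easiest to follow as a concrete call sequence, so here is a minimal assumed setup; the job id, datafeed id, index pattern and field names are placeholders rather than anything defined in this patch.

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

    # A minimal anomaly detection job counting events per 15-minute bucket.
    client.ml.put_job(
        job_id="requests-per-minute",
        analysis_config={
            "bucket_span": "15m",
            "detectors": [{"function": "count"}],
        },
        data_description={"time_field": "@timestamp"},
    )

    # Attach a datafeed to an index, then open the job and start feeding it.
    client.ml.put_datafeed(
        datafeed_id="datafeed-requests-per-minute",
        job_id="requests-per-minute",
        indices=["web-logs-*"],
        query={"match_all": {}},
    )
    client.ml.open_job(job_id="requests-per-minute")
    client.ml.start_datafeed(datafeed_id="datafeed-requests-per-minute")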
:param part: The definition part number. When the definition is loaded for inference @@ -4051,7 +4051,7 @@ def put_trained_model_vocabulary( processing (NLP) models. The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition. - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param vocabulary: The model vocabulary, which must not be empty. @@ -4109,7 +4109,7 @@ def reset_job( job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma separated list. - ``_ + ``_ :param job_id: The ID of the job to reset. :param delete_user_annotations: Specifies whether annotations that have been @@ -4169,7 +4169,7 @@ def revert_model_snapshot( For example, you might consider reverting to a saved snapshot after Black Friday or a critical system failure. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: You can specify `empty` as the . Reverting to @@ -4239,7 +4239,7 @@ def set_upgrade_mode( the current value for the upgrade_mode setting by using the get machine learning info API. - ``_ + ``_ :param enabled: When `true`, it enables `upgrade_mode` which temporarily halts all job and datafeed tasks and prohibits new job and datafeed tasks from @@ -4294,7 +4294,7 @@ def start_data_frame_analytics( exists, it is used as is. You can therefore set up the destination index in advance with custom settings and mappings. - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -4356,7 +4356,7 @@ def start_datafeed( headers when you created or updated the datafeed, those credentials are used instead. - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -4426,7 +4426,7 @@ def start_trained_model_deployment( Start a trained model deployment. It allocates the model to every machine learning node. - ``_ + ``_ :param model_id: The unique identifier of the trained model. Currently, only PyTorch models are supported. @@ -4510,7 +4510,7 @@ def stop_data_frame_analytics( Stop data frame analytics jobs. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -4576,7 +4576,7 @@ def stop_datafeed( Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. - ``_ + ``_ :param datafeed_id: Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a @@ -4638,7 +4638,7 @@ def stop_trained_model_deployment( """ Stop a trained model deployment. - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param allow_no_match: Specifies what to do when the request: contains wildcard @@ -4703,7 +4703,7 @@ def update_data_frame_analytics( """ Update a data frame analytics job. - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. 
@@ -4815,7 +4815,7 @@ def update_datafeed( query using those same roles. If you provide secondary authorization headers, those credentials are used instead. - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -4979,7 +4979,7 @@ def update_filter( Update a filter. Updates the description of a filter, adds items, or removes items from the list. - ``_ + ``_ :param filter_id: A string that uniquely identifies a filter. :param add_items: The items to add to the filter. @@ -5070,7 +5070,7 @@ def update_job( Update an anomaly detection job. Updates certain properties of an anomaly detection job. - ``_ + ``_ :param job_id: Identifier for the job. :param allow_lazy_open: Advanced configuration option. Specifies whether this @@ -5198,7 +5198,7 @@ def update_model_snapshot( """ Update a snapshot. Updates certain properties of a snapshot. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: Identifier for the model snapshot. @@ -5259,7 +5259,7 @@ def update_trained_model_deployment( """ Update a trained model deployment. - ``_ + ``_ :param model_id: The unique identifier of the trained model. Currently, only PyTorch models are supported. @@ -5325,7 +5325,7 @@ def upgrade_job_snapshot( a time and the upgraded snapshot cannot be the current snapshot of the anomaly detection job. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: A numerical character string that uniquely identifies the @@ -5401,7 +5401,7 @@ def validate( """ Validates an anomaly detection job. - ``_ + ``_ :param analysis_config: :param analysis_limits: @@ -5471,7 +5471,7 @@ def validate_detector( """ Validates an anomaly detection detector. - ``_ + ``_ :param detector: """ diff --git a/elasticsearch/_sync/client/monitoring.py b/elasticsearch/_sync/client/monitoring.py index 923a9b652..0a97e0202 100644 --- a/elasticsearch/_sync/client/monitoring.py +++ b/elasticsearch/_sync/client/monitoring.py @@ -44,7 +44,7 @@ def bulk( """ Used by the monitoring features to send monitoring data. - ``_ + ``_ :param interval: Collection interval (e.g., '10s' or '10000ms') of the payload :param operations: diff --git a/elasticsearch/_sync/client/nodes.py b/elasticsearch/_sync/client/nodes.py index 918bb8ced..c605a7b6b 100644 --- a/elasticsearch/_sync/client/nodes.py +++ b/elasticsearch/_sync/client/nodes.py @@ -40,7 +40,7 @@ def clear_repositories_metering_archive( You can use this API to clear the archived repositories metering information in the cluster. - ``_ + ``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). @@ -92,7 +92,7 @@ def get_repositories_metering_info( compute aggregations over a period of time. Additionally, the information exposed by this API is volatile, meaning that it won’t be present after node restarts. - ``_ + ``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). @@ -146,7 +146,7 @@ def hot_threads( This API yields a breakdown of the hot threads on each selected node in the cluster. The output is plain text with a breakdown of each node’s top hot threads. 
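The nodes.hot_threads endpoint referenced above returns plain text rather than JSON, which is easy to miss from the docstring alone; a small assumed sketch, with the thread count chosen arbitrarily:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

    # Plain-text report of the busiest threads on each selected node.
    report = client.nodes.hot_threads(threads=3, ignore_idle_threads=True)
    print(report.body)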
- ``_ + ``_ :param node_id: List of node IDs or names used to limit returned information. :param ignore_idle_threads: If true, known idle threads (e.g. waiting in a socket @@ -221,7 +221,7 @@ def info( """ Returns cluster nodes information. - ``_ + ``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. @@ -290,7 +290,7 @@ def reload_secure_settings( """ Reloads the keystore on nodes in the cluster. - ``_ + ``_ :param node_id: The names of particular nodes in the cluster to target. :param secure_settings_password: The password for the Elasticsearch keystore. @@ -361,7 +361,7 @@ def stats( """ Returns cluster nodes statistics. - ``_ + ``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. @@ -478,7 +478,7 @@ def usage( """ Returns information on the usage of features. - ``_ + ``_ :param node_id: A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting diff --git a/elasticsearch/_sync/client/query_rules.py b/elasticsearch/_sync/client/query_rules.py index 7b66ca7ed..d5aaa2f76 100644 --- a/elasticsearch/_sync/client/query_rules.py +++ b/elasticsearch/_sync/client/query_rules.py @@ -39,7 +39,7 @@ def delete_rule( """ Deletes a query rule within a query ruleset. - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset containing the rule to delete @@ -87,7 +87,7 @@ def delete_ruleset( """ Deletes a query ruleset. - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset to delete """ @@ -128,7 +128,7 @@ def get_rule( """ Returns the details about a query rule within a query ruleset - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset containing the rule to retrieve @@ -176,7 +176,7 @@ def get_ruleset( """ Returns the details about a query ruleset - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset """ @@ -219,7 +219,7 @@ def list_rulesets( """ Returns summarized information about existing query rulesets. - ``_ + ``_ :param from_: Starting offset (default: 0) :param size: specifies a max number of results to get @@ -272,7 +272,7 @@ def put_rule( """ Creates or updates a query rule within a query ruleset. - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset containing the rule to be created or updated @@ -347,7 +347,7 @@ def put_ruleset( """ Creates or updates a query ruleset. - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset to be created or updated @@ -382,3 +382,56 @@ def put_ruleset( endpoint_id="query_rules.put_ruleset", path_parts=__path_parts, ) + + @_rewrite_parameters( + body_fields=("match_criteria",), + ) + def test( + self, + *, + ruleset_id: str, + match_criteria: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Creates or updates a query ruleset. 
+ + ``_ + + :param ruleset_id: The unique identifier of the query ruleset to be created or + updated + :param match_criteria: + """ + if ruleset_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'ruleset_id'") + if match_criteria is None and body is None: + raise ValueError("Empty value passed for parameter 'match_criteria'") + __path_parts: t.Dict[str, str] = {"ruleset_id": _quote(ruleset_id)} + __path = f'/_query_rules/{__path_parts["ruleset_id"]}/_test' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if match_criteria is not None: + __body["match_criteria"] = match_criteria + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="query_rules.test", + path_parts=__path_parts, + ) diff --git a/elasticsearch/_sync/client/rollup.py b/elasticsearch/_sync/client/rollup.py index 066427db5..9ed2c25fa 100644 --- a/elasticsearch/_sync/client/rollup.py +++ b/elasticsearch/_sync/client/rollup.py @@ -38,7 +38,7 @@ def delete_job( """ Deletes an existing rollup job. - ``_ + ``_ :param id: Identifier for the job. """ @@ -78,7 +78,7 @@ def get_jobs( """ Retrieves the configuration, stats, and status of rollup jobs. - ``_ + ``_ :param id: Identifier for the rollup job. If it is `_all` or omitted, the API returns all rollup jobs. @@ -123,7 +123,7 @@ def get_rollup_caps( Returns the capabilities of any rollup jobs that have been configured for a specific index or index pattern. - ``_ + ``_ :param id: Index, indices or index-pattern to return rollup capabilities for. `_all` may be used to fetch rollup capabilities from all jobs. @@ -168,7 +168,7 @@ def get_rollup_index_caps( Returns the rollup capabilities of all jobs inside of a rollup index (for example, the index where rollup data is stored). - ``_ + ``_ :param index: Data stream or index to check for rollup capabilities. Wildcard (`*`) expressions are supported. @@ -230,7 +230,7 @@ def put_job( """ Creates a rollup job. - ``_ + ``_ :param id: Identifier for the rollup job. This can be any alphanumeric string and uniquely identifies the data that is associated with the rollup job. @@ -346,7 +346,7 @@ def rollup_search( """ Enables searching rolled-up data using the standard Query DSL. - ``_ + ``_ :param index: Enables searching rolled-up data using the standard Query DSL. :param aggregations: Specifies aggregations. @@ -409,7 +409,7 @@ def start_job( """ Starts an existing, stopped rollup job. - ``_ + ``_ :param id: Identifier for the rollup job. """ @@ -451,7 +451,7 @@ def stop_job( """ Stops an existing, started rollup job. - ``_ + ``_ :param id: Identifier for the rollup job. :param timeout: If `wait_for_completion` is `true`, the API blocks for (at maximum) diff --git a/elasticsearch/_sync/client/search_application.py b/elasticsearch/_sync/client/search_application.py index e6483fe8e..7b7619fa7 100644 --- a/elasticsearch/_sync/client/search_application.py +++ b/elasticsearch/_sync/client/search_application.py @@ -36,9 +36,10 @@ def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a search application. 
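The new query_rules.test method added above posts match criteria to the ruleset's _test endpoint so you can see which rules would fire without running a search. A hedged usage sketch follows, where the ruleset id and the criteria key are invented and must match whatever criteria the ruleset actually defines:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

    # Dry-run the ruleset against a candidate set of criteria values.
    result = client.query_rules.test(
        ruleset_id="promo-rules",
        match_criteria={"user_query": "running shoes"},
    )
    print(result)  # reports which rules in the ruleset matched the supplied criteria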
+ Delete a search application. Remove a search application and its associated alias. + Indices attached to the search application are not removed. - ``_ + ``_ :param name: The name of the search application to delete """ @@ -76,9 +77,10 @@ def delete_behavioral_analytics( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a behavioral analytics collection. + Delete a behavioral analytics collection. The associated data stream is also + deleted. - ``_ + ``_ :param name: The name of the analytics collection to be deleted """ @@ -116,9 +118,9 @@ def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the details about a search application + Get search application details. - ``_ + ``_ :param name: The name of the search application """ @@ -156,9 +158,9 @@ def get_behavioral_analytics( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the existing behavioral analytics collections. + Get behavioral analytics collections. - ``_ + ``_ :param name: A list of analytics collections to limit the returned information """ @@ -205,7 +207,7 @@ def list( """ Returns the existing search applications. - ``_ + ``_ :param from_: Starting offset. :param q: Query in the Lucene query string syntax. @@ -254,9 +256,9 @@ def put( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a search application. + Create or update a search application. - ``_ + ``_ :param name: The name of the search application to be created or updated. :param search_application: @@ -307,9 +309,9 @@ def put_behavioral_analytics( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a behavioral analytics collection. + Create a behavioral analytics collection. - ``_ + ``_ :param name: The name of the analytics collection to be created or updated. """ @@ -353,9 +355,12 @@ def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Perform a search against a search application. + Run a search application search. Generate and run an Elasticsearch query that + uses the specified query parameteter and the search template associated with + the search application or default template. Unspecified template parameters are + assigned their default values if applicable. - ``_ + ``_ :param name: The name of the search application to be searched. :param params: Query parameters specific to this request, which will override diff --git a/elasticsearch/_sync/client/searchable_snapshots.py b/elasticsearch/_sync/client/searchable_snapshots.py index 823162b57..2db8dfb04 100644 --- a/elasticsearch/_sync/client/searchable_snapshots.py +++ b/elasticsearch/_sync/client/searchable_snapshots.py @@ -39,7 +39,7 @@ def cache_stats( """ Retrieve node-level cache statistics about searchable snapshots. - ``_ + ``_ :param node_id: A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting @@ -97,7 +97,7 @@ def clear_cache( """ Clear the cache of searchable snapshots. - ``_ + ``_ :param index: A comma-separated list of index names :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves @@ -169,7 +169,7 @@ def mount( """ Mount a snapshot as a searchable index. - ``_ + ``_ :param repository: The name of the repository containing the snapshot of the index to mount @@ -249,7 +249,7 @@ def stats( """ Retrieve shard-level statistics about searchable snapshots. 
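Because search_application.search delegates everything to the application's stored search template, the accepted parameters depend entirely on that template; the sketch below assumes the common default query_string parameter and an invented application name.

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

    response = client.search_application.search(
        name="website-search",
        params={"query_string": "elasticsearch hybrid search"},
    )
    for hit in response["hits"]["hits"]:
        print(hit["_score"], hit["_id"])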
- ``_ + ``_ :param index: A comma-separated list of index names :param level: Return stats aggregated at cluster, index or shard level diff --git a/elasticsearch/_sync/client/security.py b/elasticsearch/_sync/client/security.py index 28b98fdba..fd4b1cac9 100644 --- a/elasticsearch/_sync/client/security.py +++ b/elasticsearch/_sync/client/security.py @@ -44,9 +44,10 @@ def activate_user_profile( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a user profile on behalf of another user. + Activate a user profile. Create or update a user profile on behalf of another + user. - ``_ + ``_ :param grant_type: :param access_token: @@ -104,7 +105,7 @@ def authenticate( and information about the realms that authenticated and authorized the user. If the user cannot be authenticated, this API returns a 401 status code. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/_authenticate" @@ -144,11 +145,11 @@ def bulk_delete_role( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - The role management APIs are generally the preferred way to manage roles, rather - than using file-based role management. The bulk delete roles API cannot delete - roles that are defined in roles files. + Bulk delete roles. The role management APIs are generally the preferred way to + manage roles, rather than using file-based role management. The bulk delete roles + API cannot delete roles that are defined in roles files. - ``_ + ``_ :param names: An array of role names to delete :param refresh: If `true` (the default) then refresh the affected shards to make @@ -202,11 +203,11 @@ def bulk_put_role( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - The role management APIs are generally the preferred way to manage roles, rather - than using file-based role management. The bulk create or update roles API cannot - update roles that are defined in roles files. + Bulk create or update roles. The role management APIs are generally the preferred + way to manage roles, rather than using file-based role management. The bulk create + or update roles API cannot update roles that are defined in roles files. - ``_ + ``_ :param roles: A dictionary of role name to RoleDescriptor objects to add or update :param refresh: If `true` (the default) then refresh the affected shards to make @@ -262,9 +263,10 @@ def change_password( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Changes the passwords of users in the native realm and built-in users. + Change passwords. Change the passwords of users in the native realm and built-in + users. - ``_ + ``_ :param username: The user whose password you want to change. If you do not specify this parameter, the password is changed for the current user. @@ -324,10 +326,10 @@ def clear_api_key_cache( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Evicts a subset of all entries from the API key cache. The cache is also automatically - cleared on state changes of the security index. + Clear the API key cache. Evict a subset of all entries from the API key cache. + The cache is also automatically cleared on state changes of the security index. - ``_ + ``_ :param ids: Comma-separated list of API key IDs to evict from the API key cache. To evict all API keys, use `*`. Does not support other wildcard patterns. 
@@ -366,9 +368,11 @@ def clear_cached_privileges( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Evicts application privileges from the native application privileges cache. + Clear the privileges cache. Evict privileges from the native application privilege + cache. The cache is also automatically cleared for applications that have their + privileges updated. - ``_ + ``_ :param application: A comma-separated list of application names """ @@ -407,10 +411,10 @@ def clear_cached_realms( usernames: t.Optional[t.Sequence[str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Evicts users from the user cache. Can completely clear the cache or evict specific - users. + Clear the user cache. Evict users from the user cache. You can completely clear + the cache or evict specific users. - ``_ + ``_ :param realms: Comma-separated list of realms to clear :param usernames: Comma-separated list of usernames to clear from the cache @@ -451,9 +455,9 @@ def clear_cached_roles( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Evicts roles from the native role cache. + Clear the roles cache. Evict roles from the native role cache. - ``_ + ``_ :param name: Role name """ @@ -493,9 +497,10 @@ def clear_cached_service_tokens( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Evicts tokens from the service account token caches. + Clear service account token caches. Evict a subset of all entries from the service + account token caches. - ``_ + ``_ :param namespace: An identifier for the namespace :param service: An identifier for the service name @@ -552,13 +557,13 @@ def create_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create an API key. Creates an API key for access without requiring basic authentication. + Create an API key. Create an API key for access without requiring basic authentication. A successful request returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys. - ``_ + ``_ :param expiration: Expiration time for the API key. By default, API keys never expire. @@ -628,9 +633,10 @@ def create_service_token( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a service accounts token for access without requiring basic authentication. + Create a service account token. Create a service accounts token for access without + requiring basic authentication. - ``_ + ``_ :param namespace: An identifier for the namespace :param service: An identifier for the service name @@ -698,9 +704,9 @@ def delete_privileges( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes application privileges. + Delete application privileges. - ``_ + ``_ :param application: Application name :param name: Privilege name @@ -754,9 +760,9 @@ def delete_role( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes roles in the native realm. + Delete roles. Delete roles in the native realm. - ``_ + ``_ :param name: Role name :param refresh: If `true` (the default) then refresh the affected shards to make @@ -802,9 +808,9 @@ def delete_role_mapping( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes role mappings. + Delete role mappings. 
- ``_ + ``_ :param name: Role-mapping name :param refresh: If `true` (the default) then refresh the affected shards to make @@ -852,9 +858,10 @@ def delete_service_token( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a service account token. + Delete service account tokens. Delete service account tokens for a service in + a specified namespace. - ``_ + ``_ :param namespace: An identifier for the namespace :param service: An identifier for the service name @@ -910,9 +917,9 @@ def delete_user( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes users from the native realm. + Delete users. Delete users from the native realm. - ``_ + ``_ :param username: username :param refresh: If `true` (the default) then refresh the affected shards to make @@ -958,9 +965,9 @@ def disable_user( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Disables users in the native realm. + Disable users. Disable users in the native realm. - ``_ + ``_ :param username: The username of the user to disable :param refresh: If `true` (the default) then refresh the affected shards to make @@ -1006,9 +1013,10 @@ def disable_user_profile( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Disables a user profile so it's not visible in user profile searches. + Disable a user profile. Disable user profiles so that they are not visible in + user profile searches. - ``_ + ``_ :param uid: Unique identifier for the user profile. :param refresh: If 'true', Elasticsearch refreshes the affected shards to make @@ -1054,9 +1062,9 @@ def enable_user( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables users in the native realm. + Enable users. Enable users in the native realm. - ``_ + ``_ :param username: The username of the user to enable :param refresh: If `true` (the default) then refresh the affected shards to make @@ -1102,9 +1110,10 @@ def enable_user_profile( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables a user profile so it's visible in user profile searches. + Enable a user profile. Enable user profiles to make them visible in user profile + searches. - ``_ + ``_ :param uid: Unique identifier for the user profile. :param refresh: If 'true', Elasticsearch refreshes the affected shards to make @@ -1146,10 +1155,10 @@ def enroll_kibana( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables a Kibana instance to configure itself for communication with a secured - Elasticsearch cluster. + Enroll Kibana. Enable a Kibana instance to configure itself for communication + with a secured Elasticsearch cluster. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/enroll/kibana" @@ -1182,9 +1191,10 @@ def enroll_node( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows a new node to join an existing cluster with security features enabled. + Enroll a node. Enroll a new node to allow it to join an existing cluster with + security features enabled. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/enroll/node" @@ -1231,7 +1241,7 @@ def get_api_key( privileges (including `manage_security`), this API returns all API keys regardless of ownership. - ``_ + ``_ :param active_only: A boolean flag that can be used to query API keys that are currently active. 
An API key is considered active if it is neither invalidated, @@ -1303,10 +1313,10 @@ def get_builtin_privileges( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the list of cluster privileges and index privileges that are available - in this version of Elasticsearch. + Get builtin privileges. Get the list of cluster privileges and index privileges + that are available in this version of Elasticsearch. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/privilege/_builtin" @@ -1341,9 +1351,9 @@ def get_privileges( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves application privileges. + Get application privileges. - ``_ + ``_ :param application: Application name :param name: Privilege name @@ -1388,11 +1398,11 @@ def get_role( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - The role management APIs are generally the preferred way to manage roles, rather - than using file-based role management. The get roles API cannot retrieve roles - that are defined in roles files. + Get roles. Get roles in the native realm. The role management APIs are generally + the preferred way to manage roles, rather than using file-based role management. + The get roles API cannot retrieve roles that are defined in roles files. - ``_ + ``_ :param name: The name of the role. You can specify multiple roles as a comma-separated list. If you do not specify this parameter, the API returns information about @@ -1435,9 +1445,12 @@ def get_role_mapping( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves role mappings. + Get role mappings. Role mappings define which roles are assigned to each user. + The role mapping APIs are generally the preferred way to manage role mappings + rather than using role mapping files. The get role mappings API cannot retrieve + role mappings that are defined in role mapping files. - ``_ + ``_ :param name: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does @@ -1483,9 +1496,10 @@ def get_service_accounts( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - This API returns a list of service accounts that match the provided path parameter(s). + Get service accounts. Get a list of service accounts that match the provided + path parameters. - ``_ + ``_ :param namespace: Name of the namespace. Omit this parameter to retrieve information about all service accounts. If you omit this parameter, you must also omit @@ -1534,9 +1548,9 @@ def get_service_credentials( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information of all service credentials for a service account. + Get service account credentials. - ``_ + ``_ :param namespace: Name of the namespace. :param service: Name of the service name. @@ -1602,9 +1616,9 @@ def get_token( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a bearer token for access without requiring basic authentication. + Get a token. Create a bearer token for access without requiring basic authentication. - ``_ + ``_ :param grant_type: :param kerberos_ticket: @@ -1661,9 +1675,9 @@ def get_user( with_profile_uid: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about users in the native realm and built-in users. + Get users. Get information about users in the native realm and built-in users. 
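The get_token endpoint above exchanges credentials for a bearer token that another client can then authenticate with; in this assumed sketch the endpoint, username and password are placeholders.

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

    token = client.security.get_token(
        grant_type="password",
        username="jane.doe",
        password="changeme",
    )

    # Use the returned access token as a bearer credential for a second client.
    bearer_client = Elasticsearch("http://localhost:9200", bearer_auth=token["access_token"])
    print(bearer_client.security.authenticate()["username"])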
- ``_ + ``_ :param username: An identifier for the user. You can specify multiple usernames as a comma-separated list. If you omit this parameter, the API retrieves @@ -1712,9 +1726,9 @@ def get_user_privileges( username: t.Optional[t.Union[None, str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves security privileges for the logged in user. + Get user privileges. - ``_ + ``_ :param application: The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, @@ -1762,9 +1776,9 @@ def get_user_profile( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a user's profile using the unique profile ID. + Get a user profile. Get a user's profile using the unique profile ID. - ``_ + ``_ :param uid: A unique identifier for the user profile. :param data: List of filters for the `data` field of the profile document. To @@ -1826,23 +1840,23 @@ def grant_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates an API key on behalf of another user. This API is similar to Create API - keys, however it creates the API key for a user that is different than the user - that runs the API. The caller must have authentication credentials (either an - access token, or a username and password) for the user on whose behalf the API - key will be created. It is not possible to use this API to create an API key - without that user’s credentials. The user, for whom the authentication credentials - is provided, can optionally "run as" (impersonate) another user. In this case, - the API key will be created on behalf of the impersonated user. This API is intended - be used by applications that need to create and manage API keys for end users, - but cannot guarantee that those users have permission to create API keys on their - own behalf. A successful grant API key API call returns a JSON structure that - contains the API key, its unique id, and its name. If applicable, it also returns - expiration information for the API key in milliseconds. By default, API keys - never expire. You can specify expiration information when you create the API - keys. - - ``_ + Grant an API key. Create an API key on behalf of another user. This API is similar + to the create API keys API, however it creates the API key for a user that is + different than the user that runs the API. The caller must have authentication + credentials (either an access token, or a username and password) for the user + on whose behalf the API key will be created. It is not possible to use this API + to create an API key without that user’s credentials. The user, for whom the + authentication credentials is provided, can optionally "run as" (impersonate) + another user. In this case, the API key will be created on behalf of the impersonated + user. This API is intended be used by applications that need to create and manage + API keys for end users, but cannot guarantee that those users have permission + to create API keys on their own behalf. A successful grant API key API call returns + a JSON structure that contains the API key, its unique id, and its name. If applicable, + it also returns expiration information for the API key in milliseconds. By default, + API keys never expire. You can specify expiration information when you create + the API keys. + + ``_ :param api_key: Defines the API key. :param grant_type: The type of grant. 
Supported grant types are: `access_token`, @@ -1980,10 +1994,10 @@ def has_privileges( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Check user privileges. Determines whether the specified user has a specified - list of privileges. + Check user privileges. Determine whether the specified user has a specified list + of privileges. - ``_ + ``_ :param user: Username :param application: @@ -2040,10 +2054,10 @@ def has_privileges_user_profile( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Determines whether the users associated with the specified profile IDs have all - the requested privileges. + Check user profile privileges. Determine whether the users associated with the + specified user profile IDs have all the requested privileges. - ``_ + ``_ :param privileges: :param uids: A list of profile IDs. The privileges are checked for associated @@ -2100,15 +2114,19 @@ def invalidate_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Invalidate API keys. Invalidates one or more API keys. The `manage_api_key` privilege - allows deleting any API keys. The `manage_own_api_key` only allows deleting API - keys that are owned by the user. In addition, with the `manage_own_api_key` privilege, - an invalidation request must be issued in one of the three formats: - Set the - parameter `owner=true`. - Or, set both `username` and `realm_name` to match the - user’s identity. - Or, if the request is issued by an API key, i.e. an API key - invalidates itself, specify its ID in the `ids` field. + Invalidate API keys. This API invalidates API keys created by the create API + key or grant API key APIs. Invalidated API keys fail authentication, but they + can still be viewed using the get API key information and query API key information + APIs, for at least the configured retention period, until they are automatically + deleted. The `manage_api_key` privilege allows deleting any API keys. The `manage_own_api_key` + only allows deleting API keys that are owned by the user. In addition, with the + `manage_own_api_key` privilege, an invalidation request must be issued in one + of the three formats: - Set the parameter `owner=true`. - Or, set both `username` + and `realm_name` to match the user’s identity. - Or, if the request is issued + by an API key, that is to say an API key invalidates itself, specify its ID in + the `ids` field. - ``_ + ``_ :param id: :param ids: A list of API key ids. This parameter cannot be used with any of @@ -2177,9 +2195,14 @@ def invalidate_token( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Invalidates one or more access tokens or refresh tokens. + Invalidate a token. The access tokens returned by the get token API have a finite + period of time for which they are valid. After that time period, they can no + longer be used. The time period is defined by the `xpack.security.authc.token.timeout` + setting. The refresh tokens returned by the get token API are only valid for + 24 hours. They can also be used exactly once. If you want to invalidate one or + more access or refresh tokens immediately, use this invalidate token API. - ``_ + ``_ :param realm_name: :param refresh_token: @@ -2237,9 +2260,9 @@ def put_privileges( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Adds or updates application privileges. + Create or update application privileges. 
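The invalidate_api_key description above lists three accepted request formats; the two most common are shown below, with the key id being a placeholder value.

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

    # Format 1: invalidate every API key owned by the calling user.
    resp = client.security.invalidate_api_key(owner=True)
    print(resp["invalidated_api_keys"], resp["error_count"])

    # Format 2: invalidate specific keys by id.
    client.security.invalidate_api_key(ids=["VuaCfGcBCdbkQm-e5aOx"])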
- ``_ + ``_ :param privileges: :param refresh: If `true` (the default) then refresh the affected shards to make @@ -2285,6 +2308,7 @@ def put_privileges( "global_", "indices", "metadata", + "remote_indices", "run_as", "transient_metadata", ), @@ -2373,16 +2397,18 @@ def put_role( refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, + remote_indices: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, run_as: t.Optional[t.Sequence[str]] = None, transient_metadata: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - The role management APIs are generally the preferred way to manage roles, rather - than using file-based role management. The create or update roles API cannot - update roles that are defined in roles files. + Create or update roles. The role management APIs are generally the preferred + way to manage roles in the native realm, rather than using file-based role management. + The create or update roles API cannot update roles that are defined in roles + files. File-based role management is not available in Elastic Serverless. - ``_ + ``_ :param name: The name of the role. :param applications: A list of application privilege entries. @@ -2398,6 +2424,7 @@ def put_role( :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + :param remote_indices: A list of remote indices permissions entries. :param run_as: A list of users that the owners of this role can impersonate. *Note*: in Serverless, the run-as feature is disabled. For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will @@ -2438,6 +2465,8 @@ def put_role( __body["indices"] = indices if metadata is not None: __body["metadata"] = metadata + if remote_indices is not None: + __body["remote_indices"] = remote_indices if run_as is not None: __body["run_as"] = run_as if transient_metadata is not None: @@ -2483,9 +2512,16 @@ def put_role_mapping( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates and updates role mappings. + Create or update role mappings. Role mappings define which roles are assigned + to each user. Each mapping has rules that identify users and a list of roles + that are granted to those users. The role mapping APIs are generally the preferred + way to manage role mappings rather than using role mapping files. The create + or update role mappings API cannot update role mappings that are defined in role + mapping files. This API does not create roles. Rather, it maps users to existing + roles. Roles can be created by using the create or update roles API or roles + files. - ``_ + ``_ :param name: Role-mapping name :param enabled: @@ -2570,10 +2606,11 @@ def put_user( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Adds and updates users in the native realm. These users are commonly referred - to as native users. + Create or update users. A password is required for adding a new user but is optional + when updating an existing user. To change a user’s password without updating + any other fields, use the change password API. 
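The put_role hunk above adds the remote_indices parameter; a hedged sketch of granting read access across a remote cluster follows, where the role name, index patterns and the remote cluster alias are invented and the shape of the remote_indices entry is an assumption based on the parameter description.

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

    client.security.put_role(
        name="cross-cluster-logs-reader",
        cluster=["monitor"],
        indices=[{"names": ["logs-*"], "privileges": ["read", "view_index_metadata"]}],
        remote_indices=[
            {"clusters": ["europe-cluster"], "names": ["logs-*"], "privileges": ["read"]}
        ],
    )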
- ``_ + ``_ :param username: The username of the User :param email: @@ -2668,10 +2705,10 @@ def query_api_keys( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Query API keys. Retrieves a paginated list of API keys and their information. + Find API keys with a query. Get a paginated list of API keys and their information. You can optionally filter the results with a query. - ``_ + ``_ :param aggregations: Any aggregations to run over the corpus of returned API keys. Aggregations and queries work together. Aggregations are computed only @@ -2795,10 +2832,10 @@ def query_role( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves roles in a paginated manner. You can optionally filter the results - with a query. + Find roles with a query. Get roles in a paginated manner. You can optionally + filter the results with a query. - ``_ + ``_ :param from_: Starting document offset. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more @@ -2881,10 +2918,10 @@ def query_user( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information for Users in a paginated manner. You can optionally filter - the results with a query. + Find users with a query. Get information for users in a paginated manner. You + can optionally filter the results with a query. - ``_ + ``_ :param from_: Starting document offset. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more @@ -2960,9 +2997,9 @@ def saml_authenticate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Submits a SAML Response message to Elasticsearch for consumption. + Authenticate SAML. Submits a SAML response message to Elasticsearch for consumption. - ``_ + ``_ :param content: The SAML response as it was sent by the user’s browser, usually a Base64 encoded XML document. @@ -3022,9 +3059,9 @@ def saml_complete_logout( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Verifies the logout response sent from the SAML IdP. + Logout of SAML completely. Verifies the logout response sent from the SAML IdP. - ``_ + ``_ :param ids: A json array with all the valid SAML Request Ids that the caller of the API has for the current user. @@ -3088,9 +3125,9 @@ def saml_invalidate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Submits a SAML LogoutRequest message to Elasticsearch for consumption. + Invalidate SAML. Submits a SAML LogoutRequest message to Elasticsearch for consumption. - ``_ + ``_ :param query_string: The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout. This query should include @@ -3155,9 +3192,9 @@ def saml_logout( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Submits a request to invalidate an access token and refresh token. + Logout of SAML. Submits a request to invalidate an access token and refresh token. - ``_ + ``_ :param token: The access token that was returned as a response to calling the SAML authenticate API. 
Alternatively, the most recent token that was received @@ -3212,10 +3249,10 @@ def saml_prepare_authentication( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a SAML authentication request () as a URL string, based - on the configuration of the respective SAML realm in Elasticsearch. + Prepare SAML authentication. Creates a SAML authentication request (``) + as a URL string, based on the configuration of the respective SAML realm in Elasticsearch. - ``_ + ``_ :param acs: The Assertion Consumer Service URL that matches the one of the SAML realms in Elasticsearch. The realm is used to generate the authentication @@ -3268,9 +3305,10 @@ def saml_service_provider_metadata( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Generate SAML metadata for a SAML 2.0 Service Provider. + Create SAML service provider metadata. Generate SAML metadata for a SAML 2.0 + Service Provider. - ``_ + ``_ :param realm_name: The name of the SAML realm in Elasticsearch. """ @@ -3314,9 +3352,10 @@ def suggest_user_profiles( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get suggestions for user profiles that match specified search criteria. + Suggest a user profile. Get suggestions for user profiles that match specified + search criteria. - ``_ + ``_ :param data: List of filters for the `data` field of the profile document. To return all content use `data=*`. To return a subset of content use `data=` @@ -3400,7 +3439,7 @@ def update_api_key( not possible to use an API key as the authentication credential for this API. To update an API key, the owner user’s credentials are required. - ``_ + ``_ :param id: The ID of the API key to update. :param expiration: Expiration time for the API key. @@ -3473,10 +3512,10 @@ def update_user_profile_data( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates specific data for the user profile that's associated with the specified - unique ID. + Update user profile data. Update specific data for the user profile that is associated + with a unique ID. - ``_ + ``_ :param uid: A unique identifier for the user profile. :param data: Non-searchable data that you want to associate with the user profile. diff --git a/elasticsearch/_sync/client/slm.py b/elasticsearch/_sync/client/slm.py index 86f04928f..5b6054d72 100644 --- a/elasticsearch/_sync/client/slm.py +++ b/elasticsearch/_sync/client/slm.py @@ -38,7 +38,7 @@ def delete_lifecycle( """ Deletes an existing snapshot lifecycle policy. - ``_ + ``_ :param policy_id: The id of the snapshot lifecycle policy to remove """ @@ -79,7 +79,7 @@ def execute_lifecycle( Immediately creates a snapshot according to the lifecycle policy, without waiting for the scheduled time. - ``_ + ``_ :param policy_id: The id of the snapshot lifecycle policy to be executed """ @@ -118,7 +118,7 @@ def execute_retention( """ Deletes any snapshots that are expired according to the policy's retention rules. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_slm/_execute_retention" @@ -155,7 +155,7 @@ def get_lifecycle( Retrieves one or more snapshot lifecycle policy definitions and information about the latest snapshot attempts. - ``_ + ``_ :param policy_id: Comma-separated list of snapshot lifecycle policies to retrieve """ @@ -198,7 +198,7 @@ def get_stats( Returns global and policy-level statistics about actions taken by snapshot lifecycle management. 
- ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_slm/stats" @@ -233,7 +233,7 @@ def get_status( """ Retrieves the status of snapshot lifecycle management (SLM). - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_slm/status" @@ -279,7 +279,7 @@ def put_lifecycle( """ Creates or updates a snapshot lifecycle policy. - ``_ + ``_ :param policy_id: ID for the snapshot lifecycle policy you want to create or update. @@ -356,7 +356,7 @@ def start( """ Turns on snapshot lifecycle management (SLM). - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_slm/start" @@ -391,7 +391,7 @@ def stop( """ Turns off snapshot lifecycle management (SLM). - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_slm/stop" diff --git a/elasticsearch/_sync/client/snapshot.py b/elasticsearch/_sync/client/snapshot.py index bb5297e26..a52498c63 100644 --- a/elasticsearch/_sync/client/snapshot.py +++ b/elasticsearch/_sync/client/snapshot.py @@ -41,7 +41,7 @@ def cleanup_repository( Triggers the review of a snapshot repository’s contents and deletes any stale data not referenced by existing snapshots. - ``_ + ``_ :param name: Snapshot repository to clean up. :param master_timeout: Period to wait for a connection to the master node. @@ -95,7 +95,7 @@ def clone( """ Clones indices from one snapshot into another snapshot in the same repository. - ``_ + ``_ :param repository: A repository name :param snapshot: The name of the snapshot to clone from @@ -178,7 +178,7 @@ def create( """ Creates a snapshot in a repository. - ``_ + ``_ :param repository: Repository for the snapshot. :param snapshot: Name of the snapshot. Must be unique in the repository. @@ -282,7 +282,7 @@ def create_repository( """ Creates a repository. - ``_ + ``_ :param name: A repository name :param repository: @@ -342,7 +342,7 @@ def delete( """ Deletes one or more snapshots. - ``_ + ``_ :param repository: A repository name :param snapshot: A comma-separated list of snapshot names @@ -393,7 +393,7 @@ def delete_repository( """ Deletes a repository. - ``_ + ``_ :param name: Name of the snapshot repository to unregister. Wildcard (`*`) patterns are supported. @@ -467,7 +467,7 @@ def get( """ Returns information about a snapshot. - ``_ + ``_ :param repository: Comma-separated list of snapshot repository names used to limit the request. Wildcard (*) expressions are supported. @@ -579,7 +579,7 @@ def get_repository( """ Returns information about a repository. 
- ``_ + ``_ :param name: A comma-separated list of repository names :param local: Return local information, do not retrieve the state from master @@ -616,6 +616,84 @@ def get_repository( path_parts=__path_parts, ) + @_rewrite_parameters() + def repository_verify_integrity( + self, + *, + name: t.Union[str, t.Sequence[str]], + blob_thread_pool_concurrency: t.Optional[int] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + index_snapshot_verification_concurrency: t.Optional[int] = None, + index_verification_concurrency: t.Optional[int] = None, + max_bytes_per_sec: t.Optional[str] = None, + max_failed_shard_snapshots: t.Optional[int] = None, + meta_thread_pool_concurrency: t.Optional[int] = None, + pretty: t.Optional[bool] = None, + snapshot_verification_concurrency: t.Optional[int] = None, + verify_blob_contents: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Verifies the integrity of the contents of a snapshot repository + + ``_ + + :param name: A repository name + :param blob_thread_pool_concurrency: Number of threads to use for reading blob + contents + :param index_snapshot_verification_concurrency: Number of snapshots to verify + concurrently within each index + :param index_verification_concurrency: Number of indices to verify concurrently + :param max_bytes_per_sec: Rate limit for individual blob verification + :param max_failed_shard_snapshots: Maximum permitted number of failed shard snapshots + :param meta_thread_pool_concurrency: Number of threads to use for reading metadata + :param snapshot_verification_concurrency: Number of snapshots to verify concurrently + :param verify_blob_contents: Whether to verify the contents of individual blobs + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'name'") + __path_parts: t.Dict[str, str] = {"repository": _quote(name)} + __path = f'/_snapshot/{__path_parts["repository"]}/_verify_integrity' + __query: t.Dict[str, t.Any] = {} + if blob_thread_pool_concurrency is not None: + __query["blob_thread_pool_concurrency"] = blob_thread_pool_concurrency + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if index_snapshot_verification_concurrency is not None: + __query["index_snapshot_verification_concurrency"] = ( + index_snapshot_verification_concurrency + ) + if index_verification_concurrency is not None: + __query["index_verification_concurrency"] = index_verification_concurrency + if max_bytes_per_sec is not None: + __query["max_bytes_per_sec"] = max_bytes_per_sec + if max_failed_shard_snapshots is not None: + __query["max_failed_shard_snapshots"] = max_failed_shard_snapshots + if meta_thread_pool_concurrency is not None: + __query["meta_thread_pool_concurrency"] = meta_thread_pool_concurrency + if pretty is not None: + __query["pretty"] = pretty + if snapshot_verification_concurrency is not None: + __query["snapshot_verification_concurrency"] = ( + snapshot_verification_concurrency + ) + if verify_blob_contents is not None: + __query["verify_blob_contents"] = verify_blob_contents + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + endpoint_id="snapshot.repository_verify_integrity", + path_parts=__path_parts, + ) + @_rewrite_parameters( 
body_fields=( "feature_states", @@ -656,7 +734,7 @@ def restore( """ Restores a snapshot. - ``_ + ``_ :param repository: A repository name :param snapshot: A snapshot name @@ -749,7 +827,7 @@ def status( """ Returns information about the status of a snapshot. - ``_ + ``_ :param repository: A repository name :param snapshot: A comma-separated list of snapshot names @@ -808,7 +886,7 @@ def verify_repository( """ Verifies a repository. - ``_ + ``_ :param name: A repository name :param master_timeout: Explicit operation timeout for connection to master node diff --git a/elasticsearch/_sync/client/sql.py b/elasticsearch/_sync/client/sql.py index 3635bfc0b..b7da9229c 100644 --- a/elasticsearch/_sync/client/sql.py +++ b/elasticsearch/_sync/client/sql.py @@ -41,7 +41,7 @@ def clear_cursor( """ Clears the SQL cursor - ``_ + ``_ :param cursor: Cursor to clear. """ @@ -87,7 +87,7 @@ def delete_async( Deletes an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it. - ``_ + ``_ :param id: Identifier for the search. """ @@ -134,7 +134,7 @@ def get_async( Returns the current status and available results for an async SQL search or stored synchronous SQL search - ``_ + ``_ :param id: Identifier for the search. :param delimiter: Separator for CSV results. The API only supports this parameter @@ -192,7 +192,7 @@ def get_async_status( Returns the current status of an async SQL search or a stored synchronous SQL search - ``_ + ``_ :param id: Identifier for the search. """ @@ -251,7 +251,11 @@ def query( field_multi_value_leniency: t.Optional[bool] = None, filter: t.Optional[t.Mapping[str, t.Any]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, - format: t.Optional[str] = None, + format: t.Optional[ + t.Union[ + str, t.Literal["cbor", "csv", "json", "smile", "tsv", "txt", "yaml"] + ] + ] = None, human: t.Optional[bool] = None, index_using_frozen: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, @@ -271,7 +275,7 @@ def query( """ Executes a SQL request - ``_ + ``_ :param catalog: Default catalog (cluster) for queries. If unspecified, the queries execute on the data in the local cluster only. @@ -381,7 +385,7 @@ def translate( """ Translates SQL into Elasticsearch queries - ``_ + ``_ :param query: SQL query to run. :param fetch_size: The maximum number of rows (or entries) to return in one response. diff --git a/elasticsearch/_sync/client/ssl.py b/elasticsearch/_sync/client/ssl.py index 19892748e..9faa52fad 100644 --- a/elasticsearch/_sync/client/ssl.py +++ b/elasticsearch/_sync/client/ssl.py @@ -35,10 +35,25 @@ def certificates( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the X.509 certificates used to encrypt communications - in the cluster. + Get SSL certificates. Get information about the X.509 certificates that are used + to encrypt communications in the cluster. The API returns a list that includes + certificates from all TLS contexts including: - Settings for transport and HTTP + interfaces - TLS settings that are used within authentication realms - TLS settings + for remote monitoring exporters The list includes certificates that are used + for configuring trust, such as those configured in the `xpack.security.transport.ssl.truststore` + and `xpack.security.transport.ssl.certificate_authorities` settings. 
It also + includes certificates that are used for configuring server identity, such as + `xpack.security.http.ssl.keystore` and `xpack.security.http.ssl.certificate settings`. + The list does not include certificates that are sourced from the default SSL + context of the Java Runtime Environment (JRE), even if those certificates are + in use within Elasticsearch. NOTE: When a PKCS#11 token is configured as the + truststore of the JRE, the API returns all the certificates that are included + in the PKCS#11 token irrespective of whether these are used in the Elasticsearch + TLS configuration. If Elasticsearch is configured to use a keystore or truststore, + the API output includes all certificates in that store, even though some of the + certificates might not be in active use within the cluster. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ssl/certificates" diff --git a/elasticsearch/_sync/client/synonyms.py b/elasticsearch/_sync/client/synonyms.py index 9e2b66ee6..b82ec67a0 100644 --- a/elasticsearch/_sync/client/synonyms.py +++ b/elasticsearch/_sync/client/synonyms.py @@ -38,7 +38,7 @@ def delete_synonym( """ Deletes a synonym set - ``_ + ``_ :param id: The id of the synonyms set to be deleted """ @@ -79,7 +79,7 @@ def delete_synonym_rule( """ Deletes a synonym rule in a synonym set - ``_ + ``_ :param set_id: The id of the synonym set to be updated :param rule_id: The id of the synonym rule to be deleted @@ -129,7 +129,7 @@ def get_synonym( """ Retrieves a synonym set - ``_ + ``_ :param id: "The id of the synonyms set to be retrieved :param from_: Starting offset for query rules to be retrieved @@ -176,7 +176,7 @@ def get_synonym_rule( """ Retrieves a synonym rule from a synonym set - ``_ + ``_ :param set_id: The id of the synonym set to retrieve the synonym rule from :param rule_id: The id of the synonym rule to retrieve @@ -225,7 +225,7 @@ def get_synonyms_sets( """ Retrieves a summary of all defined synonym sets - ``_ + ``_ :param from_: Starting offset :param size: specifies a max number of results to get @@ -274,7 +274,7 @@ def put_synonym( """ Creates or updates a synonym set. - ``_ + ``_ :param id: The id of the synonyms set to be created or updated :param synonyms_set: The synonym set information to update @@ -327,7 +327,7 @@ def put_synonym_rule( """ Creates or updates a synonym rule in a synonym set - ``_ + ``_ :param set_id: The id of the synonym set to be updated with the synonym rule :param rule_id: The id of the synonym rule to be updated or created diff --git a/elasticsearch/_sync/client/tasks.py b/elasticsearch/_sync/client/tasks.py index a0c4a742d..5474ef2de 100644 --- a/elasticsearch/_sync/client/tasks.py +++ b/elasticsearch/_sync/client/tasks.py @@ -42,7 +42,7 @@ def cancel( """ Cancels a task, if it can be cancelled through an API. - ``_ + ``_ :param task_id: ID of the task. :param actions: Comma-separated list or wildcard expression of actions used to @@ -102,7 +102,7 @@ def get( Get task information. Returns information about the tasks currently executing in the cluster. - ``_ + ``_ :param task_id: ID of the task. :param timeout: Period to wait for a response. If no response is received before @@ -160,7 +160,7 @@ def list( The task management API returns information about tasks currently executing on one or more nodes in the cluster. - ``_ + ``_ :param actions: Comma-separated list or wildcard expression of actions used to limit the request. 
diff --git a/elasticsearch/_sync/client/text_structure.py b/elasticsearch/_sync/client/text_structure.py index 48f02393a..a3e118d8f 100644 --- a/elasticsearch/_sync/client/text_structure.py +++ b/elasticsearch/_sync/client/text_structure.py @@ -53,7 +53,7 @@ def find_structure( Finds the structure of a text file. The text file must contain data that is suitable to be ingested into Elasticsearch. - ``_ + ``_ :param text_files: :param charset: The text’s character set. It must be a character set that is @@ -193,7 +193,7 @@ def test_grok_pattern( """ Tests a Grok pattern on some text. - ``_ + ``_ :param grok_pattern: Grok pattern to run on the text. :param text: Lines of text to run the Grok pattern on. diff --git a/elasticsearch/_sync/client/transform.py b/elasticsearch/_sync/client/transform.py index 49613f26f..fc5a64a4e 100644 --- a/elasticsearch/_sync/client/transform.py +++ b/elasticsearch/_sync/client/transform.py @@ -41,7 +41,7 @@ def delete_transform( """ Delete a transform. Deletes a transform. - ``_ + ``_ :param transform_id: Identifier for the transform. :param delete_dest_index: If this value is true, the destination index is deleted @@ -101,7 +101,7 @@ def get_transform( """ Get transforms. Retrieves configuration information for transforms. - ``_ + ``_ :param transform_id: Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using @@ -170,7 +170,7 @@ def get_transform_stats( """ Get transform stats. Retrieves usage information for transforms. - ``_ + ``_ :param transform_id: Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using @@ -256,7 +256,7 @@ def preview_transform( These values are determined based on the field types of the source index and the transform aggregations. - ``_ + ``_ :param transform_id: Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform configuration details in @@ -393,7 +393,7 @@ def put_transform( If you used transforms prior to 7.5, also do not give users any privileges on `.data-frame-internal*` indices. - ``_ + ``_ :param transform_id: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -495,7 +495,7 @@ def reset_transform( it; alternatively, use the `force` query parameter. If the destination index was created by the transform, it is deleted. - ``_ + ``_ :param transform_id: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -547,7 +547,7 @@ def schedule_now_transform( the transform will be processed again at now + frequency unless _schedule_now API is called again in the meantime. - ``_ + ``_ :param transform_id: Identifier for the transform. :param timeout: Controls the time to wait for the scheduling to take place @@ -611,7 +611,7 @@ def start_transform( privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. - ``_ + ``_ :param transform_id: Identifier for the transform. :param from_: Restricts the set of transformed entities to those changed after @@ -665,7 +665,7 @@ def stop_transform( """ Stop transforms. Stops one or more transforms. - ``_ + ``_ :param transform_id: Identifier for the transform. To stop multiple transforms, use a comma-separated list or a wildcard expression. 
To stop all transforms, @@ -765,7 +765,7 @@ def update_transform( which roles the user who updated it had at the time of update and runs with those privileges. - ``_ + ``_ :param transform_id: Identifier for the transform. :param defer_validation: When true, deferrable validations are not run. This @@ -852,7 +852,7 @@ def upgrade_transforms( the role used to read source data and write to the destination index remains unchanged. - ``_ + ``_ :param dry_run: When true, the request checks for updates but does not run them. :param timeout: Period to wait for a response. If no response is received before diff --git a/elasticsearch/_sync/client/watcher.py b/elasticsearch/_sync/client/watcher.py index dfb71a1be..1b35a2f97 100644 --- a/elasticsearch/_sync/client/watcher.py +++ b/elasticsearch/_sync/client/watcher.py @@ -39,7 +39,7 @@ def ack_watch( """ Acknowledges a watch, manually throttling the execution of the watch's actions. - ``_ + ``_ :param watch_id: Watch ID :param action_id: A comma-separated list of the action ids to be acked @@ -90,7 +90,7 @@ def activate_watch( """ Activates a currently inactive watch. - ``_ + ``_ :param watch_id: Watch ID """ @@ -130,7 +130,7 @@ def deactivate_watch( """ Deactivates a currently active watch. - ``_ + ``_ :param watch_id: Watch ID """ @@ -170,7 +170,7 @@ def delete_watch( """ Removes a watch from Watcher. - ``_ + ``_ :param id: Watch ID """ @@ -245,7 +245,7 @@ def execute_watch( and control whether a watch record would be written to the watch history after execution. - ``_ + ``_ :param id: Identifier for the watch. :param action_modes: Determines how to handle the watch actions as part of the @@ -328,7 +328,7 @@ def get_watch( """ Retrieves a watch by its ID. - ``_ + ``_ :param id: Watch ID """ @@ -390,7 +390,7 @@ def put_watch( """ Creates a new watch, or updates an existing one. - ``_ + ``_ :param id: Watch ID :param actions: @@ -487,7 +487,7 @@ def query_watches( """ Retrieves stored watches. - ``_ + ``_ :param from_: The offset from the first result to fetch. Needs to be non-negative. :param query: Optional, query filter watches to be returned. @@ -557,7 +557,7 @@ def start( """ Starts Watcher if it is not already running. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_watcher/_start" @@ -614,7 +614,7 @@ def stats( """ Retrieves the current Watcher metrics. - ``_ + ``_ :param metric: Defines which additional metrics are included in the response. :param emit_stacktraces: Defines whether stack traces are generated for each @@ -660,7 +660,7 @@ def stop( """ Stops Watcher if it is running. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_watcher/_stop" diff --git a/elasticsearch/_sync/client/xpack.py b/elasticsearch/_sync/client/xpack.py index 8ff48f904..f5bd3e897 100644 --- a/elasticsearch/_sync/client/xpack.py +++ b/elasticsearch/_sync/client/xpack.py @@ -34,7 +34,9 @@ def info( self, *, accept_enterprise: t.Optional[bool] = None, - categories: t.Optional[t.Sequence[str]] = None, + categories: t.Optional[ + t.Sequence[t.Union[str, t.Literal["build", "features", "license"]]] + ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, @@ -43,7 +45,7 @@ def info( """ Provides general information about the installed X-Pack features. 
- ``_ + ``_ :param accept_enterprise: If this param is used it must be set to true :param categories: A comma-separated list of the information categories to include @@ -88,7 +90,7 @@ def usage( This API provides information about which features are currently enabled and available under the current license and some usage statistics. - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and From 43541b05f474a6ac9fb3bdc6f555c8cd4eff6a8d Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 10 Nov 2024 21:53:36 +0400 Subject: [PATCH 04/65] [Backport 8.x] Emit Python warnings for beta and tech preview APIs (#2686) Co-authored-by: Quentin Pradet --- elasticsearch/_async/client/__init__.py | 3 ++ elasticsearch/_async/client/cat.py | 9 +++- elasticsearch/_async/client/connector.py | 33 +++++++++++++- elasticsearch/_async/client/features.py | 3 +- elasticsearch/_async/client/fleet.py | 10 ++++- elasticsearch/_async/client/indices.py | 11 ++++- elasticsearch/_async/client/inference.py | 12 +++++- elasticsearch/_async/client/nodes.py | 10 ++++- elasticsearch/_async/client/rollup.py | 16 ++++++- .../_async/client/search_application.py | 16 ++++++- .../_async/client/searchable_snapshots.py | 10 ++++- elasticsearch/_async/client/snapshot.py | 9 +++- elasticsearch/_async/client/tasks.py | 11 ++++- elasticsearch/_async/client/utils.py | 4 ++ elasticsearch/_sync/client/__init__.py | 3 ++ elasticsearch/_sync/client/cat.py | 9 +++- elasticsearch/_sync/client/connector.py | 33 +++++++++++++- elasticsearch/_sync/client/features.py | 3 +- elasticsearch/_sync/client/fleet.py | 10 ++++- elasticsearch/_sync/client/indices.py | 11 ++++- elasticsearch/_sync/client/inference.py | 12 +++++- elasticsearch/_sync/client/nodes.py | 10 ++++- elasticsearch/_sync/client/rollup.py | 16 ++++++- .../_sync/client/search_application.py | 16 ++++++- .../_sync/client/searchable_snapshots.py | 10 ++++- elasticsearch/_sync/client/snapshot.py | 9 +++- elasticsearch/_sync/client/tasks.py | 11 ++++- elasticsearch/_sync/client/utils.py | 41 ++++++++++++++++++ elasticsearch/exceptions.py | 4 ++ test_elasticsearch/test_client/test_utils.py | 43 ++++++++++++++++++- 30 files changed, 373 insertions(+), 25 deletions(-) diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py index 771420ced..3086538e2 100644 --- a/elasticsearch/_async/client/__init__.py +++ b/elasticsearch/_async/client/__init__.py @@ -82,8 +82,10 @@ _TYPE_HOSTS, CLIENT_META_SERVICE, SKIP_IN_PATH, + Stability, _quote, _rewrite_parameters, + _stability_warning, client_node_configs, is_requests_http_auth, is_requests_node_class, @@ -3554,6 +3556,7 @@ async def render_search_template( @_rewrite_parameters( body_fields=("context", "context_setup", "script"), ) + @_stability_warning(Stability.EXPERIMENTAL) async def scripts_painless_execute( self, *, diff --git a/elasticsearch/_async/client/cat.py b/elasticsearch/_async/client/cat.py index 2c8e8e055..257470b84 100644 --- a/elasticsearch/_async/client/cat.py +++ b/elasticsearch/_async/client/cat.py @@ -20,7 +20,13 @@ from elastic_transport import ObjectApiResponse, TextApiResponse from ._base import NamespacedClient -from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters +from .utils import ( + SKIP_IN_PATH, + Stability, + _quote, + _rewrite_parameters, + _stability_warning, +) class CatClient(NamespacedClient): @@ 
-2517,6 +2523,7 @@ async def snapshots( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) async def tasks( self, *, diff --git a/elasticsearch/_async/client/connector.py b/elasticsearch/_async/client/connector.py index ac2f1de31..34d566333 100644 --- a/elasticsearch/_async/client/connector.py +++ b/elasticsearch/_async/client/connector.py @@ -20,12 +20,19 @@ from elastic_transport import ObjectApiResponse from ._base import NamespacedClient -from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters +from .utils import ( + SKIP_IN_PATH, + Stability, + _quote, + _rewrite_parameters, + _stability_warning, +) class ConnectorClient(NamespacedClient): @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) async def check_in( self, *, @@ -67,6 +74,7 @@ async def check_in( ) @_rewrite_parameters() + @_stability_warning(Stability.BETA) async def delete( self, *, @@ -115,6 +123,7 @@ async def delete( ) @_rewrite_parameters() + @_stability_warning(Stability.BETA) async def get( self, *, @@ -170,6 +179,7 @@ async def get( "sync_cursor", ), ) + @_stability_warning(Stability.EXPERIMENTAL) async def last_sync( self, *, @@ -299,6 +309,7 @@ async def last_sync( @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) + @_stability_warning(Stability.BETA) async def list( self, *, @@ -372,6 +383,7 @@ async def list( "service_type", ), ) + @_stability_warning(Stability.BETA) async def post( self, *, @@ -453,6 +465,7 @@ async def post( "service_type", ), ) + @_stability_warning(Stability.BETA) async def put( self, *, @@ -529,6 +542,7 @@ async def put( ) @_rewrite_parameters() + @_stability_warning(Stability.BETA) async def sync_job_cancel( self, *, @@ -576,6 +590,7 @@ async def sync_job_cancel( ) @_rewrite_parameters() + @_stability_warning(Stability.BETA) async def sync_job_delete( self, *, @@ -620,6 +635,7 @@ async def sync_job_delete( ) @_rewrite_parameters() + @_stability_warning(Stability.BETA) async def sync_job_get( self, *, @@ -664,6 +680,7 @@ async def sync_job_get( @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) + @_stability_warning(Stability.BETA) async def sync_job_list( self, *, @@ -743,6 +760,7 @@ async def sync_job_list( @_rewrite_parameters( body_fields=("id", "job_type", "trigger_method"), ) + @_stability_warning(Stability.BETA) async def sync_job_post( self, *, @@ -802,6 +820,7 @@ async def sync_job_post( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) async def update_active_filtering( self, *, @@ -845,6 +864,7 @@ async def update_active_filtering( @_rewrite_parameters( body_fields=("api_key_id", "api_key_secret_id"), ) + @_stability_warning(Stability.BETA) async def update_api_key_id( self, *, @@ -903,6 +923,7 @@ async def update_api_key_id( @_rewrite_parameters( body_fields=("configuration", "values"), ) + @_stability_warning(Stability.BETA) async def update_configuration( self, *, @@ -958,6 +979,7 @@ async def update_configuration( @_rewrite_parameters( body_fields=("error",), ) + @_stability_warning(Stability.EXPERIMENTAL) async def update_error( self, *, @@ -1013,6 +1035,7 @@ async def update_error( @_rewrite_parameters( body_fields=("advanced_snippet", "filtering", "rules"), ) + @_stability_warning(Stability.BETA) async def update_filtering( self, *, @@ -1074,6 +1097,7 @@ async def update_filtering( @_rewrite_parameters( body_fields=("validation",), ) + @_stability_warning(Stability.EXPERIMENTAL) async def update_filtering_validation( self, *, @@ -1127,6 +1151,7 @@ async def update_filtering_validation( 
@_rewrite_parameters( body_fields=("index_name",), ) + @_stability_warning(Stability.BETA) async def update_index_name( self, *, @@ -1180,6 +1205,7 @@ async def update_index_name( @_rewrite_parameters( body_fields=("description", "name"), ) + @_stability_warning(Stability.BETA) async def update_name( self, *, @@ -1234,6 +1260,7 @@ async def update_name( @_rewrite_parameters( body_fields=("is_native",), ) + @_stability_warning(Stability.BETA) async def update_native( self, *, @@ -1286,6 +1313,7 @@ async def update_native( @_rewrite_parameters( body_fields=("pipeline",), ) + @_stability_warning(Stability.BETA) async def update_pipeline( self, *, @@ -1339,6 +1367,7 @@ async def update_pipeline( @_rewrite_parameters( body_fields=("scheduling",), ) + @_stability_warning(Stability.BETA) async def update_scheduling( self, *, @@ -1391,6 +1420,7 @@ async def update_scheduling( @_rewrite_parameters( body_fields=("service_type",), ) + @_stability_warning(Stability.BETA) async def update_service_type( self, *, @@ -1443,6 +1473,7 @@ async def update_service_type( @_rewrite_parameters( body_fields=("status",), ) + @_stability_warning(Stability.EXPERIMENTAL) async def update_status( self, *, diff --git a/elasticsearch/_async/client/features.py b/elasticsearch/_async/client/features.py index 62e730285..32fecf55a 100644 --- a/elasticsearch/_async/client/features.py +++ b/elasticsearch/_async/client/features.py @@ -20,7 +20,7 @@ from elastic_transport import ObjectApiResponse from ._base import NamespacedClient -from .utils import _rewrite_parameters +from .utils import Stability, _rewrite_parameters, _stability_warning class FeaturesClient(NamespacedClient): @@ -62,6 +62,7 @@ async def get_features( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) async def reset_features( self, *, diff --git a/elasticsearch/_async/client/fleet.py b/elasticsearch/_async/client/fleet.py index a2ca88f00..eb05f0352 100644 --- a/elasticsearch/_async/client/fleet.py +++ b/elasticsearch/_async/client/fleet.py @@ -20,7 +20,13 @@ from elastic_transport import ObjectApiResponse from ._base import NamespacedClient -from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters +from .utils import ( + SKIP_IN_PATH, + Stability, + _quote, + _rewrite_parameters, + _stability_warning, +) class FleetClient(NamespacedClient): @@ -91,6 +97,7 @@ async def global_checkpoints( @_rewrite_parameters( body_name="searches", ) + @_stability_warning(Stability.EXPERIMENTAL) async def msearch( self, *, @@ -277,6 +284,7 @@ async def msearch( "from": "from_", }, ) + @_stability_warning(Stability.EXPERIMENTAL) async def search( self, *, diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py index 5c8c337d1..e884cd8ff 100644 --- a/elasticsearch/_async/client/indices.py +++ b/elasticsearch/_async/client/indices.py @@ -20,7 +20,13 @@ from elastic_transport import HeadApiResponse, ObjectApiResponse from ._base import NamespacedClient -from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters +from .utils import ( + SKIP_IN_PATH, + Stability, + _quote, + _rewrite_parameters, + _stability_warning, +) class IndicesClient(NamespacedClient): @@ -1032,6 +1038,7 @@ async def delete_template( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) async def disk_usage( self, *, @@ -1114,6 +1121,7 @@ async def disk_usage( @_rewrite_parameters( body_name="config", ) + @_stability_warning(Stability.EXPERIMENTAL) async def downsample( self, *, @@ -1488,6 +1496,7 @@ async def 
explain_data_lifecycle( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) async def field_usage_stats( self, *, diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py index 701ba6835..a3ddb1628 100644 --- a/elasticsearch/_async/client/inference.py +++ b/elasticsearch/_async/client/inference.py @@ -20,12 +20,19 @@ from elastic_transport import ObjectApiResponse from ._base import NamespacedClient -from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters +from .utils import ( + SKIP_IN_PATH, + Stability, + _quote, + _rewrite_parameters, + _stability_warning, +) class InferenceClient(NamespacedClient): @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) async def delete( self, *, @@ -93,6 +100,7 @@ async def delete( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) async def get( self, *, @@ -151,6 +159,7 @@ async def get( @_rewrite_parameters( body_fields=("input", "query", "task_settings"), ) + @_stability_warning(Stability.EXPERIMENTAL) async def inference( self, *, @@ -237,6 +246,7 @@ async def inference( @_rewrite_parameters( body_name="inference_config", ) + @_stability_warning(Stability.EXPERIMENTAL) async def put( self, *, diff --git a/elasticsearch/_async/client/nodes.py b/elasticsearch/_async/client/nodes.py index 17c0f5401..a7b516588 100644 --- a/elasticsearch/_async/client/nodes.py +++ b/elasticsearch/_async/client/nodes.py @@ -20,12 +20,19 @@ from elastic_transport import ObjectApiResponse, TextApiResponse from ._base import NamespacedClient -from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters +from .utils import ( + SKIP_IN_PATH, + Stability, + _quote, + _rewrite_parameters, + _stability_warning, +) class NodesClient(NamespacedClient): @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) async def clear_repositories_metering_archive( self, *, @@ -76,6 +83,7 @@ async def clear_repositories_metering_archive( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) async def get_repositories_metering_info( self, *, diff --git a/elasticsearch/_async/client/rollup.py b/elasticsearch/_async/client/rollup.py index 6ccba08ab..154090dd2 100644 --- a/elasticsearch/_async/client/rollup.py +++ b/elasticsearch/_async/client/rollup.py @@ -20,12 +20,19 @@ from elastic_transport import ObjectApiResponse from ._base import NamespacedClient -from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters +from .utils import ( + SKIP_IN_PATH, + Stability, + _quote, + _rewrite_parameters, + _stability_warning, +) class RollupClient(NamespacedClient): @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) async def delete_job( self, *, @@ -66,6 +73,7 @@ async def delete_job( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) async def get_jobs( self, *, @@ -110,6 +118,7 @@ async def get_jobs( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) async def get_rollup_caps( self, *, @@ -155,6 +164,7 @@ async def get_rollup_caps( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) async def get_rollup_index_caps( self, *, @@ -209,6 +219,7 @@ async def get_rollup_index_caps( ), ignore_deprecated_options={"headers"}, ) + @_stability_warning(Stability.EXPERIMENTAL) async def put_job( self, *, @@ -327,6 +338,7 @@ async def put_job( @_rewrite_parameters( body_fields=("aggregations", "aggs", "query", "size"), ) + @_stability_warning(Stability.EXPERIMENTAL) async def rollup_search( self, *, @@ -397,6 
+409,7 @@ async def rollup_search( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) async def start_job( self, *, @@ -437,6 +450,7 @@ async def start_job( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) async def stop_job( self, *, diff --git a/elasticsearch/_async/client/search_application.py b/elasticsearch/_async/client/search_application.py index e4d0838e5..b8462f575 100644 --- a/elasticsearch/_async/client/search_application.py +++ b/elasticsearch/_async/client/search_application.py @@ -20,12 +20,19 @@ from elastic_transport import ObjectApiResponse from ._base import NamespacedClient -from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters +from .utils import ( + SKIP_IN_PATH, + Stability, + _quote, + _rewrite_parameters, + _stability_warning, +) class SearchApplicationClient(NamespacedClient): @_rewrite_parameters() + @_stability_warning(Stability.BETA) async def delete( self, *, @@ -67,6 +74,7 @@ async def delete( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) async def delete_behavioral_analytics( self, *, @@ -108,6 +116,7 @@ async def delete_behavioral_analytics( ) @_rewrite_parameters() + @_stability_warning(Stability.BETA) async def get( self, *, @@ -148,6 +157,7 @@ async def get( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) async def get_behavioral_analytics( self, *, @@ -193,6 +203,7 @@ async def get_behavioral_analytics( @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) + @_stability_warning(Stability.BETA) async def list( self, *, @@ -243,6 +254,7 @@ async def list( @_rewrite_parameters( body_name="search_application", ) + @_stability_warning(Stability.BETA) async def put( self, *, @@ -299,6 +311,7 @@ async def put( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) async def put_behavioral_analytics( self, *, @@ -342,6 +355,7 @@ async def put_behavioral_analytics( body_fields=("params",), ignore_deprecated_options={"params"}, ) + @_stability_warning(Stability.BETA) async def search( self, *, diff --git a/elasticsearch/_async/client/searchable_snapshots.py b/elasticsearch/_async/client/searchable_snapshots.py index 5d5b9a8a3..092e29ede 100644 --- a/elasticsearch/_async/client/searchable_snapshots.py +++ b/elasticsearch/_async/client/searchable_snapshots.py @@ -20,12 +20,19 @@ from elastic_transport import ObjectApiResponse from ._base import NamespacedClient -from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters +from .utils import ( + SKIP_IN_PATH, + Stability, + _quote, + _rewrite_parameters, + _stability_warning, +) class SearchableSnapshotsClient(NamespacedClient): @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) async def cache_stats( self, *, @@ -75,6 +82,7 @@ async def cache_stats( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) async def clear_cache( self, *, diff --git a/elasticsearch/_async/client/snapshot.py b/elasticsearch/_async/client/snapshot.py index 2223ca978..b9dfdc634 100644 --- a/elasticsearch/_async/client/snapshot.py +++ b/elasticsearch/_async/client/snapshot.py @@ -20,7 +20,13 @@ from elastic_transport import ObjectApiResponse from ._base import NamespacedClient -from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters +from .utils import ( + SKIP_IN_PATH, + Stability, + _quote, + _rewrite_parameters, + _stability_warning, +) class SnapshotClient(NamespacedClient): @@ -617,6 +623,7 @@ async def get_repository( ) @_rewrite_parameters() + 
@_stability_warning(Stability.EXPERIMENTAL) async def repository_verify_integrity( self, *, diff --git a/elasticsearch/_async/client/tasks.py b/elasticsearch/_async/client/tasks.py index fcfe84185..ca9c38eae 100644 --- a/elasticsearch/_async/client/tasks.py +++ b/elasticsearch/_async/client/tasks.py @@ -20,12 +20,19 @@ from elastic_transport import ObjectApiResponse from ._base import NamespacedClient -from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters +from .utils import ( + SKIP_IN_PATH, + Stability, + _quote, + _rewrite_parameters, + _stability_warning, +) class TasksClient(NamespacedClient): @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) async def cancel( self, *, @@ -87,6 +94,7 @@ async def cancel( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) async def get( self, *, @@ -138,6 +146,7 @@ async def get( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) async def list( self, *, diff --git a/elasticsearch/_async/client/utils.py b/elasticsearch/_async/client/utils.py index ec0a6e4b8..97918d9e4 100644 --- a/elasticsearch/_async/client/utils.py +++ b/elasticsearch/_async/client/utils.py @@ -20,10 +20,12 @@ _TYPE_HOSTS, CLIENT_META_SERVICE, SKIP_IN_PATH, + Stability, _base64_auth_header, _quote, _quote_query, _rewrite_parameters, + _stability_warning, client_node_configs, is_requests_http_auth, is_requests_node_class, @@ -37,8 +39,10 @@ "_quote_query", "_TYPE_HOSTS", "SKIP_IN_PATH", + "Stability", "client_node_configs", "_rewrite_parameters", + "_stability_warning", "is_requests_http_auth", "is_requests_node_class", ] diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py index 4f1498e66..d92347291 100644 --- a/elasticsearch/_sync/client/__init__.py +++ b/elasticsearch/_sync/client/__init__.py @@ -82,8 +82,10 @@ _TYPE_HOSTS, CLIENT_META_SERVICE, SKIP_IN_PATH, + Stability, _quote, _rewrite_parameters, + _stability_warning, client_node_configs, is_requests_http_auth, is_requests_node_class, @@ -3552,6 +3554,7 @@ def render_search_template( @_rewrite_parameters( body_fields=("context", "context_setup", "script"), ) + @_stability_warning(Stability.EXPERIMENTAL) def scripts_painless_execute( self, *, diff --git a/elasticsearch/_sync/client/cat.py b/elasticsearch/_sync/client/cat.py index 0048c8eb5..c3ddf4dc6 100644 --- a/elasticsearch/_sync/client/cat.py +++ b/elasticsearch/_sync/client/cat.py @@ -20,7 +20,13 @@ from elastic_transport import ObjectApiResponse, TextApiResponse from ._base import NamespacedClient -from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters +from .utils import ( + SKIP_IN_PATH, + Stability, + _quote, + _rewrite_parameters, + _stability_warning, +) class CatClient(NamespacedClient): @@ -2517,6 +2523,7 @@ def snapshots( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) def tasks( self, *, diff --git a/elasticsearch/_sync/client/connector.py b/elasticsearch/_sync/client/connector.py index 2c64c556d..58d551bb7 100644 --- a/elasticsearch/_sync/client/connector.py +++ b/elasticsearch/_sync/client/connector.py @@ -20,12 +20,19 @@ from elastic_transport import ObjectApiResponse from ._base import NamespacedClient -from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters +from .utils import ( + SKIP_IN_PATH, + Stability, + _quote, + _rewrite_parameters, + _stability_warning, +) class ConnectorClient(NamespacedClient): @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) def check_in( self, *, @@ -67,6 +74,7 @@ def 
check_in( ) @_rewrite_parameters() + @_stability_warning(Stability.BETA) def delete( self, *, @@ -115,6 +123,7 @@ def delete( ) @_rewrite_parameters() + @_stability_warning(Stability.BETA) def get( self, *, @@ -170,6 +179,7 @@ def get( "sync_cursor", ), ) + @_stability_warning(Stability.EXPERIMENTAL) def last_sync( self, *, @@ -299,6 +309,7 @@ def last_sync( @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) + @_stability_warning(Stability.BETA) def list( self, *, @@ -372,6 +383,7 @@ def list( "service_type", ), ) + @_stability_warning(Stability.BETA) def post( self, *, @@ -453,6 +465,7 @@ def post( "service_type", ), ) + @_stability_warning(Stability.BETA) def put( self, *, @@ -529,6 +542,7 @@ def put( ) @_rewrite_parameters() + @_stability_warning(Stability.BETA) def sync_job_cancel( self, *, @@ -576,6 +590,7 @@ def sync_job_cancel( ) @_rewrite_parameters() + @_stability_warning(Stability.BETA) def sync_job_delete( self, *, @@ -620,6 +635,7 @@ def sync_job_delete( ) @_rewrite_parameters() + @_stability_warning(Stability.BETA) def sync_job_get( self, *, @@ -664,6 +680,7 @@ def sync_job_get( @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) + @_stability_warning(Stability.BETA) def sync_job_list( self, *, @@ -743,6 +760,7 @@ def sync_job_list( @_rewrite_parameters( body_fields=("id", "job_type", "trigger_method"), ) + @_stability_warning(Stability.BETA) def sync_job_post( self, *, @@ -802,6 +820,7 @@ def sync_job_post( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) def update_active_filtering( self, *, @@ -845,6 +864,7 @@ def update_active_filtering( @_rewrite_parameters( body_fields=("api_key_id", "api_key_secret_id"), ) + @_stability_warning(Stability.BETA) def update_api_key_id( self, *, @@ -903,6 +923,7 @@ def update_api_key_id( @_rewrite_parameters( body_fields=("configuration", "values"), ) + @_stability_warning(Stability.BETA) def update_configuration( self, *, @@ -958,6 +979,7 @@ def update_configuration( @_rewrite_parameters( body_fields=("error",), ) + @_stability_warning(Stability.EXPERIMENTAL) def update_error( self, *, @@ -1013,6 +1035,7 @@ def update_error( @_rewrite_parameters( body_fields=("advanced_snippet", "filtering", "rules"), ) + @_stability_warning(Stability.BETA) def update_filtering( self, *, @@ -1074,6 +1097,7 @@ def update_filtering( @_rewrite_parameters( body_fields=("validation",), ) + @_stability_warning(Stability.EXPERIMENTAL) def update_filtering_validation( self, *, @@ -1127,6 +1151,7 @@ def update_filtering_validation( @_rewrite_parameters( body_fields=("index_name",), ) + @_stability_warning(Stability.BETA) def update_index_name( self, *, @@ -1180,6 +1205,7 @@ def update_index_name( @_rewrite_parameters( body_fields=("description", "name"), ) + @_stability_warning(Stability.BETA) def update_name( self, *, @@ -1234,6 +1260,7 @@ def update_name( @_rewrite_parameters( body_fields=("is_native",), ) + @_stability_warning(Stability.BETA) def update_native( self, *, @@ -1286,6 +1313,7 @@ def update_native( @_rewrite_parameters( body_fields=("pipeline",), ) + @_stability_warning(Stability.BETA) def update_pipeline( self, *, @@ -1339,6 +1367,7 @@ def update_pipeline( @_rewrite_parameters( body_fields=("scheduling",), ) + @_stability_warning(Stability.BETA) def update_scheduling( self, *, @@ -1391,6 +1420,7 @@ def update_scheduling( @_rewrite_parameters( body_fields=("service_type",), ) + @_stability_warning(Stability.BETA) def update_service_type( self, *, @@ -1443,6 +1473,7 @@ def update_service_type( 
@_rewrite_parameters( body_fields=("status",), ) + @_stability_warning(Stability.EXPERIMENTAL) def update_status( self, *, diff --git a/elasticsearch/_sync/client/features.py b/elasticsearch/_sync/client/features.py index 96748493f..83aa4127e 100644 --- a/elasticsearch/_sync/client/features.py +++ b/elasticsearch/_sync/client/features.py @@ -20,7 +20,7 @@ from elastic_transport import ObjectApiResponse from ._base import NamespacedClient -from .utils import _rewrite_parameters +from .utils import Stability, _rewrite_parameters, _stability_warning class FeaturesClient(NamespacedClient): @@ -62,6 +62,7 @@ def get_features( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) def reset_features( self, *, diff --git a/elasticsearch/_sync/client/fleet.py b/elasticsearch/_sync/client/fleet.py index 098981d07..a8a86a7df 100644 --- a/elasticsearch/_sync/client/fleet.py +++ b/elasticsearch/_sync/client/fleet.py @@ -20,7 +20,13 @@ from elastic_transport import ObjectApiResponse from ._base import NamespacedClient -from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters +from .utils import ( + SKIP_IN_PATH, + Stability, + _quote, + _rewrite_parameters, + _stability_warning, +) class FleetClient(NamespacedClient): @@ -91,6 +97,7 @@ def global_checkpoints( @_rewrite_parameters( body_name="searches", ) + @_stability_warning(Stability.EXPERIMENTAL) def msearch( self, *, @@ -277,6 +284,7 @@ def msearch( "from": "from_", }, ) + @_stability_warning(Stability.EXPERIMENTAL) def search( self, *, diff --git a/elasticsearch/_sync/client/indices.py b/elasticsearch/_sync/client/indices.py index bcd064b1c..92133311a 100644 --- a/elasticsearch/_sync/client/indices.py +++ b/elasticsearch/_sync/client/indices.py @@ -20,7 +20,13 @@ from elastic_transport import HeadApiResponse, ObjectApiResponse from ._base import NamespacedClient -from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters +from .utils import ( + SKIP_IN_PATH, + Stability, + _quote, + _rewrite_parameters, + _stability_warning, +) class IndicesClient(NamespacedClient): @@ -1032,6 +1038,7 @@ def delete_template( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) def disk_usage( self, *, @@ -1114,6 +1121,7 @@ def disk_usage( @_rewrite_parameters( body_name="config", ) + @_stability_warning(Stability.EXPERIMENTAL) def downsample( self, *, @@ -1488,6 +1496,7 @@ def explain_data_lifecycle( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) def field_usage_stats( self, *, diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py index 08f9da4aa..9f58dfbfc 100644 --- a/elasticsearch/_sync/client/inference.py +++ b/elasticsearch/_sync/client/inference.py @@ -20,12 +20,19 @@ from elastic_transport import ObjectApiResponse from ._base import NamespacedClient -from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters +from .utils import ( + SKIP_IN_PATH, + Stability, + _quote, + _rewrite_parameters, + _stability_warning, +) class InferenceClient(NamespacedClient): @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) def delete( self, *, @@ -93,6 +100,7 @@ def delete( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) def get( self, *, @@ -151,6 +159,7 @@ def get( @_rewrite_parameters( body_fields=("input", "query", "task_settings"), ) + @_stability_warning(Stability.EXPERIMENTAL) def inference( self, *, @@ -237,6 +246,7 @@ def inference( @_rewrite_parameters( body_name="inference_config", ) + 
@_stability_warning(Stability.EXPERIMENTAL) def put( self, *, diff --git a/elasticsearch/_sync/client/nodes.py b/elasticsearch/_sync/client/nodes.py index c605a7b6b..5c8e36979 100644 --- a/elasticsearch/_sync/client/nodes.py +++ b/elasticsearch/_sync/client/nodes.py @@ -20,12 +20,19 @@ from elastic_transport import ObjectApiResponse, TextApiResponse from ._base import NamespacedClient -from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters +from .utils import ( + SKIP_IN_PATH, + Stability, + _quote, + _rewrite_parameters, + _stability_warning, +) class NodesClient(NamespacedClient): @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) def clear_repositories_metering_archive( self, *, @@ -76,6 +83,7 @@ def clear_repositories_metering_archive( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) def get_repositories_metering_info( self, *, diff --git a/elasticsearch/_sync/client/rollup.py b/elasticsearch/_sync/client/rollup.py index 9ed2c25fa..75d86e361 100644 --- a/elasticsearch/_sync/client/rollup.py +++ b/elasticsearch/_sync/client/rollup.py @@ -20,12 +20,19 @@ from elastic_transport import ObjectApiResponse from ._base import NamespacedClient -from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters +from .utils import ( + SKIP_IN_PATH, + Stability, + _quote, + _rewrite_parameters, + _stability_warning, +) class RollupClient(NamespacedClient): @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) def delete_job( self, *, @@ -66,6 +73,7 @@ def delete_job( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) def get_jobs( self, *, @@ -110,6 +118,7 @@ def get_jobs( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) def get_rollup_caps( self, *, @@ -155,6 +164,7 @@ def get_rollup_caps( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) def get_rollup_index_caps( self, *, @@ -209,6 +219,7 @@ def get_rollup_index_caps( ), ignore_deprecated_options={"headers"}, ) + @_stability_warning(Stability.EXPERIMENTAL) def put_job( self, *, @@ -327,6 +338,7 @@ def put_job( @_rewrite_parameters( body_fields=("aggregations", "aggs", "query", "size"), ) + @_stability_warning(Stability.EXPERIMENTAL) def rollup_search( self, *, @@ -397,6 +409,7 @@ def rollup_search( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) def start_job( self, *, @@ -437,6 +450,7 @@ def start_job( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) def stop_job( self, *, diff --git a/elasticsearch/_sync/client/search_application.py b/elasticsearch/_sync/client/search_application.py index 7b7619fa7..42b042434 100644 --- a/elasticsearch/_sync/client/search_application.py +++ b/elasticsearch/_sync/client/search_application.py @@ -20,12 +20,19 @@ from elastic_transport import ObjectApiResponse from ._base import NamespacedClient -from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters +from .utils import ( + SKIP_IN_PATH, + Stability, + _quote, + _rewrite_parameters, + _stability_warning, +) class SearchApplicationClient(NamespacedClient): @_rewrite_parameters() + @_stability_warning(Stability.BETA) def delete( self, *, @@ -67,6 +74,7 @@ def delete( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) def delete_behavioral_analytics( self, *, @@ -108,6 +116,7 @@ def delete_behavioral_analytics( ) @_rewrite_parameters() + @_stability_warning(Stability.BETA) def get( self, *, @@ -148,6 +157,7 @@ def get( ) @_rewrite_parameters() + 
@_stability_warning(Stability.EXPERIMENTAL) def get_behavioral_analytics( self, *, @@ -193,6 +203,7 @@ def get_behavioral_analytics( @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) + @_stability_warning(Stability.BETA) def list( self, *, @@ -243,6 +254,7 @@ def list( @_rewrite_parameters( body_name="search_application", ) + @_stability_warning(Stability.BETA) def put( self, *, @@ -299,6 +311,7 @@ def put( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) def put_behavioral_analytics( self, *, @@ -342,6 +355,7 @@ def put_behavioral_analytics( body_fields=("params",), ignore_deprecated_options={"params"}, ) + @_stability_warning(Stability.BETA) def search( self, *, diff --git a/elasticsearch/_sync/client/searchable_snapshots.py b/elasticsearch/_sync/client/searchable_snapshots.py index 2db8dfb04..45104eefc 100644 --- a/elasticsearch/_sync/client/searchable_snapshots.py +++ b/elasticsearch/_sync/client/searchable_snapshots.py @@ -20,12 +20,19 @@ from elastic_transport import ObjectApiResponse from ._base import NamespacedClient -from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters +from .utils import ( + SKIP_IN_PATH, + Stability, + _quote, + _rewrite_parameters, + _stability_warning, +) class SearchableSnapshotsClient(NamespacedClient): @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) def cache_stats( self, *, @@ -75,6 +82,7 @@ def cache_stats( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) def clear_cache( self, *, diff --git a/elasticsearch/_sync/client/snapshot.py b/elasticsearch/_sync/client/snapshot.py index a52498c63..e65dee0ec 100644 --- a/elasticsearch/_sync/client/snapshot.py +++ b/elasticsearch/_sync/client/snapshot.py @@ -20,7 +20,13 @@ from elastic_transport import ObjectApiResponse from ._base import NamespacedClient -from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters +from .utils import ( + SKIP_IN_PATH, + Stability, + _quote, + _rewrite_parameters, + _stability_warning, +) class SnapshotClient(NamespacedClient): @@ -617,6 +623,7 @@ def get_repository( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) def repository_verify_integrity( self, *, diff --git a/elasticsearch/_sync/client/tasks.py b/elasticsearch/_sync/client/tasks.py index 5474ef2de..f69ef007c 100644 --- a/elasticsearch/_sync/client/tasks.py +++ b/elasticsearch/_sync/client/tasks.py @@ -20,12 +20,19 @@ from elastic_transport import ObjectApiResponse from ._base import NamespacedClient -from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters +from .utils import ( + SKIP_IN_PATH, + Stability, + _quote, + _rewrite_parameters, + _stability_warning, +) class TasksClient(NamespacedClient): @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) def cancel( self, *, @@ -87,6 +94,7 @@ def cancel( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) def get( self, *, @@ -138,6 +146,7 @@ def get( ) @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) def list( self, *, diff --git a/elasticsearch/_sync/client/utils.py b/elasticsearch/_sync/client/utils.py index 1d1a983ac..959f61bfd 100644 --- a/elasticsearch/_sync/client/utils.py +++ b/elasticsearch/_sync/client/utils.py @@ -20,6 +20,7 @@ import urllib.parse import warnings from datetime import date, datetime +from enum import Enum, auto from functools import wraps from typing import ( TYPE_CHECKING, @@ -55,6 +56,8 @@ url_to_node_config, ) +from elasticsearch.exceptions import GeneralAvailabilityWarning + from 
..._version import __versionstr__ from ...compat import to_bytes, to_str, warn_stacklevel @@ -70,6 +73,13 @@ # Default User-Agent used by the client USER_AGENT = create_user_agent("elasticsearch-py", __versionstr__) + +class Stability(Enum): + STABLE = auto() + BETA = auto() + EXPERIMENTAL = auto() + + _TYPE_HOSTS = Union[ str, Sequence[Union[str, Mapping[str, Union[str, int]], NodeConfig]] ] @@ -450,6 +460,37 @@ def wrapped(*args: Any, **kwargs: Any) -> Any: return wrapper +def _stability_warning( + stability: Stability, + version: Optional[str] = None, + message: Optional[str] = None, +) -> Callable[[F], F]: + def wrapper(api: F) -> F: + @wraps(api) + def wrapped(*args: Any, **kwargs: Any) -> Any: + if stability == Stability.BETA: + warnings.warn( + "This API is in beta and is subject to change. " + "The design and code is less mature than official GA features and is being provided as-is with no warranties. " + "Beta features are not subject to the support SLA of official GA features.", + category=GeneralAvailabilityWarning, + stacklevel=warn_stacklevel(), + ) + elif stability == Stability.EXPERIMENTAL: + warnings.warn( + "This API is in technical preview and may be changed or removed in a future release. " + "Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features.", + category=GeneralAvailabilityWarning, + stacklevel=warn_stacklevel(), + ) + + return api(*args, **kwargs) + + return wrapped # type: ignore[return-value] + + return wrapper + + def is_requests_http_auth(http_auth: Any) -> bool: """Detect if an http_auth value is a custom Requests auth object""" try: diff --git a/elasticsearch/exceptions.py b/elasticsearch/exceptions.py index f58706774..dc410ae30 100644 --- a/elasticsearch/exceptions.py +++ b/elasticsearch/exceptions.py @@ -115,6 +115,10 @@ class ElasticsearchWarning(TransportWarning): """ +class GeneralAvailabilityWarning(TransportWarning): + """Warning that is raised when a feature is not yet GA.""" + + # Aliases for backwards compatibility ElasticsearchDeprecationWarning = ElasticsearchWarning RequestError = BadRequestError diff --git a/test_elasticsearch/test_client/test_utils.py b/test_elasticsearch/test_client/test_utils.py index e53145bfd..e4713ff1c 100644 --- a/test_elasticsearch/test_client/test_utils.py +++ b/test_elasticsearch/test_client/test_utils.py @@ -16,7 +16,12 @@ # under the License. 
-from elasticsearch._sync.client.utils import _quote +import warnings + +import pytest + +from elasticsearch._sync.client.utils import Stability, _quote, _stability_warning +from elasticsearch.exceptions import GeneralAvailabilityWarning def test_handles_ascii(): @@ -36,3 +41,39 @@ def test_handles_unicode(): def test_handles_unicode2(): string = "中*文," assert "%E4%B8%AD*%E6%96%87," == _quote(string) + + +class TestStabilityWarning: + def test_default(self): + + @_stability_warning(stability=Stability.STABLE) + def func_default(*args, **kwargs): + pass + + with warnings.catch_warnings(): + warnings.simplefilter("error") + func_default() + + def test_beta(self, recwarn): + + @_stability_warning(stability=Stability.BETA) + def func_beta(*args, **kwargs): + pass + + with pytest.warns( + GeneralAvailabilityWarning, + match="This API is in beta and is subject to change.", + ): + func_beta() + + def test_experimental(self, recwarn): + + @_stability_warning(stability=Stability.EXPERIMENTAL) + def func_experimental(*args, **kwargs): + pass + + with pytest.warns( + GeneralAvailabilityWarning, + match="This API is in technical preview and may be changed or removed in a future release.", + ): + func_experimental() From 2d35e5280207d295549636248702c853811e3828 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 10 Nov 2024 22:05:04 +0400 Subject: [PATCH 05/65] [Backport 8.x] Switch to trustme for test certificates (#2688) Co-authored-by: Quentin Pradet --- .buildkite/certs/README.md | 26 ++++++++++++++++++++++++++ .buildkite/certs/ca.crt | 28 ++++++++++------------------ .buildkite/certs/ca.pem | 20 -------------------- .buildkite/certs/testnode.crt | 30 ++++++++++++------------------ .buildkite/certs/testnode.key | 32 +++++--------------------------- 5 files changed, 53 insertions(+), 83 deletions(-) create mode 100644 .buildkite/certs/README.md mode change 100755 => 100644 .buildkite/certs/ca.crt delete mode 100644 .buildkite/certs/ca.pem mode change 100755 => 100644 .buildkite/certs/testnode.crt mode change 100755 => 100644 .buildkite/certs/testnode.key diff --git a/.buildkite/certs/README.md b/.buildkite/certs/README.md new file mode 100644 index 000000000..63453aa1f --- /dev/null +++ b/.buildkite/certs/README.md @@ -0,0 +1,26 @@ +# CI certificates + +This directory contains certificates that can be used to test against Elasticsearch in CI + +## Generating new certificates using the Certificate Authority cert and key + +Before adding support for Python 3.13, we generated certificates with +[`elasticsearch-certutil`](https://www.elastic.co/guide/en/elasticsearch/reference/current/certutil.html). +However, those certificates are not compliant with RFC 5280, and Python now +enforces compliance by enabling the VERIFY_X509_STRICT flag by default. + +If you need to generate new certificates, you can do so with +[trustme](https://trustme.readthedocs.io/en/latest/) as follows: + +``` +```bash +pip install trustme +python -m trustme --identities instance +# Use the filenames expected by our tests +mv client.pem ca.crt +mv server.pem testnode.crt +mv server.key testnode.key +``` + +For more control over the generated certificates, trustme also offers a Python +API, but we have not needed it so far. 
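+Should finer-grained control ever be needed, a minimal sketch using trustme's
+Python API (assuming its `CA`/`issue_cert` interface; equivalent to the shell
+commands above) could look like:
+
+```python
+import trustme
+
+# One throwaway CA plus a server certificate for the "instance" identity,
+# written out under the filenames our tests expect.
+ca = trustme.CA()
+server_cert = ca.issue_cert("instance")
+
+ca.cert_pem.write_to_path("ca.crt")
+server_cert.cert_chain_pems[0].write_to_path("testnode.crt")
+server_cert.private_key_pem.write_to_path("testnode.key")
+```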
diff --git a/.buildkite/certs/ca.crt b/.buildkite/certs/ca.crt old mode 100755 new mode 100644 index 5ed1c9853..f39d4c4a9 --- a/.buildkite/certs/ca.crt +++ b/.buildkite/certs/ca.crt @@ -1,20 +1,12 @@ -----BEGIN CERTIFICATE----- -MIIDSTCCAjGgAwIBAgIUHTeTPPuZIX3wdyudMsllXa9yZ1kwDQYJKoZIhvcNAQEL -BQAwNDEyMDAGA1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5l -cmF0ZWQgQ0EwHhcNMjMwODIxMTcyNTMyWhcNMjYwODIwMTcyNTMyWjA0MTIwMAYD -VQQDEylFbGFzdGljIENlcnRpZmljYXRlIFRvb2wgQXV0b2dlbmVyYXRlZCBDQTCC -ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMZs7DXbV7ovFvQ/CiqvHHZ/ -40rLyAcBQMhVBke2VVCQk3hIOPpHYt3xZgb61Oyrf14lFxny483beXaUqGThZ67Y -RsxzSOS8NUi21OLZ3xaE+p+Yx9Xe6lTMQJM4RpD/A5V35uikji1K4+F0ooJghELq -Fndmark/7SQFh6Bg8/aaf6Hpyar3WOWdQjHXgszNAv1Ez7+pPlfnCS8XNjYB5Y2n -gAayb1krMRW/3E6hRVZAig3I2H8mezL5tF8iS5aJW1WLpw4oYnbH0DdS+gpCK1lT -8GZd8Dk0QbNGpXNTu67BravVhgEoprBVMz6G1C4MiuVcBy7gA671/f46S4Tgb10C -AwEAAaNTMFEwHQYDVR0OBBYEFHVhRrHXbd5QFEgk3RFn4Y4LYo9PMB8GA1UdIwQY -MBaAFHVhRrHXbd5QFEgk3RFn4Y4LYo9PMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI -hvcNAQELBQADggEBACoGVPqeYE3IPRdSAtihIq071BfGA8vgfJWv0qiI0T+gYetX -dnebmQc5EccdEIrxD0bLKPgzd5c3ILwQy5+uo03ua7PrplwPVdeNXnU1LodAQ0Zb -GmTixXqgj8AMcvRsA7qARjXvf6w3Yyb7GO3FXRIGtqk12Vb1qnJg894CSIWrHiw0 -hRO5b7eJyrOy2s6QA6FucM/scM1Z/8D9tHfgwmrKM875VGerJORwfHCaCvF1YvBj -cIpYNnw2vFzDvRevh63sSQbZ9q3nbtD27AZSN9LKEbipSEOoBZMKG2zgDTT/Olzx -EQJ2t+Z487UuFX6+WaLZMteL2F4eh9OFWIYM3EI= +MIIByTCCAW+gAwIBAgIUIYClYWXiTsB8aMrEEMrzdrk5rOswCgYIKoZIzj0EAwIw +QDEXMBUGA1UECgwOdHJ1c3RtZSB2MS4yLjAxJTAjBgNVBAsMHFRlc3RpbmcgQ0Eg +I2JpdzFXYzEwbHBxQ0ZRTDUwIBcNMDAwMTAxMDAwMDAwWhgPMzAwMDAxMDEwMDAw +MDBaMEAxFzAVBgNVBAoMDnRydXN0bWUgdjEuMi4wMSUwIwYDVQQLDBxUZXN0aW5n +IENBICNiaXcxV2MxMGxwcUNGUUw1MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE +SN7++A76LmOR0tKKra1M6VVzGUljjL9fVPxOEIblOOJJhA7mKLQguNzEHjucNV23 +LcDzMX/M/oUBGdYZBbAv4qNFMEMwHQYDVR0OBBYEFCrGGcO9v0UAWSsD93P/x2MT +NiJbMBIGA1UdEwEB/wQIMAYBAf8CAQkwDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49 +BAMCA0gAMEUCIQDGyO21zIAwmARtoc2atVmmqZdPVkegHkCKCFY4P+KeEAIgKMCz +aU8LPCVyA+ZF9K+tcqkNK5h/5s7wlQ5DSeKSuE8= -----END CERTIFICATE----- diff --git a/.buildkite/certs/ca.pem b/.buildkite/certs/ca.pem deleted file mode 100644 index 5ed1c9853..000000000 --- a/.buildkite/certs/ca.pem +++ /dev/null @@ -1,20 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDSTCCAjGgAwIBAgIUHTeTPPuZIX3wdyudMsllXa9yZ1kwDQYJKoZIhvcNAQEL -BQAwNDEyMDAGA1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5l -cmF0ZWQgQ0EwHhcNMjMwODIxMTcyNTMyWhcNMjYwODIwMTcyNTMyWjA0MTIwMAYD -VQQDEylFbGFzdGljIENlcnRpZmljYXRlIFRvb2wgQXV0b2dlbmVyYXRlZCBDQTCC -ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMZs7DXbV7ovFvQ/CiqvHHZ/ -40rLyAcBQMhVBke2VVCQk3hIOPpHYt3xZgb61Oyrf14lFxny483beXaUqGThZ67Y -RsxzSOS8NUi21OLZ3xaE+p+Yx9Xe6lTMQJM4RpD/A5V35uikji1K4+F0ooJghELq -Fndmark/7SQFh6Bg8/aaf6Hpyar3WOWdQjHXgszNAv1Ez7+pPlfnCS8XNjYB5Y2n -gAayb1krMRW/3E6hRVZAig3I2H8mezL5tF8iS5aJW1WLpw4oYnbH0DdS+gpCK1lT -8GZd8Dk0QbNGpXNTu67BravVhgEoprBVMz6G1C4MiuVcBy7gA671/f46S4Tgb10C -AwEAAaNTMFEwHQYDVR0OBBYEFHVhRrHXbd5QFEgk3RFn4Y4LYo9PMB8GA1UdIwQY -MBaAFHVhRrHXbd5QFEgk3RFn4Y4LYo9PMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI -hvcNAQELBQADggEBACoGVPqeYE3IPRdSAtihIq071BfGA8vgfJWv0qiI0T+gYetX -dnebmQc5EccdEIrxD0bLKPgzd5c3ILwQy5+uo03ua7PrplwPVdeNXnU1LodAQ0Zb -GmTixXqgj8AMcvRsA7qARjXvf6w3Yyb7GO3FXRIGtqk12Vb1qnJg894CSIWrHiw0 -hRO5b7eJyrOy2s6QA6FucM/scM1Z/8D9tHfgwmrKM875VGerJORwfHCaCvF1YvBj -cIpYNnw2vFzDvRevh63sSQbZ9q3nbtD27AZSN9LKEbipSEOoBZMKG2zgDTT/Olzx -EQJ2t+Z487UuFX6+WaLZMteL2F4eh9OFWIYM3EI= ------END CERTIFICATE----- diff --git a/.buildkite/certs/testnode.crt b/.buildkite/certs/testnode.crt old mode 100755 new mode 
100644 index 39eb092fa..74ab6da26 --- a/.buildkite/certs/testnode.crt +++ b/.buildkite/certs/testnode.crt @@ -1,20 +1,14 @@ -----BEGIN CERTIFICATE----- -MIIDODCCAiCgAwIBAgIVAKLWEcNzTd4B0NqnrJL0xAKaS8DWMA0GCSqGSIb3DQEB -CwUAMDQxMjAwBgNVBAMTKUVsYXN0aWMgQ2VydGlmaWNhdGUgVG9vbCBBdXRvZ2Vu -ZXJhdGVkIENBMB4XDTIzMDgyMTE3MjcwMloXDTI2MDgyMDE3MjcwMlowEzERMA8G -A1UEAxMIaW5zdGFuY2UwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC8 -eLXL3ZX5v8JlHcfg+96Bpq24EeiqV+7RPPKbcH80ODjkETqYUpam+TcOl2gt23p/ -rpiPSSpOX8pFdmY78wTmxo2GCQZ/db2h0gZOOYpb8HQku+hJ4bAmtzizrqWW76Wz -csen3DSUkT0bKkJTjUMmwVhRaMpfv8EIcUbrHAwc3VCj7grnFL0kdAuQa6iyBH4I -lTUYOIOVyEJ8zZ7R4BJO3QU+TRuJ5+w/QiZMeDqxtrdDL37vYQHPW7L/XISCCOMp -sA3avzFphoQXBQ8mjdB8Txkd4sH7mJTqnRp5ILhRzVpcPPgQYFeIB567B+kFeSau -aJJmc0EVgOcK5aSMtOH3AgMBAAGjYjBgMB0GA1UdDgQWBBQsZbZDudZ63h52FlU5 -N2g3pznkETAfBgNVHSMEGDAWgBR1YUax123eUBRIJN0RZ+GOC2KPTzATBgNVHREE -DDAKgghpbnN0YW5jZTAJBgNVHRMEAjAAMA0GCSqGSIb3DQEBCwUAA4IBAQAyv0Cw -OrvZn7FHHS8TJI5vTi1F43R/eSNMNL/+q/nK93KaxWJH1T4zrJhrJ9KpzkFcalXP -bu02oTh28b3o3QpS2wdwMv/Q3NLoMBEmQlG2UrELFvV43nS8LCiwCX3o11L1HZP3 -1Z/rclwxbA4OQ/ZkPcol++TDZQTM/8WkIdZmTL4UDb/ppDjX24nTOitkMRZlYAOY -mid9GGExhKrUJ0I9/A3w1hWRA1Hwc+1TFDcPphl2x2uQ9HJFBueAvuFXmIjDki1x -qrvnFZ+mneI9kR4m82MX900WF15KS35GzmMui0tsf0wbfy3Jh+WnpMlIIa2OQXw7 -prbkg9tScQSsvhC8 +MIICKzCCAdKgAwIBAgIUZeLIKR7XTP5Gx/moiuzcWcfHaSswCgYIKoZIzj0EAwIw +QDEXMBUGA1UECgwOdHJ1c3RtZSB2MS4yLjAxJTAjBgNVBAsMHFRlc3RpbmcgQ0Eg +I2JpdzFXYzEwbHBxQ0ZRTDUwIBcNMDAwMTAxMDAwMDAwWhgPMzAwMDAxMDEwMDAw +MDBaMEIxFzAVBgNVBAoMDnRydXN0bWUgdjEuMi4wMScwJQYDVQQLDB5UZXN0aW5n +IGNlcnQgIzNPWkpxTWh0WmxrNGlDMm0wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNC +AASp6UadRZ0ZP3F2KeEkIUOf0B8GOTX55B91RO/PLUQb26wZcWmHGPOJ0HAy9F2E +Y+rJ1zDUnfB5msowei/iuoaMo4GlMIGiMB0GA1UdDgQWBBSP5z3h8b13ul407YOd +kyjKNcf/vTAMBgNVHRMBAf8EAjAAMB8GA1UdIwQYMBaAFCrGGcO9v0UAWSsD93P/ +x2MTNiJbMBYGA1UdEQEB/wQMMAqCCGluc3RhbmNlMA4GA1UdDwEB/wQEAwIFoDAq +BgNVHSUBAf8EIDAeBggrBgEFBQcDAgYIKwYBBQUHAwEGCCsGAQUFBwMDMAoGCCqG +SM49BAMCA0cAMEQCIHPP7chQolK+N+GZ+rJ49euoTSzb2YIU5vnCY/bFEWO+AiBC +OTFYhR9Mw/e+WdJVZO78XZYKy5uA28JwsZuu7E0kZA== -----END CERTIFICATE----- diff --git a/.buildkite/certs/testnode.key b/.buildkite/certs/testnode.key old mode 100755 new mode 100644 index b7458996a..0c7522cd0 --- a/.buildkite/certs/testnode.key +++ b/.buildkite/certs/testnode.key @@ -1,27 +1,5 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAvHi1y92V+b/CZR3H4PvegaatuBHoqlfu0Tzym3B/NDg45BE6 -mFKWpvk3DpdoLdt6f66Yj0kqTl/KRXZmO/ME5saNhgkGf3W9odIGTjmKW/B0JLvo -SeGwJrc4s66llu+ls3LHp9w0lJE9GypCU41DJsFYUWjKX7/BCHFG6xwMHN1Qo+4K -5xS9JHQLkGuosgR+CJU1GDiDlchCfM2e0eASTt0FPk0biefsP0ImTHg6sba3Qy9+ -72EBz1uy/1yEggjjKbAN2r8xaYaEFwUPJo3QfE8ZHeLB+5iU6p0aeSC4Uc1aXDz4 -EGBXiAeeuwfpBXkmrmiSZnNBFYDnCuWkjLTh9wIDAQABAoIBAAU0iEDTI9s78pB8 -XBLYofKOuemFhRl/SDc7KbAlUT4N93RFDYs7bLG73Eto3xW1JBL2rXv3l1WGy71T -YctyEMaW4T28bhODGvOnK0lpyWp0n6CMGARCWW0YTlaYEjay866bEuyN5l3cDQX9 -Csvn8NzXJitJa51tXFVxW3YO1j7Nyc/M59oyBZ1ARYYmQqFYLEu6lvJOW0cKDFkZ -AcMVlOIxZQL/Mf+RO72aQGVuYNjqxlLIXLuE9zFR2gDFM2+l3FMUWDGHGBDFyjKU -iMk4+sSlOTFXqO9VQzua6FLFMsQT6m5PFD4uPY92KR6CPfH/NrWqwqr+jpjaU+gs -3U9GN+ECgYEA58qX7tKPk7CWdk3kyk5NsNcs/qib+heXWEubfhoU8LmSnbBQhOAz -wi//r/xm0OHGj84y66+G3T347iudrLjhr07oGM1QfjYT3kb90efLjwAfCECtyVYL -EQrWO5UeoTnmrhlB1mGL3sWaVAsVqNLz8i2H5c7sj0hxHsvM62159r8CgYEA0Cff -opJqmUpMpHm3sgjMWctylVrHBuQe5cl5Ad80pbd6mvtt4TvGXbUGNdzURfyve9DS -x1CVlj4Sz8VuelFQgYL+7/qUqZoms1aSgJpxWv8ou+wUHmlF3kVO8VKt3BNHV+8J -euSB6NG91BGguBoHgnOoVcjbDGdhJGRTojCNWskCgYEA1jE3nwDCnrbTA3XNk0ky -r9TXhmgm4r+EIpqTkL7nVOAXZVJ1xaQtECgsveKe3C2WwHLKSVMFbFMFQonZha+/ 
-FbHz9l9cH5U3XPL7QEpTp8xz4LtsHJ4/UbtS5vJQwKnxyjYaydGQYAb4KuunUz/F -H6kFaM6DeZB2v/+SWIfs6Z8CgYARUdAEyeP+vzTjVpFXSe4e5pOxI619wEtl2T6t -TjImO78C2DrLS9r0fxR2NNqgvCapybVQCj94EdAk44uOt+dX71thAluORRpFP8XO -14rpBGQSRtFhumaq2N95quR2dFAyW9xREmRQx+rgk1rpFplbXF48TQsU3CE0Evj2 -fM22KQKBgDhob7M9sWvXecxoyy3J17jUTcFqmqKcqGnx3ZJ7Q9CgAfjYqNNQws27 -wTuaJB0PEuCOu4t+lUHEBMIjGkBfo1bHd4EZaW04Xgbfn2j8MK2e+9GlRtedxxFg -c1JdRb5+eTgPwLcDsmMWIW357PDW7RDEI07G1ZB4SqxGTKkU7JOW ------END RSA PRIVATE KEY----- +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIN+K8+F47YchiH+7gA8KBG8u35PWcOJN+Fszv8TPEEpdoAoGCCqGSM49 +AwEHoUQDQgAEqelGnUWdGT9xdinhJCFDn9AfBjk1+eQfdUTvzy1EG9usGXFphxjz +idBwMvRdhGPqydcw1J3weZrKMHov4rqGjA== +-----END EC PRIVATE KEY----- From c2cfb70b3cd2b8072e181b30d067cea5ced13e63 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 10 Nov 2024 22:17:29 +0400 Subject: [PATCH 06/65] [Backport 8.x] Support Python 3.13 (#2690) Co-authored-by: Quentin Pradet --- .buildkite/Dockerfile | 2 +- .buildkite/pipeline.yml | 5 +++-- .buildkite/run-tests | 4 ++-- .github/workflows/ci.yml | 2 +- .readthedocs.yml | 2 +- noxfile.py | 4 ++-- pyproject.toml | 3 ++- .../test_server/test_vectorstore/test_vectorstore.py | 4 ++++ 8 files changed, 16 insertions(+), 10 deletions(-) diff --git a/.buildkite/Dockerfile b/.buildkite/Dockerfile index 7b0eb2e8e..a68ad997d 100644 --- a/.buildkite/Dockerfile +++ b/.buildkite/Dockerfile @@ -1,4 +1,4 @@ -ARG PYTHON_VERSION=3.12 +ARG PYTHON_VERSION=3.13 FROM python:${PYTHON_VERSION} # Default UID/GID to 1000 diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 16bf81360..9ec0f81b0 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -1,5 +1,5 @@ steps: - - label: ":elasticsearch: :python: ES Python ({{ matrix.python }}) {{ matrix.nox_session }} " + - label: ":elasticsearch: :python: ES Python {{ matrix.python }} {{ matrix.nox_session }} ({{ matrix.connection }})" agents: provider: "gcp" env: @@ -16,6 +16,7 @@ steps: - "3.10" - "3.11" - "3.12" + - "3.13" connection: - "urllib3" - "requests" @@ -27,7 +28,7 @@ steps: connection: "urllib3" nox_session: "test_otel" - with: - python: "3.12" + python: "3.13" connection: "urllib3" nox_session: "test_otel" command: ./.buildkite/run-tests diff --git a/.buildkite/run-tests b/.buildkite/run-tests index 7e7339fb6..5d6b38039 100755 --- a/.buildkite/run-tests +++ b/.buildkite/run-tests @@ -7,7 +7,7 @@ # Default environment variables export STACK_VERSION="${STACK_VERSION:=8.0.0-SNAPSHOT}" export TEST_SUITE="${TEST_SUITE:=platinum}" -export PYTHON_VERSION="${PYTHON_VERSION:=3.9}" +export PYTHON_VERSION="${PYTHON_VERSION:=3.13}" export PYTHON_CONNECTION_CLASS="${PYTHON_CONNECTION_CLASS:=urllib3}" script_path=$(dirname $(realpath -s $0)) @@ -18,7 +18,7 @@ echo "--- :elasticsearch: Starting Elasticsearch" DETACH=true bash $script_path/run-elasticsearch.sh if [[ -n "$RUNSCRIPTS" ]]; then - for RUNSCRIPT in ${RUNSCRIPTS//,/ } ; do + for RUNSCRIPT in ${RUNSCRIPTS//,/ }; do echo -e "\033[1m>>>>> Running run-$RUNSCRIPT.sh >>>>>>>>>>>>>>>>>>>>>>>>>>>>>\033[0m" CONTAINER_NAME=${RUNSCRIPT} \ DETACH=true \ diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 94c554900..1d7b8ecfd 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -23,7 +23,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] + python-version: ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] nox-session: [""] runs-on: ["ubuntu-latest"] 
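One recurring pattern in this change: compiled optional dependencies such as
`simsimd` may not publish wheels for a brand-new interpreter, so `pyproject.toml`
below restricts them with an environment marker and the affected tests skip
themselves when the package is absent. A minimal sketch of that test-side guard
(the test name here is illustrative; it mirrors the test changes further down):

```python
import pytest


def test_needs_simsimd() -> None:
    # Skip cleanly when the optional accelerator is unavailable,
    # e.g. on Python 3.13 where the environment marker excludes it.
    pytest.importorskip("simsimd")
```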
diff --git a/.readthedocs.yml b/.readthedocs.yml index eee10c078..ccbd86512 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -7,7 +7,7 @@ build: # to set AIOHTTP_NO_EXTENSIONS to 1 but it has to be done in # https://readthedocs.org/dashboard/elasticsearch-py/environmentvariables/ # because of https://github.com/readthedocs/readthedocs.org/issues/6311 - python: "3.12" + python: "3" python: install: diff --git a/noxfile.py b/noxfile.py index 2f9bc3322..b42ed0d2f 100644 --- a/noxfile.py +++ b/noxfile.py @@ -45,14 +45,14 @@ def pytest_argv(): ] -@nox.session(python=["3.8", "3.9", "3.10", "3.11", "3.12"]) +@nox.session(python=["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"]) def test(session): session.install(".[dev]", env=INSTALL_ENV, silent=False) session.run(*pytest_argv(), *session.posargs) -@nox.session(python=["3.8", "3.12"]) +@nox.session(python=["3.8", "3.13"]) def test_otel(session): session.install( ".[dev]", diff --git a/pyproject.toml b/pyproject.toml index 1a2563680..8640b40fa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -26,6 +26,7 @@ classifiers = [ "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", ] @@ -69,7 +70,7 @@ dev = [ "nox", "orjson", "numpy", - "simsimd", + "simsimd ; python_version<'3.13'", "pyarrow", "pandas", "mapbox-vector-tile", diff --git a/test_elasticsearch/test_server/test_vectorstore/test_vectorstore.py b/test_elasticsearch/test_server/test_vectorstore/test_vectorstore.py index 3e17442eb..7b675a754 100644 --- a/test_elasticsearch/test_server/test_vectorstore/test_vectorstore.py +++ b/test_elasticsearch/test_server/test_vectorstore/test_vectorstore.py @@ -899,6 +899,8 @@ def test_max_marginal_relevance_search_errors( self, sync_client: Elasticsearch, index: str ) -> None: """Test max marginal relevance search error conditions.""" + pytest.importorskip("simsimd") + texts = ["foo", "bar", "baz"] vector_field = "vector_field" embedding_service = ConsistentFakeEmbeddings() @@ -940,6 +942,8 @@ def test_max_marginal_relevance_search( self, sync_client: Elasticsearch, index: str ) -> None: """Test max marginal relevance search.""" + pytest.importorskip("simsimd") + texts = ["foo", "bar", "baz"] vector_field = "vector_field" text_field = "text_field" From ab926231566fd808cd05c441d8da5be7873f5edb Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 11 Nov 2024 07:53:26 +0100 Subject: [PATCH 07/65] Auto-generated API code (#2692) --- elasticsearch/_async/client/__init__.py | 73 ++++++---- elasticsearch/_async/client/async_search.py | 8 +- elasticsearch/_async/client/autoscaling.py | 25 +++- elasticsearch/_async/client/security.py | 152 ++++++++++++++++++++ elasticsearch/_sync/client/__init__.py | 73 ++++++---- elasticsearch/_sync/client/async_search.py | 8 +- elasticsearch/_sync/client/autoscaling.py | 25 +++- elasticsearch/_sync/client/security.py | 152 ++++++++++++++++++++ 8 files changed, 442 insertions(+), 74 deletions(-) diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py index 3086538e2..d5985ca05 100644 --- a/elasticsearch/_async/client/__init__.py +++ b/elasticsearch/_async/client/__init__.py @@ -743,7 +743,8 @@ async def clear_scroll( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clears the search context and results for a 
scrolling search. + Clear a scrolling search. Clear the search context and results for a scrolling + search. ``_ @@ -793,7 +794,11 @@ async def close_point_in_time( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Closes a point-in-time. + Close a point in time. A point in time must be opened explicitly before being + used in search requests. The `keep_alive` parameter tells Elasticsearch how long + it should persist. A point in time is automatically closed when the `keep_alive` + period has elapsed. However, keeping points in time has a cost; close them as + soon as they are no longer required for search requests. ``_ @@ -1844,10 +1849,11 @@ async def field_caps( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - The field capabilities API returns the information about the capabilities of - fields among multiple indices. The field capabilities API returns runtime fields - like any other field. For example, a runtime field with a type of keyword is - returned as any other field that belongs to the `keyword` family. + Get the field capabilities. Get information about the capabilities of fields + among multiple indices. For data streams, the API returns field capabilities + among the stream’s backing indices. It returns runtime fields like any other + field. For example, a runtime field with a type of keyword is returned the same + as any other field that belongs to the `keyword` family. ``_ @@ -2490,6 +2496,7 @@ async def info( ), parameter_aliases={"_source": "source"}, ) + @_stability_warning(Stability.EXPERIMENTAL) async def knn_search( self, *, @@ -2510,7 +2517,15 @@ async def knn_search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Performs a kNN search. + Run a knn search. NOTE: The kNN search API has been replaced by the `knn` option + in the search API. Perform a k-nearest neighbor (kNN) search on a dense_vector + field and return the matching documents. Given a query vector, the API finds + the k closest vectors and returns those documents as search hits. Elasticsearch + uses the HNSW algorithm to support efficient kNN search. Like most kNN algorithms, + HNSW is an approximate method that sacrifices result accuracy for improved search + speed. This means the results returned are not always the true k closest neighbors. + The kNN search API supports restricting the search using a filter. The search + will return the top k documents that also match the filter query. ``_ @@ -2873,7 +2888,7 @@ async def msearch_template( typed_keys: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Runs multiple templated searches with a single request. + Run multiple templated searches. ``_ @@ -3083,13 +3098,15 @@ async def open_point_in_time( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - A search request by default executes against the most recent visible data of - the target indices, which is called point in time. Elasticsearch pit (point in - time) is a lightweight view into the state of the data as it existed when initiated. - In some cases, it’s preferred to perform multiple search requests using the same - point in time. For example, if refreshes happen between `search_after` requests, - then the results of those requests might not be consistent as changes happening - between searches are only visible to the more recent point in time. + Open a point in time. 
A search request by default runs against the most recent + visible data of the target indices, which is called point in time. Elasticsearch + pit (point in time) is a lightweight view into the state of the data as it existed + when initiated. In some cases, it’s preferred to perform multiple search requests + using the same point in time. For example, if refreshes happen between `search_after` + requests, then the results of those requests might not be consistent as changes + happening between searches are only visible to the more recent point in time. + A point in time must be opened explicitly before being used in search requests. + The `keep_alive` parameter tells Elasticsearch how long it should persist. ``_ @@ -3256,8 +3273,8 @@ async def rank_eval( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables you to evaluate the quality of ranked search results over a set of typical - search queries. + Evaluate ranked search results. Evaluate the quality of ranked search results + over a set of typical search queries. ``_ @@ -3501,7 +3518,7 @@ async def render_search_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Renders a search template as a search request body. + Render a search template. Render a search template as a search request body. ``_ @@ -3833,9 +3850,9 @@ async def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns search hits that match the query defined in the request. You can provide - search queries using the `q` query string parameter or the request body. If both - are specified, only the query parameter is used. + Run a search. Get search hits that match the query defined in the request. You + can provide search queries using the `q` query string parameter or the request + body. If both are specified, only the query parameter is used. ``_ @@ -4265,7 +4282,7 @@ async def search_mvt( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> BinaryApiResponse: """ - Search a vector tile. Searches a vector tile for geospatial values. + Search a vector tile. Search a vector tile for geospatial values. ``_ @@ -4419,8 +4436,10 @@ async def search_shards( routing: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about the indices and shards that a search request would - be executed against. + Get the search shards. Get the indices and shards that a search request would + be run against. This information can be useful for working out issues or planning + optimizations with routing and shard preferences. When filtered aliases are used, + the filter is returned as part of the indices section. ``_ @@ -4521,7 +4540,7 @@ async def search_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Runs a search with a search template. + Run a search with a search template. ``_ @@ -4759,8 +4778,8 @@ async def termvectors( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get term vector information. Returns information and statistics about terms in - the fields of a particular document. + Get term vector information. Get information and statistics about terms in the + fields of a particular document. 
``_ diff --git a/elasticsearch/_async/client/async_search.py b/elasticsearch/_async/client/async_search.py index d5f4e78b8..0093273ca 100644 --- a/elasticsearch/_async/client/async_search.py +++ b/elasticsearch/_async/client/async_search.py @@ -148,10 +148,10 @@ async def status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get async search status. Retrieve the status of a previously submitted async - search request given its identifier, without retrieving search results. If the - Elasticsearch security features are enabled, use of this API is restricted to - the `monitoring_user` role. + Get the async search status. Get the status of a previously submitted async search + request given its identifier, without retrieving search results. If the Elasticsearch + security features are enabled, use of this API is restricted to the `monitoring_user` + role. ``_ diff --git a/elasticsearch/_async/client/autoscaling.py b/elasticsearch/_async/client/autoscaling.py index f4bdd444e..cb2eccabf 100644 --- a/elasticsearch/_async/client/autoscaling.py +++ b/elasticsearch/_async/client/autoscaling.py @@ -36,7 +36,8 @@ async def delete_autoscaling_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. + Delete an autoscaling policy. NOTE: This feature is designed for indirect use + by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. ``_ @@ -76,8 +77,18 @@ async def get_autoscaling_capacity( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets the current autoscaling capacity based on the configured autoscaling policy. - Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. + Get the autoscaling capacity. NOTE: This feature is designed for indirect use + by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. + Direct use is not supported. This API gets the current autoscaling capacity based + on the configured autoscaling policy. It will return information to size the + cluster appropriately to the current workload. The `required_capacity` is calculated + as the maximum of the `required_capacity` result of all individual deciders that + are enabled for the policy. The operator should verify that the `current_nodes` + match the operator’s knowledge of the cluster to avoid making autoscaling decisions + based on stale or incomplete information. The response contains decider-specific + information you can use to diagnose how and why autoscaling determined a certain + capacity was required. This information is provided for diagnosis only. Do not + use this information to make autoscaling decisions. ``_ """ @@ -113,7 +124,8 @@ async def get_autoscaling_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. + Get an autoscaling policy. NOTE: This feature is designed for indirect use by + Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. ``_ @@ -158,8 +170,9 @@ async def put_autoscaling_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new autoscaling policy. Designed for indirect use by ECE/ESS and ECK. - Direct use is not supported. + Create or update an autoscaling policy. 
NOTE: This feature is designed for indirect + use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on + Kubernetes. Direct use is not supported. ``_ diff --git a/elasticsearch/_async/client/security.py b/elasticsearch/_async/client/security.py index b28da2d6f..f7e3d7ff7 100644 --- a/elasticsearch/_async/client/security.py +++ b/elasticsearch/_async/client/security.py @@ -617,6 +617,90 @@ async def create_api_key( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("access", "name", "expiration", "metadata"), + ) + async def create_cross_cluster_api_key( + self, + *, + access: t.Optional[t.Mapping[str, t.Any]] = None, + name: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + expiration: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + metadata: t.Optional[t.Mapping[str, t.Any]] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Create a cross-cluster API key. Create an API key of the `cross_cluster` type + for the API key based remote cluster access. A `cross_cluster` API key cannot + be used to authenticate through the REST interface. IMPORTANT: To authenticate + this request you must use a credential that is not an API key. Even if you use + an API key that has the required privilege, the API returns an error. Cross-cluster + API keys are created by the Elasticsearch API key service, which is automatically + enabled. NOTE: Unlike REST API keys, a cross-cluster API key does not capture + permissions of the authenticated user. The API key’s effective permission is + exactly as specified with the `access` property. A successful request returns + a JSON structure that contains the API key, its unique ID, and its name. If applicable, + it also returns expiration information for the API key in milliseconds. By default, + API keys never expire. You can specify expiration information when you create + the API keys. Cross-cluster API keys can only be updated with the update cross-cluster + API key API. Attempting to update them with the update REST API key API or the + bulk update REST API keys API will result in an error. + + ``_ + + :param access: The access to be granted to this API key. The access is composed + of permissions for cross-cluster search and cross-cluster replication. At + least one of them must be specified. NOTE: No explicit privileges should + be specified for either search or replication access. The creation process + automatically converts the access specification to a role descriptor which + has relevant privileges assigned accordingly. + :param name: Specifies the name for this API key. + :param expiration: Expiration time for the API key. By default, API keys never + expire. + :param metadata: Arbitrary metadata that you want to associate with the API key. + It supports nested data structure. Within the metadata object, keys beginning + with `_` are reserved for system usage. 
+ """ + if access is None and body is None: + raise ValueError("Empty value passed for parameter 'access'") + if name is None and body is None: + raise ValueError("Empty value passed for parameter 'name'") + __path_parts: t.Dict[str, str] = {} + __path = "/_security/cross_cluster/api_key" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if access is not None: + __body["access"] = access + if name is not None: + __body["name"] = name + if expiration is not None: + __body["expiration"] = expiration + if metadata is not None: + __body["metadata"] = metadata + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="security.create_cross_cluster_api_key", + path_parts=__path_parts, + ) + @_rewrite_parameters() async def create_service_token( self, @@ -3491,6 +3575,74 @@ async def update_api_key( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("access", "expiration", "metadata"), + ) + async def update_cross_cluster_api_key( + self, + *, + id: str, + access: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + expiration: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + metadata: t.Optional[t.Mapping[str, t.Any]] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Update a cross-cluster API key. Update the attributes of an existing cross-cluster + API key, which is used for API key based remote cluster access. + + ``_ + + :param id: The ID of the cross-cluster API key to update. + :param access: The access to be granted to this API key. The access is composed + of permissions for cross cluster search and cross cluster replication. At + least one of them must be specified. When specified, the new access assignment + fully replaces the previously assigned access. + :param expiration: Expiration time for the API key. By default, API keys never + expire. This property can be omitted to leave the value unchanged. + :param metadata: Arbitrary metadata that you want to associate with the API key. + It supports nested data structure. Within the metadata object, keys beginning + with `_` are reserved for system usage. When specified, this information + fully replaces metadata previously associated with the API key. 
+ """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + if access is None and body is None: + raise ValueError("Empty value passed for parameter 'access'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_security/cross_cluster/api_key/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if access is not None: + __body["access"] = access + if expiration is not None: + __body["expiration"] = expiration + if metadata is not None: + __body["metadata"] = metadata + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="security.update_cross_cluster_api_key", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=("data", "labels"), ) diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py index d92347291..adf877523 100644 --- a/elasticsearch/_sync/client/__init__.py +++ b/elasticsearch/_sync/client/__init__.py @@ -741,7 +741,8 @@ def clear_scroll( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clears the search context and results for a scrolling search. + Clear a scrolling search. Clear the search context and results for a scrolling + search. ``_ @@ -791,7 +792,11 @@ def close_point_in_time( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Closes a point-in-time. + Close a point in time. A point in time must be opened explicitly before being + used in search requests. The `keep_alive` parameter tells Elasticsearch how long + it should persist. A point in time is automatically closed when the `keep_alive` + period has elapsed. However, keeping points in time has a cost; close them as + soon as they are no longer required for search requests. ``_ @@ -1842,10 +1847,11 @@ def field_caps( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - The field capabilities API returns the information about the capabilities of - fields among multiple indices. The field capabilities API returns runtime fields - like any other field. For example, a runtime field with a type of keyword is - returned as any other field that belongs to the `keyword` family. + Get the field capabilities. Get information about the capabilities of fields + among multiple indices. For data streams, the API returns field capabilities + among the stream’s backing indices. It returns runtime fields like any other + field. For example, a runtime field with a type of keyword is returned the same + as any other field that belongs to the `keyword` family. ``_ @@ -2488,6 +2494,7 @@ def info( ), parameter_aliases={"_source": "source"}, ) + @_stability_warning(Stability.EXPERIMENTAL) def knn_search( self, *, @@ -2508,7 +2515,15 @@ def knn_search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Performs a kNN search. + Run a knn search. NOTE: The kNN search API has been replaced by the `knn` option + in the search API. 
Perform a k-nearest neighbor (kNN) search on a dense_vector + field and return the matching documents. Given a query vector, the API finds + the k closest vectors and returns those documents as search hits. Elasticsearch + uses the HNSW algorithm to support efficient kNN search. Like most kNN algorithms, + HNSW is an approximate method that sacrifices result accuracy for improved search + speed. This means the results returned are not always the true k closest neighbors. + The kNN search API supports restricting the search using a filter. The search + will return the top k documents that also match the filter query. ``_ @@ -2871,7 +2886,7 @@ def msearch_template( typed_keys: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Runs multiple templated searches with a single request. + Run multiple templated searches. ``_ @@ -3081,13 +3096,15 @@ def open_point_in_time( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - A search request by default executes against the most recent visible data of - the target indices, which is called point in time. Elasticsearch pit (point in - time) is a lightweight view into the state of the data as it existed when initiated. - In some cases, it’s preferred to perform multiple search requests using the same - point in time. For example, if refreshes happen between `search_after` requests, - then the results of those requests might not be consistent as changes happening - between searches are only visible to the more recent point in time. + Open a point in time. A search request by default runs against the most recent + visible data of the target indices, which is called point in time. Elasticsearch + pit (point in time) is a lightweight view into the state of the data as it existed + when initiated. In some cases, it’s preferred to perform multiple search requests + using the same point in time. For example, if refreshes happen between `search_after` + requests, then the results of those requests might not be consistent as changes + happening between searches are only visible to the more recent point in time. + A point in time must be opened explicitly before being used in search requests. + The `keep_alive` parameter tells Elasticsearch how long it should persist. ``_ @@ -3254,8 +3271,8 @@ def rank_eval( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables you to evaluate the quality of ranked search results over a set of typical - search queries. + Evaluate ranked search results. Evaluate the quality of ranked search results + over a set of typical search queries. ``_ @@ -3499,7 +3516,7 @@ def render_search_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Renders a search template as a search request body. + Render a search template. Render a search template as a search request body. ``_ @@ -3831,9 +3848,9 @@ def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns search hits that match the query defined in the request. You can provide - search queries using the `q` query string parameter or the request body. If both - are specified, only the query parameter is used. + Run a search. Get search hits that match the query defined in the request. You + can provide search queries using the `q` query string parameter or the request + body. If both are specified, only the query parameter is used. 
``_ @@ -4263,7 +4280,7 @@ def search_mvt( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> BinaryApiResponse: """ - Search a vector tile. Searches a vector tile for geospatial values. + Search a vector tile. Search a vector tile for geospatial values. ``_ @@ -4417,8 +4434,10 @@ def search_shards( routing: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about the indices and shards that a search request would - be executed against. + Get the search shards. Get the indices and shards that a search request would + be run against. This information can be useful for working out issues or planning + optimizations with routing and shard preferences. When filtered aliases are used, + the filter is returned as part of the indices section. ``_ @@ -4519,7 +4538,7 @@ def search_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Runs a search with a search template. + Run a search with a search template. ``_ @@ -4757,8 +4776,8 @@ def termvectors( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get term vector information. Returns information and statistics about terms in - the fields of a particular document. + Get term vector information. Get information and statistics about terms in the + fields of a particular document. ``_ diff --git a/elasticsearch/_sync/client/async_search.py b/elasticsearch/_sync/client/async_search.py index b0b2bdaf9..147553dc3 100644 --- a/elasticsearch/_sync/client/async_search.py +++ b/elasticsearch/_sync/client/async_search.py @@ -148,10 +148,10 @@ def status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get async search status. Retrieve the status of a previously submitted async - search request given its identifier, without retrieving search results. If the - Elasticsearch security features are enabled, use of this API is restricted to - the `monitoring_user` role. + Get the async search status. Get the status of a previously submitted async search + request given its identifier, without retrieving search results. If the Elasticsearch + security features are enabled, use of this API is restricted to the `monitoring_user` + role. ``_ diff --git a/elasticsearch/_sync/client/autoscaling.py b/elasticsearch/_sync/client/autoscaling.py index a39e1ba35..6dc45d2a5 100644 --- a/elasticsearch/_sync/client/autoscaling.py +++ b/elasticsearch/_sync/client/autoscaling.py @@ -36,7 +36,8 @@ def delete_autoscaling_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. + Delete an autoscaling policy. NOTE: This feature is designed for indirect use + by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. ``_ @@ -76,8 +77,18 @@ def get_autoscaling_capacity( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets the current autoscaling capacity based on the configured autoscaling policy. - Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. + Get the autoscaling capacity. NOTE: This feature is designed for indirect use + by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. + Direct use is not supported. This API gets the current autoscaling capacity based + on the configured autoscaling policy. It will return information to size the + cluster appropriately to the current workload. 
The `required_capacity` is calculated + as the maximum of the `required_capacity` result of all individual deciders that + are enabled for the policy. The operator should verify that the `current_nodes` + match the operator’s knowledge of the cluster to avoid making autoscaling decisions + based on stale or incomplete information. The response contains decider-specific + information you can use to diagnose how and why autoscaling determined a certain + capacity was required. This information is provided for diagnosis only. Do not + use this information to make autoscaling decisions. ``_ """ @@ -113,7 +124,8 @@ def get_autoscaling_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves an autoscaling policy. Designed for indirect use by ECE/ESS and ECK. + Get an autoscaling policy. NOTE: This feature is designed for indirect use by + Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. ``_ @@ -158,8 +170,9 @@ def put_autoscaling_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new autoscaling policy. Designed for indirect use by ECE/ESS and ECK. - Direct use is not supported. + Create or update an autoscaling policy. NOTE: This feature is designed for indirect + use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on + Kubernetes. Direct use is not supported. ``_ diff --git a/elasticsearch/_sync/client/security.py b/elasticsearch/_sync/client/security.py index fd4b1cac9..35d35a8db 100644 --- a/elasticsearch/_sync/client/security.py +++ b/elasticsearch/_sync/client/security.py @@ -617,6 +617,90 @@ def create_api_key( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("access", "name", "expiration", "metadata"), + ) + def create_cross_cluster_api_key( + self, + *, + access: t.Optional[t.Mapping[str, t.Any]] = None, + name: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + expiration: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + metadata: t.Optional[t.Mapping[str, t.Any]] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Create a cross-cluster API key. Create an API key of the `cross_cluster` type + for the API key based remote cluster access. A `cross_cluster` API key cannot + be used to authenticate through the REST interface. IMPORTANT: To authenticate + this request you must use a credential that is not an API key. Even if you use + an API key that has the required privilege, the API returns an error. Cross-cluster + API keys are created by the Elasticsearch API key service, which is automatically + enabled. NOTE: Unlike REST API keys, a cross-cluster API key does not capture + permissions of the authenticated user. The API key’s effective permission is + exactly as specified with the `access` property. A successful request returns + a JSON structure that contains the API key, its unique ID, and its name. If applicable, + it also returns expiration information for the API key in milliseconds. By default, + API keys never expire. You can specify expiration information when you create + the API keys. Cross-cluster API keys can only be updated with the update cross-cluster + API key API. Attempting to update them with the update REST API key API or the + bulk update REST API keys API will result in an error. 
+ + ``_ + + :param access: The access to be granted to this API key. The access is composed + of permissions for cross-cluster search and cross-cluster replication. At + least one of them must be specified. NOTE: No explicit privileges should + be specified for either search or replication access. The creation process + automatically converts the access specification to a role descriptor which + has relevant privileges assigned accordingly. + :param name: Specifies the name for this API key. + :param expiration: Expiration time for the API key. By default, API keys never + expire. + :param metadata: Arbitrary metadata that you want to associate with the API key. + It supports nested data structure. Within the metadata object, keys beginning + with `_` are reserved for system usage. + """ + if access is None and body is None: + raise ValueError("Empty value passed for parameter 'access'") + if name is None and body is None: + raise ValueError("Empty value passed for parameter 'name'") + __path_parts: t.Dict[str, str] = {} + __path = "/_security/cross_cluster/api_key" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if access is not None: + __body["access"] = access + if name is not None: + __body["name"] = name + if expiration is not None: + __body["expiration"] = expiration + if metadata is not None: + __body["metadata"] = metadata + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="security.create_cross_cluster_api_key", + path_parts=__path_parts, + ) + @_rewrite_parameters() def create_service_token( self, @@ -3491,6 +3575,74 @@ def update_api_key( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("access", "expiration", "metadata"), + ) + def update_cross_cluster_api_key( + self, + *, + id: str, + access: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + expiration: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + metadata: t.Optional[t.Mapping[str, t.Any]] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Update a cross-cluster API key. Update the attributes of an existing cross-cluster + API key, which is used for API key based remote cluster access. + + ``_ + + :param id: The ID of the cross-cluster API key to update. + :param access: The access to be granted to this API key. The access is composed + of permissions for cross cluster search and cross cluster replication. At + least one of them must be specified. When specified, the new access assignment + fully replaces the previously assigned access. + :param expiration: Expiration time for the API key. By default, API keys never + expire. This property can be omitted to leave the value unchanged. + :param metadata: Arbitrary metadata that you want to associate with the API key. + It supports nested data structure. Within the metadata object, keys beginning + with `_` are reserved for system usage. 
When specified, this information + fully replaces metadata previously associated with the API key. + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + if access is None and body is None: + raise ValueError("Empty value passed for parameter 'access'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_security/cross_cluster/api_key/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if access is not None: + __body["access"] = access + if expiration is not None: + __body["expiration"] = expiration + if metadata is not None: + __body["metadata"] = metadata + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="security.update_cross_cluster_api_key", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=("data", "labels"), ) From 5dff47b200daefa801ebf238238099007b4ae058 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 12 Nov 2024 15:59:49 +0400 Subject: [PATCH 08/65] [Backport 8.x] Make BulkIndexError and ScanError serializable (#2699) Co-authored-by: Quentin Pradet Co-authored-by: Sebastian Goodman <164915775+seagrine@users.noreply.github.com> --- elasticsearch/helpers/errors.py | 16 ++++++++++++---- test_elasticsearch/test_helpers.py | 17 +++++++++++++++++ 2 files changed, 29 insertions(+), 4 deletions(-) diff --git a/elasticsearch/helpers/errors.py b/elasticsearch/helpers/errors.py index 359fe87b1..4814ca581 100644 --- a/elasticsearch/helpers/errors.py +++ b/elasticsearch/helpers/errors.py @@ -15,18 +15,26 @@ # specific language governing permissions and limitations # under the License. -from typing import Any, Dict, List +from typing import Any, Dict, List, Tuple, Type class BulkIndexError(Exception): - def __init__(self, message: Any, errors: List[Dict[str, Any]]): + def __init__(self, message: str, errors: List[Dict[str, Any]]): super().__init__(message) self.errors: List[Dict[str, Any]] = errors + def __reduce__( + self, + ) -> Tuple[Type["BulkIndexError"], Tuple[str, List[Dict[str, Any]]]]: + return (self.__class__, (self.args[0], self.errors)) + class ScanError(Exception): scroll_id: str - def __init__(self, scroll_id: str, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) + def __init__(self, scroll_id: str, *args: Any) -> None: + super().__init__(*args) self.scroll_id = scroll_id + + def __reduce__(self) -> Tuple[Type["ScanError"], Tuple[str, str]]: + return (self.__class__, (self.scroll_id,) + self.args) diff --git a/test_elasticsearch/test_helpers.py b/test_elasticsearch/test_helpers.py index c9284afc5..e30635f44 100644 --- a/test_elasticsearch/test_helpers.py +++ b/test_elasticsearch/test_helpers.py @@ -15,6 +15,7 @@ # specific language governing permissions and limitations # under the License. 
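# Motivating sketch (not part of this patch): exceptions raised inside a
# multiprocessing worker only reach the parent process if they can be pickled,
# which the __reduce__ implementations above make possible for these classes.
import multiprocessing

from elasticsearch.helpers import BulkIndexError


def _always_fail(_item: object) -> None:
    raise BulkIndexError("1 document(s) failed to index.", [{"index": {"status": 400}}])


if __name__ == "__main__":
    with multiprocessing.Pool(1) as pool:
        try:
            pool.map(_always_fail, [None])
        except BulkIndexError as err:
            print(err.errors)  # survives the pickle round trip intact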
+import pickle import threading import time from unittest import mock @@ -182,3 +183,19 @@ class TestExpandActions: @pytest.mark.parametrize("action", ["whatever", b"whatever"]) def test_string_actions_are_marked_as_simple_inserts(self, action): assert ({"index": {}}, b"whatever") == helpers.expand_action(action) + + +def test_serialize_bulk_index_error(): + error = helpers.BulkIndexError("message", [{"error": 1}]) + pickled = pickle.loads(pickle.dumps(error)) + assert pickled.__class__ == helpers.BulkIndexError + assert pickled.errors == error.errors + assert pickled.args == error.args + + +def test_serialize_scan_error(): + error = helpers.ScanError("scroll_id", "shard_message") + pickled = pickle.loads(pickle.dumps(error)) + assert pickled.__class__ == helpers.ScanError + assert pickled.scroll_id == error.scroll_id + assert pickled.args == error.args From 932eb969844b95ad4cc22aeade569bbb4e597b75 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Tue, 12 Nov 2024 13:01:23 +0100 Subject: [PATCH 09/65] Auto-generated code for 8.x (#2697) --- elasticsearch/_async/client/tasks.py | 8 ++++---- elasticsearch/_sync/client/tasks.py | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/elasticsearch/_async/client/tasks.py b/elasticsearch/_async/client/tasks.py index ca9c38eae..ffeb14f40 100644 --- a/elasticsearch/_async/client/tasks.py +++ b/elasticsearch/_async/client/tasks.py @@ -159,7 +159,7 @@ async def list( ] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - node_id: t.Optional[t.Sequence[str]] = None, + nodes: t.Optional[t.Union[str, t.Sequence[str]]] = None, parent_task_id: t.Optional[str] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, @@ -179,7 +179,7 @@ async def list( :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - :param node_id: Comma-separated list of node IDs or names used to limit returned + :param nodes: Comma-separated list of node IDs or names used to limit returned information. :param parent_task_id: Parent task ID used to limit returned information. To return all tasks, omit this parameter or use a value of `-1`. @@ -205,8 +205,8 @@ async def list( __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout - if node_id is not None: - __query["node_id"] = node_id + if nodes is not None: + __query["nodes"] = nodes if parent_task_id is not None: __query["parent_task_id"] = parent_task_id if pretty is not None: diff --git a/elasticsearch/_sync/client/tasks.py b/elasticsearch/_sync/client/tasks.py index f69ef007c..ab15a6c0c 100644 --- a/elasticsearch/_sync/client/tasks.py +++ b/elasticsearch/_sync/client/tasks.py @@ -159,7 +159,7 @@ def list( ] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - node_id: t.Optional[t.Sequence[str]] = None, + nodes: t.Optional[t.Union[str, t.Sequence[str]]] = None, parent_task_id: t.Optional[str] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, @@ -179,7 +179,7 @@ def list( :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. 
- :param node_id: Comma-separated list of node IDs or names used to limit returned + :param nodes: Comma-separated list of node IDs or names used to limit returned information. :param parent_task_id: Parent task ID used to limit returned information. To return all tasks, omit this parameter or use a value of `-1`. @@ -205,8 +205,8 @@ def list( __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout - if node_id is not None: - __query["node_id"] = node_id + if nodes is not None: + __query["nodes"] = nodes if parent_task_id is not None: __query["parent_task_id"] = parent_task_id if pretty is not None: From 4e1adbbca9b6c761825159f17ffef6757051bf91 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 12 Nov 2024 16:12:19 +0400 Subject: [PATCH 10/65] [Backport 8.x] Allow retries for statuses other than 429 in streaming bulk (#2701) Co-authored-by: Miguel Grinberg Co-authored-by: Quentin Pradet Co-authored-by: Aaron Hoffer <4275843+ayayron@users.noreply.github.com> --- elasticsearch/_async/helpers.py | 29 +++++++++----- elasticsearch/helpers/actions.py | 29 +++++++++----- .../test_async/test_server/test_helpers.py | 39 +++++++++++++++++++ .../test_server/test_helpers.py | 39 +++++++++++++++++++ 4 files changed, 116 insertions(+), 20 deletions(-) diff --git a/elasticsearch/_async/helpers.py b/elasticsearch/_async/helpers.py index 1ab55850b..1bc339917 100644 --- a/elasticsearch/_async/helpers.py +++ b/elasticsearch/_async/helpers.py @@ -173,6 +173,7 @@ async def async_streaming_bulk( max_backoff: float = 600, yield_ok: bool = True, ignore_status: Union[int, Collection[int]] = (), + retry_on_status: Union[int, Collection[int]] = (429,), *args: Any, **kwargs: Any, ) -> AsyncIterable[Tuple[bool, Dict[str, Any]]]: @@ -184,10 +185,11 @@ async def async_streaming_bulk( entire input is consumed and sent. If you specify ``max_retries`` it will also retry any documents that were - rejected with a ``429`` status code. To do this it will wait (**by calling - asyncio.sleep**) for ``initial_backoff`` seconds and then, - every subsequent rejection for the same chunk, for double the time every - time up to ``max_backoff`` seconds. + rejected with a ``429`` status code. Use ``retry_on_status`` to + configure which status codes will be retried. To do this it will wait + (**by calling asyncio.sleep which will block**) for ``initial_backoff`` seconds + and then, every subsequent rejection for the same chunk, for double the time + every time up to ``max_backoff`` seconds. :arg client: instance of :class:`~elasticsearch.AsyncElasticsearch` to use :arg actions: iterable or async iterable containing the actions to be executed @@ -200,8 +202,11 @@ async def async_streaming_bulk( :arg expand_action_callback: callback executed on each action passed in, should return a tuple containing the action line and the data line (`None` if data line should be omitted). + :arg retry_on_status: HTTP status code that will trigger a retry. + (if `None` is specified only status 429 will retry). :arg max_retries: maximum number of times a document will be retried when - ``429`` is received, set to 0 (default) for no retries on ``429`` + retry_on_status (defaulting to ``429``) is received, + set to 0 (default) for no retries :arg initial_backoff: number of seconds we should wait before the first retry. 
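With `retry_on_status` added to `async_streaming_bulk`, transient statuses other than `429` can be retried as well. A minimal sketch, assuming the async extras (`aiohttp`) are installed; the host and index name are illustrative:

[source, python]
----
import asyncio

from elasticsearch import AsyncElasticsearch
from elasticsearch.helpers import async_streaming_bulk

async def main():
    client = AsyncElasticsearch("http://localhost:9200")
    docs = ({"_index": "my-index", "_id": i, "value": i} for i in range(1000))

    # Retry chunks rejected with 429 *or* 503, doubling the backoff each attempt.
    async for ok, item in async_streaming_bulk(
        client,
        docs,
        max_retries=3,
        retry_on_status=(429, 503),
        initial_backoff=2,
    ):
        if not ok:
            print("failed:", item)

    await client.close()

asyncio.run(main())
----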
Any subsequent retries will be powers of ``initial_backoff * 2**retry_number`` @@ -213,6 +218,9 @@ async def async_streaming_bulk( client = client.options() client._client_meta = (("h", "bp"),) + if isinstance(retry_on_status, int): + retry_on_status = (retry_on_status,) + async def map_actions() -> AsyncIterable[_TYPE_BULK_ACTION_HEADER_AND_BODY]: async for item in aiter(actions): yield expand_action_callback(item) @@ -264,11 +272,11 @@ async def map_actions() -> AsyncIterable[_TYPE_BULK_ACTION_HEADER_AND_BODY]: ): if not ok: action, info = info.popitem() - # retry if retries enabled, we get 429, and we are not - # in the last attempt + # retry if retries enabled, we are not in the last attempt, + # and status in retry_on_status (defaulting to 429) if ( max_retries - and info["status"] == 429 + and info["status"] in retry_on_status and (attempt + 1) <= max_retries ): # _process_bulk_chunk expects strings so we need to @@ -281,8 +289,9 @@ async def map_actions() -> AsyncIterable[_TYPE_BULK_ACTION_HEADER_AND_BODY]: yield ok, info except ApiError as e: - # suppress 429 errors since we will retry them - if attempt == max_retries or e.status_code != 429: + # suppress any status in retry_on_status (429 by default) + # since we will retry them + if attempt == max_retries or e.status_code not in retry_on_status: raise else: if not to_retry: diff --git a/elasticsearch/helpers/actions.py b/elasticsearch/helpers/actions.py index 1d6b0a27e..687bf4b84 100644 --- a/elasticsearch/helpers/actions.py +++ b/elasticsearch/helpers/actions.py @@ -374,6 +374,7 @@ def streaming_bulk( max_backoff: float = 600, yield_ok: bool = True, ignore_status: Union[int, Collection[int]] = (), + retry_on_status: Union[int, Collection[int]] = (429,), span_name: str = "helpers.streaming_bulk", *args: Any, **kwargs: Any, @@ -386,10 +387,11 @@ def streaming_bulk( entire input is consumed and sent. If you specify ``max_retries`` it will also retry any documents that were - rejected with a ``429`` status code. To do this it will wait (**by calling - time.sleep which will block**) for ``initial_backoff`` seconds and then, - every subsequent rejection for the same chunk, for double the time every - time up to ``max_backoff`` seconds. + rejected with a ``429`` status code. Use ``retry_on_status`` to + configure which status codes will be retried. To do this it will wait + (**by calling time.sleep which will block**) for ``initial_backoff`` seconds + and then, every subsequent rejection for the same chunk, for double the time + every time up to ``max_backoff`` seconds. :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use :arg actions: iterable containing the actions to be executed @@ -402,8 +404,11 @@ def streaming_bulk( :arg expand_action_callback: callback executed on each action passed in, should return a tuple containing the action line and the data line (`None` if data line should be omitted). + :arg retry_on_status: HTTP status code that will trigger a retry. + (if `None` is specified only status 429 will retry). :arg max_retries: maximum number of times a document will be retried when - ``429`` is received, set to 0 (default) for no retries on ``429`` + retry_on_status (defaulting to ``429``) is received, + set to 0 (default) for no retries :arg initial_backoff: number of seconds we should wait before the first retry. 
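The synchronous `streaming_bulk` gains the same `retry_on_status` knob, and a bare integer is accepted as well (it is normalized to a one-element tuple internally, as the `isinstance` check in the async helper above shows). A short sketch mirroring the new tests; the status code and index name are chosen for illustration:

[source, python]
----
from elasticsearch import Elasticsearch, helpers

client = Elasticsearch("http://localhost:9200")
docs = [{"_id": i, "answer": i} for i in range(100)]

# Retry chunks rejected with 522 up to twice, yielding failures instead of raising.
for ok, item in helpers.streaming_bulk(
    client,
    docs,
    index="my-index",
    retry_on_status=522,
    max_retries=2,
    raise_on_error=False,
    raise_on_exception=False,
):
    if not ok:
        print("failed:", item)
----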
Any subsequent retries will be powers of ``initial_backoff * 2**retry_number`` @@ -415,6 +420,9 @@ def streaming_bulk( client = client.options() client._client_meta = (("h", "bp"),) + if isinstance(retry_on_status, int): + retry_on_status = (retry_on_status,) + serializer = client.transport.serializers.get_serializer("application/json") bulk_data: List[ @@ -458,11 +466,11 @@ def streaming_bulk( ): if not ok: action, info = info.popitem() - # retry if retries enabled, we get 429, and we are not - # in the last attempt + # retry if retries enabled, we are not in the last attempt, + # and status in retry_on_status (defaulting to 429) if ( max_retries - and info["status"] == 429 + and info["status"] in retry_on_status and (attempt + 1) <= max_retries ): # _process_bulk_chunk expects bytes so we need to @@ -475,8 +483,9 @@ def streaming_bulk( yield ok, info except ApiError as e: - # suppress 429 errors since we will retry them - if attempt == max_retries or e.status_code != 429: + # suppress any status in retry_on_status (429 by default) + # since we will retry them + if attempt == max_retries or e.status_code not in retry_on_status: raise else: if not to_retry: diff --git a/test_elasticsearch/test_async/test_server/test_helpers.py b/test_elasticsearch/test_async/test_server/test_helpers.py index 746dc1028..0bb781304 100644 --- a/test_elasticsearch/test_async/test_server/test_helpers.py +++ b/test_elasticsearch/test_async/test_server/test_helpers.py @@ -293,6 +293,45 @@ async def streaming_bulk(): await streaming_bulk() assert 4 == failing_client._called + async def test_connection_timeout_is_retried_with_retry_status_callback( + self, async_client + ): + failing_client = FailingBulkClient( + async_client, + fail_with=ApiError( + message="Connection timed out!", + body={}, + meta=ApiResponseMeta( + status=522, headers={}, http_version="1.1", duration=0, node=None + ), + ), + ) + docs = [ + {"_index": "i", "_id": 47, "f": "v"}, + {"_index": "i", "_id": 45, "f": "v"}, + {"_index": "i", "_id": 42, "f": "v"}, + ] + + results = [ + x + async for x in helpers.async_streaming_bulk( + failing_client, + docs, + raise_on_exception=False, + raise_on_error=False, + chunk_size=1, + retry_on_status=522, + max_retries=1, + initial_backoff=0, + ) + ] + assert 3 == len(results) + assert [True, True, True] == [r[0] for r in results] + await async_client.indices.refresh(index="i") + res = await async_client.search(index="i") + assert {"value": 3, "relation": "eq"} == res["hits"]["total"] + assert 4 == failing_client._called + class TestBulk: async def test_bulk_works_with_single_item(self, async_client): diff --git a/test_elasticsearch/test_server/test_helpers.py b/test_elasticsearch/test_server/test_helpers.py index 011803bc9..6ed43e2af 100644 --- a/test_elasticsearch/test_server/test_helpers.py +++ b/test_elasticsearch/test_server/test_helpers.py @@ -288,6 +288,45 @@ def streaming_bulk(): assert 4 == failing_client._called +def test_connection_timeout_is_retried_with_retry_status_callback(sync_client): + failing_client = FailingBulkClient( + sync_client, + fail_with=ApiError( + message="Connection timed out!", + body={}, + meta=ApiResponseMeta( + status=522, headers={}, http_version="1.1", duration=0, node=None + ), + ), + ) + docs = [ + {"_index": "i", "_id": 47, "f": "v"}, + {"_index": "i", "_id": 45, "f": "v"}, + {"_index": "i", "_id": 42, "f": "v"}, + ] + + results = list( + helpers.streaming_bulk( + failing_client, + docs, + index="i", + raise_on_exception=False, + raise_on_error=False, + chunk_size=1, + 
retry_on_status=522, + max_retries=1, + initial_backoff=0, + ) + ) + assert 3 == len(results) + print(results) + assert [True, True, True] == [r[0] for r in results] + sync_client.indices.refresh(index="i") + res = sync_client.search(index="i") + assert {"value": 3, "relation": "eq"} == res["hits"]["total"] + assert 4 == failing_client._called + + def test_bulk_works_with_single_item(sync_client): docs = [{"answer": 42, "_id": 1}] success, failed = helpers.bulk(sync_client, docs, index="test-index", refresh=True) From 2879230d9d70cd39ab1540788c711e09bf23f6eb Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 12 Nov 2024 19:05:40 +0400 Subject: [PATCH 11/65] Fix import when trace is missing from opentelemetry (#2694) (#2704) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit (cherry picked from commit de6ed823f1e6c0094ed3f94d78e9bab6fd5c124e) Co-authored-by: Nicolò Boschi --- elasticsearch/_otel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/elasticsearch/_otel.py b/elasticsearch/_otel.py index 264d8aa35..4bb57dae7 100644 --- a/elasticsearch/_otel.py +++ b/elasticsearch/_otel.py @@ -25,7 +25,7 @@ from opentelemetry import trace _tracer: trace.Tracer | None = trace.get_tracer("elasticsearch-api") -except ModuleNotFoundError: +except ImportError: _tracer = None from elastic_transport import OpenTelemetrySpan From 7d03a7df4f64ec1e9de1d926968c3ad3745f220a Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 13 Nov 2024 15:14:29 +0400 Subject: [PATCH 12/65] [Backport 8.x] Build dists as part of CI (#2709) Co-authored-by: Quentin Pradet --- .github/workflows/ci.yml | 25 ++++++++++++++++++++----- elasticsearch/_otel.py | 2 +- elasticsearch/_sync/client/utils.py | 3 +-- 3 files changed, 22 insertions(+), 8 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1d7b8ecfd..52298bd59 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -8,17 +8,32 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up Python 3.x - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: - python-version: "3.11" + python-version: "3.x" - name: Install dependencies run: | python3 -m pip install nox - name: Lint the code run: nox -s lint + package: + runs-on: ubuntu-latest + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + - name: Set up Python 3.x + uses: actions/setup-python@v5 + with: + python-version: "3.11" + - name: Install dependencies + run: | + python3 -m pip install build + - name: Build dists + run: python utils/build-dists.py + test-linux: strategy: fail-fast: false @@ -32,9 +47,9 @@ jobs: continue-on-error: false steps: - name: Checkout Repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set Up Python - ${{ matrix.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - name: Install Dependencies diff --git a/elasticsearch/_otel.py b/elasticsearch/_otel.py index 4bb57dae7..f37ca24cd 100644 --- a/elasticsearch/_otel.py +++ b/elasticsearch/_otel.py @@ -102,7 +102,7 @@ def helpers_span(self, span_name: str) -> Generator[OpenTelemetrySpan, None, Non @contextlib.contextmanager def use_span(self, span: OpenTelemetrySpan) -> Generator[None, None, None]: - if not 
self.enabled or self.tracer is None: + if not self.enabled or self.tracer is None or span.otel_span is None: yield return diff --git a/elasticsearch/_sync/client/utils.py b/elasticsearch/_sync/client/utils.py index 959f61bfd..c5ec21dae 100644 --- a/elasticsearch/_sync/client/utils.py +++ b/elasticsearch/_sync/client/utils.py @@ -56,10 +56,9 @@ url_to_node_config, ) -from elasticsearch.exceptions import GeneralAvailabilityWarning - from ..._version import __versionstr__ from ...compat import to_bytes, to_str, warn_stacklevel +from ...exceptions import GeneralAvailabilityWarning if TYPE_CHECKING: from ._base import NamespacedClient From f69cb3d02f790c221486012fd18ab71c81ae392e Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 14 Nov 2024 09:53:07 +0400 Subject: [PATCH 13/65] [Backport 8.x] Bring 8.16.0 release to main (#2712) Co-authored-by: Quentin Pradet --- .../00fea15cbca83be9d5f1a024ff2ec708.asciidoc | 2 +- ...01ae196538fac197eedbbf458a4ef31b.asciidoc} | 14 ++- .../01b23f09d2b7f140faf649eadbbf3ac3.asciidoc | 2 +- .../01cd0ea360282a2c591a366679d7187d.asciidoc | 12 ++ .../03891265df2111a38e0b6b24c1b967e1.asciidoc | 2 +- .../04412d11783dac25b5fd2ec5407078a3.asciidoc | 2 +- .../046b2249bbc49e77848c114cee940f17.asciidoc | 2 +- .../04de2e3a9c00c2056b07bf9cf9e63a99.asciidoc | 2 +- ...05e637284bc3bedd46e0b7c26ad983c4.asciidoc} | 2 +- ...0709a38613d2de90d418ce12b36af30e.asciidoc} | 4 +- .../0737ebaea33631f001fb3f4226948492.asciidoc | 2 +- ...083b92e8ea264e49bf9fd40fc6a3094b.asciidoc} | 3 +- .../095e3f21941a9cc75f398389a075152d.asciidoc | 2 +- .../09769561f082b50558fb7d8707719963.asciidoc | 2 +- .../09cb1b18bf4033b4afafb25bd3dab12c.asciidoc | 2 +- .../0a650401134f07e40216f0d0d1a66a32.asciidoc | 2 +- .../0ad8edd10542ec2c4d5d8700d7e2ba97.asciidoc | 2 +- .../0ade87c8cb0e3c188d2e3dce279d5cc2.asciidoc | 2 +- .../0bee07a581c5776e068f6f4efad5a399.asciidoc | 14 +++ .../0c2ca704a39dda8b3a7c5806ec6c6cf8.asciidoc | 2 +- .../0c2d9ac7e3f28d4d802e21cbbbcfeb34.asciidoc | 2 +- .../0c8be7aec84ea86b243904f5d4162f5a.asciidoc | 18 +++ ...0d30077cd34e93377a3a86f2ebd69415.asciidoc} | 4 +- .../0d8063b484a18f8672fb5ed8712c5c97.asciidoc | 2 +- .../0e31b8ad176b31028becf9500989bcbd.asciidoc | 21 ++++ .../0eccea755bd4f6dd47579a9022690546.asciidoc | 2 +- .../103296e16b4233926ad1f07360385606.asciidoc | 2 +- .../11be807bdeaeecc8174dec88e0851ea7.asciidoc | 15 +++ .../1295f51b9e5d4ba9987b02478146b50b.asciidoc | 2 +- .../12e9e758f7f18a6cbf27e9d0aea57a19.asciidoc | 18 +++ .../13ecdf99114098c76b050397d9c3d4e6.asciidoc | 2 +- .../146bd22fd0e7be2345619e8f11d3a4cb.asciidoc | 2 +- .../150b5fee5678bf8cdf0932da73eada80.asciidoc | 2 +- .../1522a9297151d7046e6345b9b27539ca.asciidoc | 19 ++++ .../1570976f7807b88dc8a046b833be057b.asciidoc | 2 +- .../15f769bbd7b5fddeb3353ae726b71b14.asciidoc | 64 +++++++++++ .../1637ef51d673b35cc8894ee80cd61c87.asciidoc | 2 +- .../16a7ce08b4a6b3af269f27eecc71d664.asciidoc | 15 +++ .../17316a81c9dbdd120b7754116bf0461c.asciidoc | 31 ++++++ .../1745ac9e6d22a2ffe7ac381f9ba238f9.asciidoc | 10 -- .../182df084f028479ecbe8d7648ddad892.asciidoc | 2 +- .../18de6782bd18f4a9baec2feec8c02a8b.asciidoc | 18 --- .../191074b2eebd5f74e628c2ada4b6d2e4.asciidoc | 58 ++++++++++ .../192fa1f6f51dfb640e9e15bb5cd7eebc.asciidoc | 2 +- .../193704020a19714dec390452a4e75e8d.asciidoc | 10 ++ .../194bbac15e709174ac85b681f3a3d137.asciidoc | 2 +- .../196aed02b11def364bab84e455c1a073.asciidoc | 2 +- .../19c00c6b29bc7dbc5e92b3668da2da93.asciidoc | 2 +- 
.../19d60e4890cc57151d596326484d9076.asciidoc | 11 -- .../19f1f9f25933f8e7aba59a10881c648b.asciidoc | 2 +- .../1a3897cfb4f974c09d0d847baac8aa6d.asciidoc | 2 +- .../1a56df055b94466ca76818e0858752c6.asciidoc | 2 +- .../1a7483796087053ba55029d0dc2ab356.asciidoc | 23 ++++ .../1a9e03ce0355872a7db27fedc783fbec.asciidoc | 2 +- .../1aa96eeaf63fc967e166d1a2fcdccccc.asciidoc | 2 +- .../1b37e2237c9e3aaf84d56cc5c0bdb9ec.asciidoc | 2 +- .../1ba7afe23a26fe9ac7856d8c5bc1059d.asciidoc | 2 +- ...1c330f0fc9eac19d0edeb8c4017b9b93.asciidoc} | 2 +- .../1c9dac4183a3532c91dbd1a46907729b.asciidoc | 10 ++ ...1cbecd19be22979aefb45b4f160e77ea.asciidoc} | 2 +- .../1d918e206ad8dab916e59183da24d9ec.asciidoc | 13 --- .../1dadb7efe27b6c0c231eb6535e413bd9.asciidoc | 2 +- .../1e0b85750d4e63ebbc927d4627c44bf8.asciidoc | 2 +- .../1e26353d546d733634187b8c3a7837a7.asciidoc | 2 +- .../1e547696f54582840040b1aa6661760c.asciidoc | 2 +- .../1fb2c77c0988bc6545040b20e3afa7e9.asciidoc | 41 +++++++ .../2155c920d7d860f3ee7542f2211b4fec.asciidoc | 2 +- ...216e24f05cbb82c1718713fbab8623d2.asciidoc} | 4 +- .../21cd01cb90d3ea1acd0ab22d7edd2c88.asciidoc | 2 +- ...21d41e8cbd107fbdf0901f885834dafc.asciidoc} | 14 ++- .../2310d84ebf113f2a3ed14cc53172ae4a.asciidoc | 2 +- .../25cb9e1da00dfd971065ce182467434d.asciidoc | 2 +- .../2646710ece0c4c843aebeacd370d0396.asciidoc | 2 +- .../270549e6b062228312c4e7a54a2c2209.asciidoc | 2 +- ...2968ffb8135f77ba3a9b876dd4918119.asciidoc} | 2 +- .../2a5f7e7d6b92c66e52616845146d2820.asciidoc | 2 +- .../2a71e2d7f7179dd76183d30789046808.asciidoc | 2 +- .../2acf75803494fef29f9ca70671aa6be1.asciidoc | 2 +- .../2afdf0d83724953aa2875b5fb37d60cc.asciidoc | 2 +- .../2b1c560f00d9bcf5caaf56c03f6b5962.asciidoc | 2 +- .../2bb41b0b4876ce98cd0cd8fb6d337f18.asciidoc | 2 +- .../2c079d1ae4819a0c206b9e1aa5623523.asciidoc | 77 +++++++++++++ .../2c44657adf550b8ade5cf5334106d38b.asciidoc | 2 +- .../2c86840a46242a38cf82024a9321be46.asciidoc | 28 +++++ .../2d0244c020075595acb625aa5ba8f455.asciidoc | 38 +++++++ .../2de6885bacb8769b8f22dce253c96b0c.asciidoc | 2 +- .../2e7844477b41fcfa9efefee4ec0e7101.asciidoc | 45 ++++++++ .../2f2fd35905feef0b561c05d70c7064c1.asciidoc | 2 +- .../2f67db5e4d6c958258c3d70fb2d0b1c8.asciidoc | 13 --- .../2f98924c3d593ea2b60edb9cef5bee22.asciidoc | 2 +- .../2fa7ded8515b32f26c54394ea598f573.asciidoc | 2 +- ...310bdfb0d0d75bac7bff036a3fe51d4d.asciidoc} | 2 +- .../320645d771e952af2a67bb7445c3688d.asciidoc | 2 +- .../327466380bcd55361973b4a96c6dccb2.asciidoc | 2 +- ...334811cfceb6858aeec5b3461717dd63.asciidoc} | 4 +- .../339c4e5af9f9069ad9912aa574488b59.asciidoc | 70 ++++++++++++ .../33d480fc6812ada75756cf5337bc9092.asciidoc | 2 +- .../342ddf9121aeddd82fea2464665e25da.asciidoc | 2 +- .../346f28d82acb5427c304aa574fea0008.asciidoc | 2 +- .../35a272df8c919a12d7c3106a18245748.asciidoc | 2 +- .../365256ebdfa47b449780771d9beba8d9.asciidoc | 2 +- ...36ac0ef9ea63efc431580f7ade8ad53c.asciidoc} | 2 +- .../36b86b97feedcf5632824eefc251d6ed.asciidoc | 2 +- .../36d229f734adcdab00be266a7ce038b1.asciidoc | 2 +- .../370b297ed3433577adf53e64f572d89d.asciidoc | 2 +- .../371962cf63e65c10026177c6a1bad0b6.asciidoc | 2 +- .../37c73410bf13429279cbc61a413957d8.asciidoc | 2 +- .../38ba93890494bfa7beece58dffa44f98.asciidoc | 2 +- .../398389933901b572a06a752bc780af7c.asciidoc | 2 +- .../3a2f37f8f32b1aa6bcfb252b9e00f904.asciidoc | 2 +- .../3a489743e49902df38e3368cae00717a.asciidoc | 8 ++ ...3a4953663a5a3809b692c27446e16b7f.asciidoc} | 2 +- ...3ab8f65fcb55a0e3664c55749ec41efd.asciidoc} | 3 +- .../3afc6dacf90b42900ab571aad8a61d75.asciidoc | 2 +- 
.../3b6718257421b5419bf4cd6a7303c57e.asciidoc | 11 -- .../3c0d0c38e1c819a35a68cdba5ae8ccc4.asciidoc | 2 +- .../3c345feb7c52fd54bcb5d5505fd8bc3b.asciidoc | 2 +- .../3cd93a48906069709b76420c66930c01.asciidoc | 2 +- .../3f2e5132e35b9e8b3203a4a0541cf0d4.asciidoc | 2 +- .../3fe4264ace04405989141c43aadfff81.asciidoc | 2 +- .../405ac843a9156d3cab374e199cac87fb.asciidoc | 2 +- .../40bd86e400d27e68b8f0ae580c29d32d.asciidoc | 2 +- .../40c3e7bb1fdc125a1ab21bd7d7326694.asciidoc | 2 +- .../40f287bf733420bbab134b74c7d0ea5d.asciidoc | 24 ++++ .../40f97f70e8e743c6a6296c81b920aeb0.asciidoc | 2 +- .../41175d304e660da2931764f9a4418fd3.asciidoc | 2 +- .../413fdcc7c437775a16bb55b81c2bbe2b.asciidoc | 2 +- .../425eaaf9c7e3b1e77a3474fbab4183b4.asciidoc | 2 +- .../430705509f8367aef92be413f702520b.asciidoc | 2 +- .../4310869b97d4224acaa6d66b1e196048.asciidoc | 2 +- .../43d9e314431336a6f084cea76dfd6489.asciidoc | 22 ++++ .../44198781d164a15be633d4469485a544.asciidoc | 29 +++++ .../44385b61342e20ea05f254015b2b04d7.asciidoc | 2 +- .../443e8da9968f1c65f46a2a65a1e1e078.asciidoc | 2 +- .../445f8a6ef75fb43da52990b3a9063c78.asciidoc | 2 +- .../44bca3f17d403517af3616754dc795bb.asciidoc | 2 +- .../49a19615ebe2c013b8321152163478ab.asciidoc | 42 +++++++ .../4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc | 14 +++ .../4bef98a2dac575a50ee0783c2269f1db.asciidoc | 2 +- .../4c9350ed09b28f00e297ebe73c3b95a2.asciidoc | 2 +- .../4c95d54b32df4dc49e9762b6c1ae2c05.asciidoc | 2 +- .../4ca15672fc5ab1d80a127d086b6d2837.asciidoc | 2 +- .../4da0cb8693e9ceceee2ba3b558014bbf.asciidoc | 38 +++++++ .../4dab4c5168047ba596af1beb0e55b845.asciidoc | 2 +- ...4dc151eebefd484a28aed1a175743364.asciidoc} | 2 +- .../4e3414fc712b16311f9e433dd366f49d.asciidoc | 2 +- .../4ed946065faa92f9950f04e402676a97.asciidoc | 2 +- ...4eeded40f30949e359714a5bb6c88612.asciidoc} | 2 +- .../4f6694ef147a73b1163bde3c13779d26.asciidoc | 11 ++ .../4fcca1687d7b2cf08de526539fea5a76.asciidoc | 2 +- .../50d9c0508ddb0fc5ba5a893eec219dd8.asciidoc | 2 +- .../50ddf374cfa8128538ea092ee98b723d.asciidoc | 10 -- .../51b44224feee6e2e5974824334474c77.asciidoc | 2 +- .../529671ffaf7cc75fe83a81d729788be4.asciidoc | 31 ++++++ .../52b71aa4ae6563abae78cd20ff06d1e9.asciidoc | 2 +- .../52f4c5eb08d39f98e2e2f5527ece9731.asciidoc | 2 +- .../533087d787b48878a0bf3fa8d0851b64.asciidoc | 11 -- .../5457c94f0039c6b95c7f9f305d0c6b58.asciidoc | 2 +- ...548a9b6f447bb820380c1c23e57c18c3.asciidoc} | 2 +- .../54a47b5d07e7bfbea75c77f35eaae18d.asciidoc | 15 +++ .../55085e6a2891040b6ac696561d0787c8.asciidoc | 32 ++++++ ...551467688d8c701315d0a371850a4056.asciidoc} | 2 +- ...551799fef2f86e393db83a967e4a30d1.asciidoc} | 14 ++- ...565386eee0951865a684e41fab53b40c.asciidoc} | 5 +- ...56da252798b8e7b006738428aa1a7f4c.asciidoc} | 14 ++- .../57e0bbab98f17d5b564d1ea146a55fe4.asciidoc | 2 +- .../584f502cf840134f2db5f39e2483ced1.asciidoc | 2 +- .../58dd26afc919722e21358c91e112b27a.asciidoc | 18 +++ .../58f6b72009512851843c7b7a20e9504a.asciidoc | 19 ++++ .../59d736a4d064ed2013c7ead8e32e0998.asciidoc | 2 +- ...5a70db31f587b7ffed5e9bc1445430cb.asciidoc} | 2 +- .../5b2a13366bd4e1ab4b25d04d360570dc.asciidoc | 2 +- .../5bba213a7f543190139d1a69ab2ed076.asciidoc | 14 +++ .../5ceb734e3affe00e2cdc29af748d95bf.asciidoc | 23 ++++ ...5cf12cc4f98d98dc79bead7e6556679c.asciidoc} | 10 +- ...5daf8ede198be9b118da5bee9896cb00.asciidoc} | 14 ++- .../5deeed427f35cbaee4b8ddc45002a9d7.asciidoc | 2 +- ...5e021307d331a4483a5aa2198168451b.asciidoc} | 14 ++- .../60d3f9a99cc91b43aaa7524a9a74dba0.asciidoc | 10 ++ .../62d3c8fccb11471bdc12555c1a7777f2.asciidoc | 2 
+- .../6329fb2840a4373ff6d342f2653247cb.asciidoc | 10 ++ .../636ee2066450605247ec1f68d04b8ee4.asciidoc | 2 +- .../63a53fcb0717ae9033a679cbfc932851.asciidoc | 20 ++++ .../63bf3480627a89b4b4ede4150e1d6bc0.asciidoc | 2 +- .../640621cea39cdeeb76fbc95bff31a18d.asciidoc | 2 +- .../640dbeecb736bd25f6f2b392b76a7531.asciidoc | 10 ++ .../643e19c3b6ac1134554dd890e2249c2b.asciidoc | 2 +- .../64a79861225553799b26e118d7851dcc.asciidoc | 2 +- .../6606d46685d10377b996b5f20f1229b5.asciidoc | 2 +- .../67154a4837cf996a9a9c3e61d6e9d1b3.asciidoc | 15 --- .../67bab07fda27ef77e3bc948211051a33.asciidoc | 2 +- .../69541f0bb81ab3797926bb2a00607cda.asciidoc | 19 ++++ .../69ab708fe65a75f870223d2289c3d171.asciidoc | 2 +- ...6b6e275efe3d2aafe0fc3443f2c96868.asciidoc} | 2 +- .../6b8c5c8145c287c4fc535fa57ccf95a7.asciidoc | 2 +- ...6cb1dae368c945ecf7c9ec332a5743a2.asciidoc} | 14 ++- .../6db118771354792646229e7a3c30c7e9.asciidoc | 2 +- .../6dcd3916679f6aa64f79524c75991ebd.asciidoc | 2 +- .../6ddd4e657efbf45def430a6419825796.asciidoc | 2 +- .../6e6b78e6b689a5d6aa637271b6d084e2.asciidoc | 52 +++++++++ .../6f8bdca97e43aac75e32de655aa4314a.asciidoc | 10 ++ .../7163346755400594d1dd7e445aa19ff0.asciidoc | 10 ++ ...71998bb300ac2a58419b0772cdc1c586.asciidoc} | 14 ++- .../71c629c44bf3c542a0daacbfc253c4b0.asciidoc | 2 +- .../72ae3851160fcf02b8e2cdfd4e57d238.asciidoc | 2 +- .../741180473ba526219578ad0422f4fe81.asciidoc | 2 +- .../7429b16221fe741fd31b0584786dd0b0.asciidoc | 2 +- .../744aeb2af40f519e430e21e004e3c3b7.asciidoc | 2 +- ...745864ef2427188241a4702b94ea57be.asciidoc} | 2 +- .../746e0a1cb5984f2672963b363505c7b3.asciidoc | 2 +- .../7471e97aaaf21c3a200abdd89f15c3cc.asciidoc | 2 +- ...750ac969f9a05567f5cdf4f93d6244b6.asciidoc} | 1 - .../7594a9a85c8511701e281974cbc253e1.asciidoc | 2 +- .../76c73b54f3f1e5cb1c0fcccd7c3fd18e.asciidoc | 87 +++++++++++++++ .../77082b1ffaae9ac52dfc133fa597baa7.asciidoc | 18 +++ .../7709a48020a6cefbbe547fb944541cdb.asciidoc | 2 +- .../7752b677825523bfb0c38ad9325a6d47.asciidoc | 2 +- .../776b553df0e507c96dbdbaedecaca0cc.asciidoc | 2 +- .../77b90f6787195767b6da60d8532714b4.asciidoc | 2 +- .../77d0780c5faea4c9ec51a322a6811b3b.asciidoc | 2 +- .../78176cd6f570e1534bb40b19e6e900b6.asciidoc | 2 +- ...7888c509774a2abfe82ca370c43d8789.asciidoc} | 2 +- .../79ff4e7fa5c004226d05d7e2bfb5dc1e.asciidoc | 51 +++++++++ .../7a0eb2222fe282d3aab66e12feff2a3b.asciidoc | 2 +- .../7a27336a61284d079f3cc3994cf927d1.asciidoc | 58 ++++++++++ .../7a32f44a1511ecb0d3f0b0ff2aca5c44.asciidoc | 23 ---- .../7af1f62b0cf496cbf593d83d30b472cc.asciidoc | 31 ++++++ .../7b9691bd34a02dd859562eb927f175e0.asciidoc | 23 ++++ .../7c8f207e43115ea8f20d2298be5aaebc.asciidoc | 39 ------- ...7d3a74fe0ba3fe95d1c3275365ff9315.asciidoc} | 14 ++- .../7db09cab02d71f3a10d91071216d80fc.asciidoc | 27 +++++ .../7db798942cf2d334456e30ef5fcb801b.asciidoc | 17 +++ .../7f1fade93225f8cf6000b93334d76ce4.asciidoc | 34 ++++++ .../7fde3ff91c4a2e7080444af37d5cd287.asciidoc | 2 +- ...8080cd9e24a8785728ce7c372ec4acf1.asciidoc} | 9 +- .../82844ef45e11c0eece100d3109db3182.asciidoc | 2 +- .../828f0045747fde4888a947bb99e190e3.asciidoc | 29 +++++ ...8417d8d35ec5fc5665dfb2f95d6d1101.asciidoc} | 2 +- .../84237aa9da49ab4b4c4e2b21d2548df2.asciidoc | 5 +- .../84243213614fe64930b1d430704afb29.asciidoc | 2 +- .../84c69fb07050f0e89720007a6507a221.asciidoc | 2 +- .../84ef9fe951c6d3caa7438238a5b23319.asciidoc | 15 +++ .../853fc710cea79fb4e1a85fb6d149f9c5.asciidoc | 44 ++++++++ .../8566f5ecf4ae14802ba63c8cc7c629f8.asciidoc | 2 +- .../8593715fcc70315a0816b435551258e0.asciidoc | 2 +- 
.../85f9fc6f98e8573efed9b034e853d5ae.asciidoc | 17 +++ .../8619bd17bbfe33490b1f277007f654db.asciidoc | 2 +- .../861f5f61409dc87f3671293b87839ff7.asciidoc | 2 +- .../87457bb3467484bec3e9df4e25942ba6.asciidoc | 2 +- ...894fce12d8f0d01e4c4083885a0c0077.asciidoc} | 2 +- .../8a1b6eae4893c5dd27b3d81fd8d70f5b.asciidoc | 2 +- .../8b8b6aac2111b2d8b93758ac737e6543.asciidoc | 31 ++++++ .../8bf1e7a6d529547906ba8b1d6501fa0c.asciidoc | 2 +- .../8c92c5e87facbae8dc4f58376ec21815.asciidoc | 2 +- ...8cad5d95a0e7c103f08be53d0b172558.asciidoc} | 8 +- .../8d05862be1f9e7edaba162b1888b5677.asciidoc | 61 ++++++++++ .../8d9b04f2a97f4229dec9e620126de049.asciidoc | 2 +- .../8de6fed6ba2b94ce6a12ce076be2b4d7.asciidoc | 2 +- .../8e2bbef535fef688d397e60e09aefa7f.asciidoc | 2 +- .../8e89fee0be6a436c4e3d7c152659c47e.asciidoc | 2 +- .../8f4a7f68f2ca3698abdf20026a2d8c5f.asciidoc | 2 +- .../90083d93e46fad2524755b8d4d1306fc.asciidoc | 2 +- .../9138550002cb26ab64918cce427963b8.asciidoc | 2 +- .../9169d19a80175ec94f80865d0f9bef4c.asciidoc | 43 +++++++ .../927b20a221f975b75d1227b67d0eb7e2.asciidoc | 2 +- .../92fa6608673cec5a2ed568a07e80d36b.asciidoc | 2 +- .../9313f534e1aa266cde7d4af74665497f.asciidoc | 13 +++ .../935566d5426d44ade486a49ec5289741.asciidoc | 2 +- .../9382f022086c692ba05efb0acae65946.asciidoc | 2 +- .../95414139c7b1203e3c2d99a354415801.asciidoc | 2 +- ...968fb5b92aa65af09544f7c002b0953e.asciidoc} | 4 +- .../971fd23adb81bb5842c7750e0379336a.asciidoc | 26 +++++ .../986f892bfa4dfdf1da8455fdf84a4b0c.asciidoc | 2 +- .../99160b7c3c3fc1fac98aeb426dbcb3cb.asciidoc | 2 +- .../99803d7b111b862c0c82e9908e549b16.asciidoc | 2 +- .../998c8479c8704bca0e121d5969859517.asciidoc | 10 ++ .../9a05cc10eea1251e23b82a4549913536.asciidoc | 2 +- .../9a203aae3e1412d919546276fb52a5ca.asciidoc | 2 +- .../9ad0864bcd665b63551e944653d32423.asciidoc | 35 ++++++ .../9ae268058c0ea32ef8926568e011c728.asciidoc | 2 +- ...9aedc45f83e022732789e8d796f5a43c.asciidoc} | 1 - .../9b0f34d122a4b348dc86df7410d6ebb6.asciidoc | 2 +- .../9bd5a470ee6d2b4a1f5280adc39675d2.asciidoc | 35 ++++++ .../9c021836acf7c0370e289f611325868d.asciidoc | 2 +- ...9c2ce0132e4527077443f007d27b1158.asciidoc} | 14 ++- .../9cb150d67dfa0947f29aa809bcc93c6e.asciidoc | 2 +- .../9cbb097e5498a9fde39e3b1d3b62a4d2.asciidoc | 2 +- .../9d396afad93782699d7a929578c85284.asciidoc | 2 +- .../9d66cb59711f24e6b4ff85608c9b5a1b.asciidoc | 12 ++ .../9de4edafd22a8b9cb557632b2c8779cd.asciidoc | 2 +- .../9f16fca9813304e398ee052aa857dbcd.asciidoc | 2 +- .../9f3341489fefd38c4e439c29f6dcb86c.asciidoc | 2 +- .../a1070cf2f5969d42d71cda057223f152.asciidoc | 2 +- ...a1b668795243398f5bc40bcc9bead884.asciidoc} | 12 +- .../a1dda7e7c01be96a4acf7b725d70385f.asciidoc | 28 +++++ .../a2b2ce031120dac49b5120b26eea8758.asciidoc | 2 +- .../a3646b59da66b9ab68bdbc8dc2e6a9be.asciidoc | 32 ++++++ .../a3779f21f132787c48681bfb50453592.asciidoc | 34 ++++++ .../a4a3c3cd09efa75168dab90105afb2e9.asciidoc | 2 +- .../a4ee2214d621bcfaf768c46d21325958.asciidoc | 2 +- .../a594f05459d9eecc8050c73fc8da336f.asciidoc | 2 +- ...a5aeb2c8bdf91f6146026ec8edc476b6.asciidoc} | 14 ++- .../a75765e3fb130421dde6c3c2f12e8acb.asciidoc | 2 +- ...a769d696bf12f5e9de4b3250646d250c.asciidoc} | 2 +- ...a7d814caf2a995d2aeadecc3495011be.asciidoc} | 14 ++- ...a8dff54362184b2732b9bd248cf6df8a.asciidoc} | 14 ++- .../a9280b55a7284952f604ec7bece712f6.asciidoc | 2 +- ...a95ae76fca7c3e273e4bd10323b3caa6.asciidoc} | 2 +- .../a960b43e720b4934edb74ab4b085ca77.asciidoc | 2 +- .../a999b5661bebb802bbbfe04faacf1971.asciidoc | 2 +- .../aa676d54a59dee87ecd28bcc1edce59b.asciidoc | 
2 +- .../aa814309ad5f1630886ba75255b444f5.asciidoc | 8 ++ .../aab810de3314d5e11bd564ea096785b8.asciidoc | 21 ++++ .../aaba346e0becdf12db13658296e0b8a1.asciidoc | 2 +- ...aad7d80990a6a3c391ff555ce09ae9dc.asciidoc} | 14 ++- .../ab24bfdfd8c1c7b3044b21a3b4684370.asciidoc | 2 +- ...ac5b91aa75696f9880451c9439fd9eec.asciidoc} | 14 ++- .../ac9fe9b64891095bcf84066f719b3dc4.asciidoc | 2 +- ...acc44366a9908684b2c8c2b119a4fb2b.asciidoc} | 14 ++- .../add82cbe7cd95c4be5ce1c9958f2f208.asciidoc | 20 ++++ .../ae3473adaf1515afcf7773f26c018e5c.asciidoc | 14 +++ .../afef5cac988592b97ae289ab39c2f437.asciidoc | 2 +- ...b0ee6f19875fe5bad8aab02d60e3532c.asciidoc} | 4 +- .../b0fe9a7c8e519995258786be4bef36c4.asciidoc | 2 +- .../b11a0675e49df0709be693297ca73a2c.asciidoc | 2 +- ...b3479ee4586c15020549afae58d94d65.asciidoc} | 14 ++- .../b3756e700d0f6c7e8919003bdf26bc8f.asciidoc | 2 +- ...b3cd07f02059165fd62a2f148be3dc58.asciidoc} | 12 +- ...b3f442a7d9eb391121dcab991787f9d6.asciidoc} | 12 +- .../b45a8c6fc746e9c90fd181e69a605fad.asciidoc | 2 +- .../b583bf8d3a2f49d633aa2cfed5606418.asciidoc | 2 +- .../b6a6aa9ba20e9a019371ae268488833f.asciidoc | 2 +- .../b6a7ffd2003c38f4aa321f067d162be5.asciidoc | 2 +- .../b6f690896001f8f9ad5bf24e1304a552.asciidoc | 2 +- .../b8400dbe39215705060500f0e569f452.asciidoc | 10 ++ .../b9ba66209b7fcc111a7bcef0b3e00052.asciidoc | 17 +++ ...ba650046f9063f6c43d76f47e0f94403.asciidoc} | 14 ++- .../bb5a67e3d2d9cd3016e487e627769fe8.asciidoc | 105 ++++++++++++++++++ .../bc01aee2ab2ce1690986374bd836e1c7.asciidoc | 20 ++++ .../bcc75fc01b45e482638c65b8fbdf09fa.asciidoc | 2 +- .../bcdfaa4487747249699a86a0dcd22f5e.asciidoc | 2 +- .../bd5bd5d8b3d81241335fe1e5747080ac.asciidoc | 2 +- .../bd68666ca2e0be12f7624016317a62bc.asciidoc | 2 +- ...bdaf00d791706d7fde25fd65d3735b94.asciidoc} | 14 ++- .../be5c5a9c25901737585e4fff9195da3c.asciidoc | 2 +- .../be9836fe55c5fada404a2adc1663d832.asciidoc | 2 +- .../befa73a8a419fcf3b7798548b54a20bf.asciidoc | 47 ++++++++ .../bf1de9fa1b825fa875d27fa08821a6d1.asciidoc | 2 +- .../c00c9412609832ebceb9e786dd9542df.asciidoc | 2 +- .../c02c2916b97b6fa7db82dbc7f0378310.asciidoc | 2 +- .../c067182d385f59ce5952fb9a716fbf05.asciidoc | 2 +- .../c0ddfb2e6315f5bcf0d3ef414b5bbed3.asciidoc | 2 +- .../c12d6e962f083c728f9397932f05202e.asciidoc | 2 +- .../c18100d62ed31bc9e05f62900156e6a8.asciidoc | 2 +- .../c186ecf6f799ddff7add1abdecea5821.asciidoc | 2 +- .../c21eb4bc30087188241cbba6b6b89999.asciidoc | 2 +- ...c26b185952ddf9842e18493aca2de147.asciidoc} | 2 +- .../c4607ca79b2bcde39305d6f4f21cad37.asciidoc | 2 +- .../c6339d09f85000a6432304b0ec63b8f6.asciidoc | 2 +- ...c793efe7280e9b6e09981c4d4f832348.asciidoc} | 14 ++- .../c8aa8e8c0ac160b8c4efd1ac3b9f48f3.asciidoc | 23 ++++ .../c8fa8d7e029792d539464fede18ce258.asciidoc | 18 +++ .../cc5eefcc2102aae7e87b0c87b4af10b8.asciidoc | 2 +- .../ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc | 77 +++++++++++++ .../cd38c601ab293a6ec0e2df71d0c96b58.asciidoc | 2 +- .../cdce7bc083dfb36e6f1d465a5c9d5049.asciidoc | 2 +- .../cde4104a29dfe942d55863cdd8718627.asciidoc | 2 +- .../ce2c2e8f5a2e4daf051b6e10122e5aae.asciidoc | 2 +- .../cedb56a71cc743d80263ce352bb21720.asciidoc | 2 +- .../cee491dd0a8d10ed0cb11a2faa0c99f0.asciidoc | 2 +- .../cf23f18761df33f08bc6f6d1875496fd.asciidoc | 2 +- .../d003f9110e5a474230abe11f36da9297.asciidoc | 2 +- .../d01a590fa9ea8a0cb34ed8dda502296c.asciidoc | 2 +- .../d03139a851888db53f8b7affd85eb495.asciidoc | 2 +- .../d04f0c8c44e8b4fb55f2e7d9d05977e7.asciidoc | 2 +- .../d1ea13e1e8372cbf1480a414723ff55a.asciidoc | 31 ++++++ 
.../d260225cf97e068ead2a8a6bb5aefd90.asciidoc | 2 +- ...d3440ec81dde5f1a01c0206cb35e539c.asciidoc} | 2 +- .../d3a5b70d493e0bd77b3f2b586341c83c.asciidoc | 2 +- .../d3e5edac5b461020017fd9d8ec7a91fa.asciidoc | 2 +- .../d603e76ab70131f7ec6b08758f95a0e3.asciidoc | 2 +- .../d69bd36335774c8ae1286cee21310241.asciidoc | 2 +- .../d6a21afa4a94b9baa734eac430940bcf.asciidoc | 2 +- .../d7919fb6f4d02dde1390775eb8365b79.asciidoc | 2 +- .../d98fb2ff2cdd154dff4a576430755d98.asciidoc | 2 +- .../dcee24dba43050e4b01b6e3a3211ce09.asciidoc | 2 +- .../dcf82f3aacae49c0bb4ccbc673f13e9f.asciidoc | 38 +++++++ .../dcfa7f479a33f459a2d222a92e651451.asciidoc | 2 +- ...dd3ee00ab2af607b32532180d60a41d4.asciidoc} | 4 +- ...dd7814258121d3c2e576a7f00469d7e3.asciidoc} | 2 +- .../dd792bb53703a57f9207e36d16e26255.asciidoc | 2 +- .../ddaadd91b7743a1c7e946ce1b593cd1b.asciidoc | 14 +++ .../dddb6a6ebd145f8411c5b4910d332f87.asciidoc | 2 +- .../df04e2e9af66d5e30b1bfdbd458cab13.asciidoc | 2 +- .../e017c2de6f93a8dd97f5c6e002dd5c4f.asciidoc | 28 +++++ .../e04267ffc50d916800b919c6cdc9622a.asciidoc | 13 +++ .../e0fcef99656799de6b88117d56f131e2.asciidoc | 2 +- .../e1d6ecab4148b09f4c605474157e7dbd.asciidoc | 2 +- .../e22a1da3c622611be6855e534c0709ae.asciidoc | 13 +++ .../e2a22c6fd58cc0becf4c383134a08f8b.asciidoc | 2 +- .../e308899a306e61d1a590868308689955.asciidoc | 36 ++++++ .../e3fe842951dc873d7d00c8f6a010c53f.asciidoc | 12 ++ .../e4b38973c74037335378d8480f1ce894.asciidoc | 44 ++++++++ .../e4b64b8277af259a52c8d3940157b5fa.asciidoc | 2 +- .../e4b6a6a921c97b4c0bbe97bd89f4cf33.asciidoc | 2 +- .../e4d1f01c025fb797a1d87f372760eabf.asciidoc | 2 +- .../e551ea38a2d8f8deac110b33304200cc.asciidoc | 2 +- ...e9625da419bff6470ffd9927c59ca159.asciidoc} | 2 +- .../e9a0b450af6219772631703d602c7092.asciidoc | 2 +- .../e9fc47015922d51c2b05e502ce9c622e.asciidoc | 2 +- .../eb54506fbc71a7d250e86b22d0600114.asciidoc | 2 +- .../eb96d7dd5f3116a50f7a86b729f1a934.asciidoc | 2 +- .../eb9a41f7fc8bdf5559bb9db822ae3a65.asciidoc | 2 +- .../ecfd0d94dd14ef05dfa861f22544b388.asciidoc | 2 +- .../ee223e604bb695cad2517d28ae63ac34.asciidoc | 2 +- .../eee6110831c08b9c1b3f56b24656e95b.asciidoc | 2 +- .../ef643bab44e7de6ddddde23a2eece5c7.asciidoc | 17 +++ .../ef9c29759459904fef162acd223462c4.asciidoc | 2 +- ...f03352bb1129938a89f97e4b650038dd.asciidoc} | 2 +- .../f097c02541056f3c0fc855e7bbeef8a8.asciidoc | 2 +- .../f160561efab38e40c2feebf5a2542ab5.asciidoc | 2 +- .../f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc | 2 +- .../f1bf0c03581b79c3324cfa3246a60e4d.asciidoc | 22 ++++ .../f298c4eb50ea97b34c57f8756eb350d3.asciidoc | 2 +- .../f29a28fffa7ec604a33a838f48f7ea79.asciidoc | 2 +- .../f2a5f77f929cc7b893b80f4bd5b1a192.asciidoc | 2 +- .../f321d4e92aa83d573ecf52bf56b0b774.asciidoc | 16 +++ .../f3fb52680482925c202c2e2f8af6f044.asciidoc | 2 +- .../f45990264f8755b96b11c69c12c90ff4.asciidoc | 2 +- .../f57ce7de0946e9416ddb9150e95f4b74.asciidoc | 2 +- .../f625fdbbe78c4198d9e40b35f3f008b3.asciidoc | 14 +++ .../f6566395f85d3afe917228643d7318d6.asciidoc | 2 +- .../f679e414de48b8fe25e458844be05618.asciidoc | 13 +++ .../f6ead39c5505045543b9225deca7367d.asciidoc | 2 +- .../f6f647eb644a2d236637ff05f833cb73.asciidoc | 12 ++ .../f785b5d17eb59f8d2a353c2dee66eb5b.asciidoc | 2 +- ...f7b20e4bb8366f6d2e4486f3bf4211bc.asciidoc} | 2 +- ...f86337e13526c968848cfe29a52d658f.asciidoc} | 2 +- .../f8f960550104c33e00dc78bc8723ccef.asciidoc | 10 ++ ...f95a4d7ab02bf400246c8822f0245f02.asciidoc} | 2 +- ...f9bad6fd369764185e1cb09b89ee39cc.asciidoc} | 14 ++- ...fb0152f6c70f647a8b6709969113486d.asciidoc} | 14 ++- 
.../fb56c2ac77d4c308d7702b6b33698382.asciidoc | 12 ++ .../fc1907515f6a913884a9f86451e90ee8.asciidoc | 2 +- .../fd620f09dbce62c6f0f603a366623607.asciidoc | 2 +- .../fd7eeadab6251d9113c4380a7fbe2572.asciidoc | 2 +- ...fe6429d0d82174aa5acf95e96e237380.asciidoc} | 14 ++- .../ff1b96d2fdcf628bd938bff9e939943c.asciidoc | 2 +- .../ff27e5cddd1f58d8a8f84f807fd27eec.asciidoc | 2 +- .../ff776c0fccf93e1c7050f7cb7efbae0b.asciidoc | 2 +- docs/guide/release-notes.asciidoc | 34 +++++- elasticsearch/_version.py | 2 +- pyproject.toml | 2 +- .../generate-docs-examples/package-lock.json | 8 +- 454 files changed, 3278 insertions(+), 617 deletions(-) rename docs/examples/{77113c65e1755313183a8969233a5a07.asciidoc => 01ae196538fac197eedbbf458a4ef31b.asciidoc} (70%) create mode 100644 docs/examples/01cd0ea360282a2c591a366679d7187d.asciidoc rename docs/examples/{9868ce609f4450702934fcbf4c340bf1.asciidoc => 05e637284bc3bedd46e0b7c26ad983c4.asciidoc} (90%) rename docs/examples/{611c1e05f4ebb48a1a8c8488238ce34d.asciidoc => 0709a38613d2de90d418ce12b36af30e.asciidoc} (74%) rename docs/examples/{840f8c863c30b04abcf2dd66b846f157.asciidoc => 083b92e8ea264e49bf9fd40fc6a3094b.asciidoc} (86%) create mode 100644 docs/examples/0bee07a581c5776e068f6f4efad5a399.asciidoc create mode 100644 docs/examples/0c8be7aec84ea86b243904f5d4162f5a.asciidoc rename docs/examples/{ce13afc0c976c5e1f424b58e0c97fd64.asciidoc => 0d30077cd34e93377a3a86f2ebd69415.asciidoc} (80%) create mode 100644 docs/examples/0e31b8ad176b31028becf9500989bcbd.asciidoc create mode 100644 docs/examples/11be807bdeaeecc8174dec88e0851ea7.asciidoc create mode 100644 docs/examples/12e9e758f7f18a6cbf27e9d0aea57a19.asciidoc create mode 100644 docs/examples/1522a9297151d7046e6345b9b27539ca.asciidoc create mode 100644 docs/examples/15f769bbd7b5fddeb3353ae726b71b14.asciidoc create mode 100644 docs/examples/16a7ce08b4a6b3af269f27eecc71d664.asciidoc create mode 100644 docs/examples/17316a81c9dbdd120b7754116bf0461c.asciidoc delete mode 100644 docs/examples/1745ac9e6d22a2ffe7ac381f9ba238f9.asciidoc delete mode 100644 docs/examples/18de6782bd18f4a9baec2feec8c02a8b.asciidoc create mode 100644 docs/examples/191074b2eebd5f74e628c2ada4b6d2e4.asciidoc create mode 100644 docs/examples/193704020a19714dec390452a4e75e8d.asciidoc delete mode 100644 docs/examples/19d60e4890cc57151d596326484d9076.asciidoc create mode 100644 docs/examples/1a7483796087053ba55029d0dc2ab356.asciidoc rename docs/examples/{be30ea12f605fd61acba689b68e00bbe.asciidoc => 1c330f0fc9eac19d0edeb8c4017b9b93.asciidoc} (91%) create mode 100644 docs/examples/1c9dac4183a3532c91dbd1a46907729b.asciidoc rename docs/examples/{7c63a1d2fbec5283e913ff39fafd0604.asciidoc => 1cbecd19be22979aefb45b4f160e77ea.asciidoc} (91%) delete mode 100644 docs/examples/1d918e206ad8dab916e59183da24d9ec.asciidoc create mode 100644 docs/examples/1fb2c77c0988bc6545040b20e3afa7e9.asciidoc rename docs/examples/{3fab530a2e43807929c0ef3ebf7d268c.asciidoc => 216e24f05cbb82c1718713fbab8623d2.asciidoc} (87%) rename docs/examples/{983a867c90e63e070518f2f709f659ee.asciidoc => 21d41e8cbd107fbdf0901f885834dafc.asciidoc} (67%) rename docs/examples/{dfcdadcf91529d3a399e05684195028e.asciidoc => 2968ffb8135f77ba3a9b876dd4918119.asciidoc} (85%) create mode 100644 docs/examples/2c079d1ae4819a0c206b9e1aa5623523.asciidoc create mode 100644 docs/examples/2c86840a46242a38cf82024a9321be46.asciidoc create mode 100644 docs/examples/2d0244c020075595acb625aa5ba8f455.asciidoc create mode 100644 docs/examples/2e7844477b41fcfa9efefee4ec0e7101.asciidoc delete mode 100644 
docs/examples/2f67db5e4d6c958258c3d70fb2d0b1c8.asciidoc rename docs/examples/{5f8d90515995a5eee189d722abe3b111.asciidoc => 310bdfb0d0d75bac7bff036a3fe51d4d.asciidoc} (91%) rename docs/examples/{d3a558ef226e9dccc1c7c61e1167547f.asciidoc => 334811cfceb6858aeec5b3461717dd63.asciidoc} (85%) create mode 100644 docs/examples/339c4e5af9f9069ad9912aa574488b59.asciidoc rename docs/examples/{65e892a362d940e4a74965f21c15ca09.asciidoc => 36ac0ef9ea63efc431580f7ade8ad53c.asciidoc} (86%) create mode 100644 docs/examples/3a489743e49902df38e3368cae00717a.asciidoc rename docs/examples/{60d689aae3f8de1e6830329dfd69a6a6.asciidoc => 3a4953663a5a3809b692c27446e16b7f.asciidoc} (85%) rename docs/examples/{d1a285aa244ec461d68f13e7078a33c0.asciidoc => 3ab8f65fcb55a0e3664c55749ec41efd.asciidoc} (93%) delete mode 100644 docs/examples/3b6718257421b5419bf4cd6a7303c57e.asciidoc create mode 100644 docs/examples/40f287bf733420bbab134b74c7d0ea5d.asciidoc create mode 100644 docs/examples/43d9e314431336a6f084cea76dfd6489.asciidoc create mode 100644 docs/examples/44198781d164a15be633d4469485a544.asciidoc create mode 100644 docs/examples/49a19615ebe2c013b8321152163478ab.asciidoc create mode 100644 docs/examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc create mode 100644 docs/examples/4da0cb8693e9ceceee2ba3b558014bbf.asciidoc rename docs/examples/{a53ff77d83222c0e76453e630d64787e.asciidoc => 4dc151eebefd484a28aed1a175743364.asciidoc} (92%) rename docs/examples/{c9373ff5ed6b026173428fbb92ca2d9f.asciidoc => 4eeded40f30949e359714a5bb6c88612.asciidoc} (87%) create mode 100644 docs/examples/4f6694ef147a73b1163bde3c13779d26.asciidoc delete mode 100644 docs/examples/50ddf374cfa8128538ea092ee98b723d.asciidoc create mode 100644 docs/examples/529671ffaf7cc75fe83a81d729788be4.asciidoc delete mode 100644 docs/examples/533087d787b48878a0bf3fa8d0851b64.asciidoc rename docs/examples/{3a7a6ab88a49b484fafb10c8eb09b562.asciidoc => 548a9b6f447bb820380c1c23e57c18c3.asciidoc} (92%) create mode 100644 docs/examples/54a47b5d07e7bfbea75c77f35eaae18d.asciidoc create mode 100644 docs/examples/55085e6a2891040b6ac696561d0787c8.asciidoc rename docs/examples/{b468d0124dc485385a34504d5b7af82a.asciidoc => 551467688d8c701315d0a371850a4056.asciidoc} (85%) rename docs/examples/{97f260817b60f3deb7f7034d7dee7e12.asciidoc => 551799fef2f86e393db83a967e4a30d1.asciidoc} (75%) rename docs/examples/{1b60ad542abb511cbd926ac8c55b609c.asciidoc => 565386eee0951865a684e41fab53b40c.asciidoc} (84%) rename docs/examples/{5db5349162a4fbe74bffb646926a2495.asciidoc => 56da252798b8e7b006738428aa1a7f4c.asciidoc} (67%) create mode 100644 docs/examples/58dd26afc919722e21358c91e112b27a.asciidoc create mode 100644 docs/examples/58f6b72009512851843c7b7a20e9504a.asciidoc rename docs/examples/{804cdf477ec829740e3d045140400c3b.asciidoc => 5a70db31f587b7ffed5e9bc1445430cb.asciidoc} (86%) create mode 100644 docs/examples/5bba213a7f543190139d1a69ab2ed076.asciidoc create mode 100644 docs/examples/5ceb734e3affe00e2cdc29af748d95bf.asciidoc rename docs/examples/{0bef1fdefeb2956d60d52d3f38397cad.asciidoc => 5cf12cc4f98d98dc79bead7e6556679c.asciidoc} (55%) rename docs/examples/{1d827ae674970692643ea81991e5396e.asciidoc => 5daf8ede198be9b118da5bee9896cb00.asciidoc} (75%) rename docs/examples/{4b113c7f475cfe484a150ddbb8e6c5c7.asciidoc => 5e021307d331a4483a5aa2198168451b.asciidoc} (61%) create mode 100644 docs/examples/60d3f9a99cc91b43aaa7524a9a74dba0.asciidoc create mode 100644 docs/examples/6329fb2840a4373ff6d342f2653247cb.asciidoc create mode 100644 
docs/examples/63a53fcb0717ae9033a679cbfc932851.asciidoc create mode 100644 docs/examples/640dbeecb736bd25f6f2b392b76a7531.asciidoc delete mode 100644 docs/examples/67154a4837cf996a9a9c3e61d6e9d1b3.asciidoc create mode 100644 docs/examples/69541f0bb81ab3797926bb2a00607cda.asciidoc rename docs/examples/{a69c7c3412af73758f629e76263063b5.asciidoc => 6b6e275efe3d2aafe0fc3443f2c96868.asciidoc} (84%) rename docs/examples/{10535507a9735fcf06600444b9067d4c.asciidoc => 6cb1dae368c945ecf7c9ec332a5743a2.asciidoc} (74%) create mode 100644 docs/examples/6e6b78e6b689a5d6aa637271b6d084e2.asciidoc create mode 100644 docs/examples/6f8bdca97e43aac75e32de655aa4314a.asciidoc create mode 100644 docs/examples/7163346755400594d1dd7e445aa19ff0.asciidoc rename docs/examples/{8bf51fd50195b46bacbf872f460ebec2.asciidoc => 71998bb300ac2a58419b0772cdc1c586.asciidoc} (70%) rename docs/examples/{a4ec42130f3c75fc9d1d5f7cb6222cd5.asciidoc => 745864ef2427188241a4702b94ea57be.asciidoc} (94%) rename docs/examples/{35b686d9d9e915d0dea7a4251781767d.asciidoc => 750ac969f9a05567f5cdf4f93d6244b6.asciidoc} (95%) create mode 100644 docs/examples/76c73b54f3f1e5cb1c0fcccd7c3fd18e.asciidoc create mode 100644 docs/examples/77082b1ffaae9ac52dfc133fa597baa7.asciidoc rename docs/examples/{84490ee2c6c07dbd2101ce2e3751e1aa.asciidoc => 7888c509774a2abfe82ca370c43d8789.asciidoc} (86%) create mode 100644 docs/examples/79ff4e7fa5c004226d05d7e2bfb5dc1e.asciidoc create mode 100644 docs/examples/7a27336a61284d079f3cc3994cf927d1.asciidoc delete mode 100644 docs/examples/7a32f44a1511ecb0d3f0b0ff2aca5c44.asciidoc create mode 100644 docs/examples/7af1f62b0cf496cbf593d83d30b472cc.asciidoc create mode 100644 docs/examples/7b9691bd34a02dd859562eb927f175e0.asciidoc delete mode 100644 docs/examples/7c8f207e43115ea8f20d2298be5aaebc.asciidoc rename docs/examples/{f3574cfee3971d98417b8dc574a91be0.asciidoc => 7d3a74fe0ba3fe95d1c3275365ff9315.asciidoc} (76%) create mode 100644 docs/examples/7db09cab02d71f3a10d91071216d80fc.asciidoc create mode 100644 docs/examples/7db798942cf2d334456e30ef5fcb801b.asciidoc create mode 100644 docs/examples/7f1fade93225f8cf6000b93334d76ce4.asciidoc rename docs/examples/{981b331db1404b39c1a612a135e4e76d.asciidoc => 8080cd9e24a8785728ce7c372ec4acf1.asciidoc} (50%) create mode 100644 docs/examples/828f0045747fde4888a947bb99e190e3.asciidoc rename docs/examples/{9aa2327ae315c39f2bce2bd22e0deb1b.asciidoc => 8417d8d35ec5fc5665dfb2f95d6d1101.asciidoc} (92%) create mode 100644 docs/examples/84ef9fe951c6d3caa7438238a5b23319.asciidoc create mode 100644 docs/examples/853fc710cea79fb4e1a85fb6d149f9c5.asciidoc create mode 100644 docs/examples/85f9fc6f98e8573efed9b034e853d5ae.asciidoc rename docs/examples/{113ac8466084ee6ac4ed272e342dc468.asciidoc => 894fce12d8f0d01e4c4083885a0c0077.asciidoc} (86%) create mode 100644 docs/examples/8b8b6aac2111b2d8b93758ac737e6543.asciidoc rename docs/examples/{bdb671866e2f0195f8dfbdb7f20bf591.asciidoc => 8cad5d95a0e7c103f08be53d0b172558.asciidoc} (56%) create mode 100644 docs/examples/8d05862be1f9e7edaba162b1888b5677.asciidoc create mode 100644 docs/examples/9169d19a80175ec94f80865d0f9bef4c.asciidoc create mode 100644 docs/examples/9313f534e1aa266cde7d4af74665497f.asciidoc rename docs/examples/{7e5bee18e61d950e823782da1b733903.asciidoc => 968fb5b92aa65af09544f7c002b0953e.asciidoc} (87%) create mode 100644 docs/examples/971fd23adb81bb5842c7750e0379336a.asciidoc create mode 100644 docs/examples/998c8479c8704bca0e121d5969859517.asciidoc create mode 100644 docs/examples/9ad0864bcd665b63551e944653d32423.asciidoc rename 
docs/examples/{2cd8439db5054c93c49f1bf50433e1bb.asciidoc => 9aedc45f83e022732789e8d796f5a43c.asciidoc} (96%) create mode 100644 docs/examples/9bd5a470ee6d2b4a1f5280adc39675d2.asciidoc rename docs/examples/{5b86d54900e2c4c043a54ca7ae2df0f0.asciidoc => 9c2ce0132e4527077443f007d27b1158.asciidoc} (67%) create mode 100644 docs/examples/9d66cb59711f24e6b4ff85608c9b5a1b.asciidoc rename docs/examples/{f38262ef72f73816ec35fa4c9c85760d.asciidoc => a1b668795243398f5bc40bcc9bead884.asciidoc} (81%) create mode 100644 docs/examples/a1dda7e7c01be96a4acf7b725d70385f.asciidoc create mode 100644 docs/examples/a3646b59da66b9ab68bdbc8dc2e6a9be.asciidoc create mode 100644 docs/examples/a3779f21f132787c48681bfb50453592.asciidoc rename docs/examples/{14a49c13c399840e64c00b487aa820c9.asciidoc => a5aeb2c8bdf91f6146026ec8edc476b6.asciidoc} (67%) rename docs/examples/{82eff1d681a5d0d1538ef011bb32ab9a.asciidoc => a769d696bf12f5e9de4b3250646d250c.asciidoc} (83%) rename docs/examples/{2826510e4aeb1c0d8dc43d317ed7624a.asciidoc => a7d814caf2a995d2aeadecc3495011be.asciidoc} (67%) rename docs/examples/{794d9a321b944347d2a8834a07b5eb22.asciidoc => a8dff54362184b2732b9bd248cf6df8a.asciidoc} (66%) rename docs/examples/{0ba5acede9d43af424e85428e7d35420.asciidoc => a95ae76fca7c3e273e4bd10323b3caa6.asciidoc} (91%) create mode 100644 docs/examples/aa814309ad5f1630886ba75255b444f5.asciidoc create mode 100644 docs/examples/aab810de3314d5e11bd564ea096785b8.asciidoc rename docs/examples/{0e5db64154a722a5cbdb84b588ce2ce8.asciidoc => aad7d80990a6a3c391ff555ce09ae9dc.asciidoc} (65%) rename docs/examples/{f9cb2547ab04461a12bfd25a35be5f96.asciidoc => ac5b91aa75696f9880451c9439fd9eec.asciidoc} (73%) rename docs/examples/{8f0a3d7b5fbdf5351750a23c493cc078.asciidoc => acc44366a9908684b2c8c2b119a4fb2b.asciidoc} (56%) create mode 100644 docs/examples/add82cbe7cd95c4be5ce1c9958f2f208.asciidoc create mode 100644 docs/examples/ae3473adaf1515afcf7773f26c018e5c.asciidoc rename docs/examples/{b577e7e7eb5ce9d16cb582356e2cc45c.asciidoc => b0ee6f19875fe5bad8aab02d60e3532c.asciidoc} (85%) rename docs/examples/{c1bb395546102279296534522061829f.asciidoc => b3479ee4586c15020549afae58d94d65.asciidoc} (71%) rename docs/examples/{36063ff9a318dba7bb0be3a230655dc8.asciidoc => b3cd07f02059165fd62a2f148be3dc58.asciidoc} (73%) rename docs/examples/{51390ca10aa22d7104e8970f09ea4512.asciidoc => b3f442a7d9eb391121dcab991787f9d6.asciidoc} (73%) create mode 100644 docs/examples/b8400dbe39215705060500f0e569f452.asciidoc create mode 100644 docs/examples/b9ba66209b7fcc111a7bcef0b3e00052.asciidoc rename docs/examples/{fe6a21b4a6b33cd6abc522947d6f3ea2.asciidoc => ba650046f9063f6c43d76f47e0f94403.asciidoc} (67%) create mode 100644 docs/examples/bb5a67e3d2d9cd3016e487e627769fe8.asciidoc create mode 100644 docs/examples/bc01aee2ab2ce1690986374bd836e1c7.asciidoc rename docs/examples/{63d1c07d22a3ca3b0ec6d950547c011c.asciidoc => bdaf00d791706d7fde25fd65d3735b94.asciidoc} (67%) create mode 100644 docs/examples/befa73a8a419fcf3b7798548b54a20bf.asciidoc rename docs/examples/{8575c966b004fb124c7afd6bb5827b50.asciidoc => c26b185952ddf9842e18493aca2de147.asciidoc} (86%) rename docs/examples/{e566e898902e432bc7ea0568400f0c50.asciidoc => c793efe7280e9b6e09981c4d4f832348.asciidoc} (71%) create mode 100644 docs/examples/c8aa8e8c0ac160b8c4efd1ac3b9f48f3.asciidoc create mode 100644 docs/examples/c8fa8d7e029792d539464fede18ce258.asciidoc create mode 100644 docs/examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc create mode 100644 docs/examples/d1ea13e1e8372cbf1480a414723ff55a.asciidoc rename 
docs/examples/{443f0e8fbba83777b2df624879d188d5.asciidoc => d3440ec81dde5f1a01c0206cb35e539c.asciidoc} (85%) create mode 100644 docs/examples/dcf82f3aacae49c0bb4ccbc673f13e9f.asciidoc rename docs/examples/{3ff634a50e2e4556bad7ea8553576992.asciidoc => dd3ee00ab2af607b32532180d60a41d4.asciidoc} (85%) rename docs/examples/{0e3abd15dde97a2334621190c4ad4f96.asciidoc => dd7814258121d3c2e576a7f00469d7e3.asciidoc} (92%) create mode 100644 docs/examples/ddaadd91b7743a1c7e946ce1b593cd1b.asciidoc create mode 100644 docs/examples/e017c2de6f93a8dd97f5c6e002dd5c4f.asciidoc create mode 100644 docs/examples/e04267ffc50d916800b919c6cdc9622a.asciidoc create mode 100644 docs/examples/e22a1da3c622611be6855e534c0709ae.asciidoc create mode 100644 docs/examples/e308899a306e61d1a590868308689955.asciidoc create mode 100644 docs/examples/e3fe842951dc873d7d00c8f6a010c53f.asciidoc create mode 100644 docs/examples/e4b38973c74037335378d8480f1ce894.asciidoc rename docs/examples/{4655c3dea0c61935b7ecf1e57441df66.asciidoc => e9625da419bff6470ffd9927c59ca159.asciidoc} (79%) create mode 100644 docs/examples/ef643bab44e7de6ddddde23a2eece5c7.asciidoc rename docs/examples/{349823d86980d40ac45248c19a59e339.asciidoc => f03352bb1129938a89f97e4b650038dd.asciidoc} (91%) create mode 100644 docs/examples/f1bf0c03581b79c3324cfa3246a60e4d.asciidoc create mode 100644 docs/examples/f321d4e92aa83d573ecf52bf56b0b774.asciidoc create mode 100644 docs/examples/f625fdbbe78c4198d9e40b35f3f008b3.asciidoc create mode 100644 docs/examples/f679e414de48b8fe25e458844be05618.asciidoc create mode 100644 docs/examples/f6f647eb644a2d236637ff05f833cb73.asciidoc rename docs/examples/{f8525c2460a577edfef156c13f55b8a7.asciidoc => f7b20e4bb8366f6d2e4486f3bf4211bc.asciidoc} (94%) rename docs/examples/{517d291044c3e4448b8804322616ab4a.asciidoc => f86337e13526c968848cfe29a52d658f.asciidoc} (92%) create mode 100644 docs/examples/f8f960550104c33e00dc78bc8723ccef.asciidoc rename docs/examples/{1af9742c71ce0587cd49a73ec7fc1f6c.asciidoc => f95a4d7ab02bf400246c8822f0245f02.asciidoc} (92%) rename docs/examples/{1e0f203aced9344382081ab095c44dde.asciidoc => f9bad6fd369764185e1cb09b89ee39cc.asciidoc} (70%) rename docs/examples/{633c8a9fc57268979d8735c557705809.asciidoc => fb0152f6c70f647a8b6709969113486d.asciidoc} (69%) create mode 100644 docs/examples/fb56c2ac77d4c308d7702b6b33698382.asciidoc rename docs/examples/{1fcc4a3280be399753dcfd5c489ff682.asciidoc => fe6429d0d82174aa5acf95e96e237380.asciidoc} (70%) diff --git a/docs/examples/00fea15cbca83be9d5f1a024ff2ec708.asciidoc b/docs/examples/00fea15cbca83be9d5f1a024ff2ec708.asciidoc index b7885f45d..820bb804e 100644 --- a/docs/examples/00fea15cbca83be9d5f1a024ff2ec708.asciidoc +++ b/docs/examples/00fea15cbca83be9d5f1a024ff2ec708.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-elasticsearch.asciidoc:112 +// inference/service-elasticsearch.asciidoc:163 [source, python] ---- diff --git a/docs/examples/77113c65e1755313183a8969233a5a07.asciidoc b/docs/examples/01ae196538fac197eedbbf458a4ef31b.asciidoc similarity index 70% rename from docs/examples/77113c65e1755313183a8969233a5a07.asciidoc rename to docs/examples/01ae196538fac197eedbbf458a4ef31b.asciidoc index 2bce7f079..e087a80ec 100644 --- a/docs/examples/77113c65e1755313183a8969233a5a07.asciidoc +++ b/docs/examples/01ae196538fac197eedbbf458a4ef31b.asciidoc @@ -1,14 +1,20 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/keyword.asciidoc:249 +// mapping/types/keyword.asciidoc:260 [source, python] ---- resp = 
client.indices.create( index="idx", + settings={ + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, mappings={ - "_source": { - "mode": "synthetic" - }, "properties": { "kwd": { "type": "keyword", diff --git a/docs/examples/01b23f09d2b7f140faf649eadbbf3ac3.asciidoc b/docs/examples/01b23f09d2b7f140faf649eadbbf3ac3.asciidoc index b3dcd8bc1..b0675f45b 100644 --- a/docs/examples/01b23f09d2b7f140faf649eadbbf3ac3.asciidoc +++ b/docs/examples/01b23f09d2b7f140faf649eadbbf3ac3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/index-templates.asciidoc:83 +// indices/index-templates.asciidoc:84 [source, python] ---- diff --git a/docs/examples/01cd0ea360282a2c591a366679d7187d.asciidoc b/docs/examples/01cd0ea360282a2c591a366679d7187d.asciidoc new file mode 100644 index 000000000..79f5d0185 --- /dev/null +++ b/docs/examples/01cd0ea360282a2c591a366679d7187d.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// troubleshooting/common-issues/task-queue-backlog.asciidoc:60 + +[source, python] +---- +resp = client.tasks.list( + human=True, + detailed=True, + actions="indices:data/write/bulk", +) +print(resp) +---- diff --git a/docs/examples/03891265df2111a38e0b6b24c1b967e1.asciidoc b/docs/examples/03891265df2111a38e0b6b24c1b967e1.asciidoc index f46691025..4c265f7ad 100644 --- a/docs/examples/03891265df2111a38e0b6b24c1b967e1.asciidoc +++ b/docs/examples/03891265df2111a38e0b6b24c1b967e1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-service-accounts.asciidoc:267 +// rest-api/security/get-service-accounts.asciidoc:299 [source, python] ---- diff --git a/docs/examples/04412d11783dac25b5fd2ec5407078a3.asciidoc b/docs/examples/04412d11783dac25b5fd2ec5407078a3.asciidoc index 639dc270c..223f90baf 100644 --- a/docs/examples/04412d11783dac25b5fd2ec5407078a3.asciidoc +++ b/docs/examples/04412d11783dac25b5fd2ec5407078a3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-api-key-id-api.asciidoc:86 +// connector/apis/update-connector-api-key-id-api.asciidoc:87 [source, python] ---- diff --git a/docs/examples/046b2249bbc49e77848c114cee940f17.asciidoc b/docs/examples/046b2249bbc49e77848c114cee940f17.asciidoc index 0289db8e2..4a3b96176 100644 --- a/docs/examples/046b2249bbc49e77848c114cee940f17.asciidoc +++ b/docs/examples/046b2249bbc49e77848c114cee940f17.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/text-expansion-query.asciidoc:157 +// query-dsl/text-expansion-query.asciidoc:164 [source, python] ---- diff --git a/docs/examples/04de2e3a9c00c2056b07bf9cf9e63a99.asciidoc b/docs/examples/04de2e3a9c00c2056b07bf9cf9e63a99.asciidoc index 078edad78..9e4fffd1f 100644 --- a/docs/examples/04de2e3a9c00c2056b07bf9cf9e63a99.asciidoc +++ b/docs/examples/04de2e3a9c00c2056b07bf9cf9e63a99.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-google-vertex-ai.asciidoc:107 +// inference/service-google-vertex-ai.asciidoc:127 [source, python] ---- diff --git a/docs/examples/9868ce609f4450702934fcbf4c340bf1.asciidoc b/docs/examples/05e637284bc3bedd46e0b7c26ad983c4.asciidoc similarity index 90% rename from docs/examples/9868ce609f4450702934fcbf4c340bf1.asciidoc rename to docs/examples/05e637284bc3bedd46e0b7c26ad983c4.asciidoc index af6cc83ab..cea3d0eb3 100644 --- a/docs/examples/9868ce609f4450702934fcbf4c340bf1.asciidoc +++ b/docs/examples/05e637284bc3bedd46e0b7c26ad983c4.asciidoc @@ -4,7 +4,7 @@ 
[source, python] ---- resp = client.ingest.put_pipeline( - id="alibabacloud_ai_search_embeddings", + id="alibabacloud_ai_search_embeddings_pipeline", processors=[ { "inference": { diff --git a/docs/examples/611c1e05f4ebb48a1a8c8488238ce34d.asciidoc b/docs/examples/0709a38613d2de90d418ce12b36af30e.asciidoc similarity index 74% rename from docs/examples/611c1e05f4ebb48a1a8c8488238ce34d.asciidoc rename to docs/examples/0709a38613d2de90d418ce12b36af30e.asciidoc index c03987dcd..719d89389 100644 --- a/docs/examples/611c1e05f4ebb48a1a8c8488238ce34d.asciidoc +++ b/docs/examples/0709a38613d2de90d418ce12b36af30e.asciidoc @@ -3,8 +3,6 @@ [source, python] ---- -resp = client.cluster.reroute( - metric="none", -) +resp = client.cluster.reroute() print(resp) ---- diff --git a/docs/examples/0737ebaea33631f001fb3f4226948492.asciidoc b/docs/examples/0737ebaea33631f001fb3f4226948492.asciidoc index 7f7af688a..8b47a2a5b 100644 --- a/docs/examples/0737ebaea33631f001fb3f4226948492.asciidoc +++ b/docs/examples/0737ebaea33631f001fb3f4226948492.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/processors/geoip.asciidoc:235 +// ingest/processors/geoip.asciidoc:237 [source, python] ---- diff --git a/docs/examples/840f8c863c30b04abcf2dd66b846f157.asciidoc b/docs/examples/083b92e8ea264e49bf9fd40fc6a3094b.asciidoc similarity index 86% rename from docs/examples/840f8c863c30b04abcf2dd66b846f157.asciidoc rename to docs/examples/083b92e8ea264e49bf9fd40fc6a3094b.asciidoc index 2ab4931b0..1407a4b6c 100644 --- a/docs/examples/840f8c863c30b04abcf2dd66b846f157.asciidoc +++ b/docs/examples/083b92e8ea264e49bf9fd40fc6a3094b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-elasticsearch.asciidoc:172 +// inference/service-elasticsearch.asciidoc:223 [source, python] ---- @@ -14,6 +14,7 @@ resp = client.inference.put( "min_number_of_allocations": 3, "max_number_of_allocations": 10 }, + "num_threads": 1, "model_id": ".multilingual-e5-small" } }, diff --git a/docs/examples/095e3f21941a9cc75f398389a075152d.asciidoc b/docs/examples/095e3f21941a9cc75f398389a075152d.asciidoc index 01469ab8e..00532f55c 100644 --- a/docs/examples/095e3f21941a9cc75f398389a075152d.asciidoc +++ b/docs/examples/095e3f21941a9cc75f398389a075152d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/infer-trained-model.asciidoc:1043 +// ml/trained-models/apis/infer-trained-model.asciidoc:1144 [source, python] ---- diff --git a/docs/examples/09769561f082b50558fb7d8707719963.asciidoc b/docs/examples/09769561f082b50558fb7d8707719963.asciidoc index 9eb29e774..9aa9cba6e 100644 --- a/docs/examples/09769561f082b50558fb7d8707719963.asciidoc +++ b/docs/examples/09769561f082b50558fb7d8707719963.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/nodes-stats.asciidoc:2578 +// cluster/nodes-stats.asciidoc:2582 [source, python] ---- diff --git a/docs/examples/09cb1b18bf4033b4afafb25bd3dab12c.asciidoc b/docs/examples/09cb1b18bf4033b4afafb25bd3dab12c.asciidoc index 6e910f0cc..7ed47659e 100644 --- a/docs/examples/09cb1b18bf4033b4afafb25bd3dab12c.asciidoc +++ b/docs/examples/09cb1b18bf4033b4afafb25bd3dab12c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/rule-query.asciidoc:65 +// query-dsl/rule-query.asciidoc:71 [source, python] ---- diff --git a/docs/examples/0a650401134f07e40216f0d0d1a66a32.asciidoc b/docs/examples/0a650401134f07e40216f0d0d1a66a32.asciidoc index e9aa27856..c9a221494 100644 --- 
a/docs/examples/0a650401134f07e40216f0d0d1a66a32.asciidoc +++ b/docs/examples/0a650401134f07e40216f0d0d1a66a32.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/allocation.asciidoc:119 +// cat/allocation.asciidoc:120 [source, python] ---- diff --git a/docs/examples/0ad8edd10542ec2c4d5d8700d7e2ba97.asciidoc b/docs/examples/0ad8edd10542ec2c4d5d8700d7e2ba97.asciidoc index 25df3cdc2..0941c559f 100644 --- a/docs/examples/0ad8edd10542ec2c4d5d8700d7e2ba97.asciidoc +++ b/docs/examples/0ad8edd10542ec2c4d5d8700d7e2ba97.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-amazon-bedrock.asciidoc:136 +// inference/service-amazon-bedrock.asciidoc:156 [source, python] ---- diff --git a/docs/examples/0ade87c8cb0e3c188d2e3dce279d5cc2.asciidoc b/docs/examples/0ade87c8cb0e3c188d2e3dce279d5cc2.asciidoc index bf81f55e9..d92db47de 100644 --- a/docs/examples/0ade87c8cb0e3c188d2e3dce279d5cc2.asciidoc +++ b/docs/examples/0ade87c8cb0e3c188d2e3dce279d5cc2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-filtering-api.asciidoc:115 +// connector/apis/update-connector-filtering-api.asciidoc:116 [source, python] ---- diff --git a/docs/examples/0bee07a581c5776e068f6f4efad5a399.asciidoc b/docs/examples/0bee07a581c5776e068f6f4efad5a399.asciidoc new file mode 100644 index 000000000..3e564fc3a --- /dev/null +++ b/docs/examples/0bee07a581c5776e068f6f4efad5a399.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// esql/esql-across-clusters.asciidoc:197 + +[source, python] +---- +resp = client.esql.async_query( + format="json", + body={ + "query": "\n FROM my-index-000001,cluster_one:my-index-000001,cluster_two:my-index*\n | STATS COUNT(http.response.status_code) BY user.id\n | LIMIT 2\n ", + "include_ccs_metadata": True + }, +) +print(resp) +---- diff --git a/docs/examples/0c2ca704a39dda8b3a7c5806ec6c6cf8.asciidoc b/docs/examples/0c2ca704a39dda8b3a7c5806ec6c6cf8.asciidoc index c2f9ea70e..e9e7a172a 100644 --- a/docs/examples/0c2ca704a39dda8b3a7c5806ec6c6cf8.asciidoc +++ b/docs/examples/0c2ca704a39dda8b3a7c5806ec6c6cf8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/runtime.asciidoc:1379 +// mapping/runtime.asciidoc:1377 [source, python] ---- diff --git a/docs/examples/0c2d9ac7e3f28d4d802e21cbbbcfeb34.asciidoc b/docs/examples/0c2d9ac7e3f28d4d802e21cbbbcfeb34.asciidoc index 110616292..0a3e4fe55 100644 --- a/docs/examples/0c2d9ac7e3f28d4d802e21cbbbcfeb34.asciidoc +++ b/docs/examples/0c2d9ac7e3f28d4d802e21cbbbcfeb34.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/recovery.asciidoc:113 +// cat/recovery.asciidoc:112 [source, python] ---- diff --git a/docs/examples/0c8be7aec84ea86b243904f5d4162f5a.asciidoc b/docs/examples/0c8be7aec84ea86b243904f5d4162f5a.asciidoc new file mode 100644 index 000000000..455b6a220 --- /dev/null +++ b/docs/examples/0c8be7aec84ea86b243904f5d4162f5a.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// quickstart/full-text-filtering-tutorial.asciidoc:292 + +[source, python] +---- +resp = client.search( + index="cooking_blog", + query={ + "match": { + "title": { + "query": "fluffy pancakes breakfast", + "minimum_should_match": 2 + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/ce13afc0c976c5e1f424b58e0c97fd64.asciidoc b/docs/examples/0d30077cd34e93377a3a86f2ebd69415.asciidoc similarity index 80% rename from docs/examples/ce13afc0c976c5e1f424b58e0c97fd64.asciidoc rename to 
docs/examples/0d30077cd34e93377a3a86f2ebd69415.asciidoc index bb8f5780c..1d9eb5f3c 100644 --- a/docs/examples/ce13afc0c976c5e1f424b58e0c97fd64.asciidoc +++ b/docs/examples/0d30077cd34e93377a3a86f2ebd69415.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/create-connector-api.asciidoc:111 +// connector/apis/create-connector-api.asciidoc:112 [source, python] ---- @@ -9,7 +9,7 @@ resp = client.connector.put( name="My Connector", description="My Connector to sync data to Elastic index from Google Drive", service_type="google_drive", - language="english", + language="en", ) print(resp) ---- diff --git a/docs/examples/0d8063b484a18f8672fb5ed8712c5c97.asciidoc b/docs/examples/0d8063b484a18f8672fb5ed8712c5c97.asciidoc index dd88587e1..a0fcce54b 100644 --- a/docs/examples/0d8063b484a18f8672fb5ed8712c5c97.asciidoc +++ b/docs/examples/0d8063b484a18f8672fb5ed8712c5c97.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-index-template.asciidoc:296 +// indices/put-index-template.asciidoc:299 [source, python] ---- diff --git a/docs/examples/0e31b8ad176b31028becf9500989bcbd.asciidoc b/docs/examples/0e31b8ad176b31028becf9500989bcbd.asciidoc new file mode 100644 index 000000000..5965ade94 --- /dev/null +++ b/docs/examples/0e31b8ad176b31028becf9500989bcbd.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// inference/service-watsonx-ai.asciidoc:96 + +[source, python] +---- +resp = client.inference.put( + task_type="text_embedding", + inference_id="watsonx-embeddings", + inference_config={ + "service": "watsonxai", + "service_settings": { + "api_key": "", + "url": "", + "model_id": "ibm/slate-30m-english-rtrvr", + "project_id": "", + "api_version": "2024-03-14" + } + }, +) +print(resp) +---- diff --git a/docs/examples/0eccea755bd4f6dd47579a9022690546.asciidoc b/docs/examples/0eccea755bd4f6dd47579a9022690546.asciidoc index d5e443e7e..5325fc189 100644 --- a/docs/examples/0eccea755bd4f6dd47579a9022690546.asciidoc +++ b/docs/examples/0eccea755bd4f6dd47579a9022690546.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// modules/cluster/remote-clusters-migration.asciidoc:132 +// modules/cluster/remote-clusters-migration.asciidoc:133 [source, python] ---- diff --git a/docs/examples/103296e16b4233926ad1f07360385606.asciidoc b/docs/examples/103296e16b4233926ad1f07360385606.asciidoc index 8becee40d..311b42d6c 100644 --- a/docs/examples/103296e16b4233926ad1f07360385606.asciidoc +++ b/docs/examples/103296e16b4233926ad1f07360385606.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// analysis/analyzers/lang-analyzer.asciidoc:1793 +// analysis/analyzers/lang-analyzer.asciidoc:1794 [source, python] ---- diff --git a/docs/examples/11be807bdeaeecc8174dec88e0851ea7.asciidoc b/docs/examples/11be807bdeaeecc8174dec88e0851ea7.asciidoc new file mode 100644 index 000000000..409af45c2 --- /dev/null +++ b/docs/examples/11be807bdeaeecc8174dec88e0851ea7.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// connector/docs/connectors-API-tutorial.asciidoc:437 + +[source, python] +---- +resp = client.perform_request( + "GET", + "/_connector/_sync_job", + params={ + "connector_id": "my-connector-id", + "size": "1" + }, +) +print(resp) +---- diff --git a/docs/examples/1295f51b9e5d4ba9987b02478146b50b.asciidoc b/docs/examples/1295f51b9e5d4ba9987b02478146b50b.asciidoc index 8f63108f6..69cab898b 100644 --- a/docs/examples/1295f51b9e5d4ba9987b02478146b50b.asciidoc +++ 
b/docs/examples/1295f51b9e5d4ba9987b02478146b50b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/high-jvm-memory-pressure.asciidoc:70 +// troubleshooting/common-issues/high-jvm-memory-pressure.asciidoc:72 [source, python] ---- diff --git a/docs/examples/12e9e758f7f18a6cbf27e9d0aea57a19.asciidoc b/docs/examples/12e9e758f7f18a6cbf27e9d0aea57a19.asciidoc new file mode 100644 index 000000000..19be5e48e --- /dev/null +++ b/docs/examples/12e9e758f7f18a6cbf27e9d0aea57a19.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// connector/docs/connectors-managed-service.asciidoc:167 + +[source, python] +---- +resp = client.update( + index=".elastic-connectors", + id="connector_id", + doc={ + "features": { + "native_connector_api_keys": { + "enabled": True + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc b/docs/examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc index fa54e865d..67a855b1c 100644 --- a/docs/examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc +++ b/docs/examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/post-inference.asciidoc:201 +// inference/post-inference.asciidoc:196 [source, python] ---- diff --git a/docs/examples/146bd22fd0e7be2345619e8f11d3a4cb.asciidoc b/docs/examples/146bd22fd0e7be2345619e8f11d3a4cb.asciidoc index e1ecfa48c..4e1e563b6 100644 --- a/docs/examples/146bd22fd0e7be2345619e8f11d3a4cb.asciidoc +++ b/docs/examples/146bd22fd0e7be2345619e8f11d3a4cb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/hotspotting.asciidoc:247 +// troubleshooting/common-issues/hotspotting.asciidoc:249 [source, python] ---- diff --git a/docs/examples/150b5fee5678bf8cdf0932da73eada80.asciidoc b/docs/examples/150b5fee5678bf8cdf0932da73eada80.asciidoc index fb07636cc..f5004f878 100644 --- a/docs/examples/150b5fee5678bf8cdf0932da73eada80.asciidoc +++ b/docs/examples/150b5fee5678bf8cdf0932da73eada80.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/nodes-stats.asciidoc:2546 +// cluster/nodes-stats.asciidoc:2550 [source, python] ---- diff --git a/docs/examples/1522a9297151d7046e6345b9b27539ca.asciidoc b/docs/examples/1522a9297151d7046e6345b9b27539ca.asciidoc new file mode 100644 index 000000000..1b3b7fee7 --- /dev/null +++ b/docs/examples/1522a9297151d7046e6345b9b27539ca.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// connector/docs/connectors-API-tutorial.asciidoc:340 + +[source, python] +---- +resp = client.connector.update_configuration( + connector_id="my-connector-id", + values={ + "host": "127.0.0.1", + "port": 5432, + "username": "myuser", + "password": "mypassword", + "database": "chinook", + "schema": "public", + "tables": "album,artist" + }, +) +print(resp) +---- diff --git a/docs/examples/1570976f7807b88dc8a046b833be057b.asciidoc b/docs/examples/1570976f7807b88dc8a046b833be057b.asciidoc index b5ec59379..293680f4f 100644 --- a/docs/examples/1570976f7807b88dc8a046b833be057b.asciidoc +++ b/docs/examples/1570976f7807b88dc8a046b833be057b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/hotspotting.asciidoc:28 +// troubleshooting/common-issues/hotspotting.asciidoc:30 [source, python] ---- diff --git a/docs/examples/15f769bbd7b5fddeb3353ae726b71b14.asciidoc b/docs/examples/15f769bbd7b5fddeb3353ae726b71b14.asciidoc new file mode 100644 index 
000000000..9478424ba --- /dev/null +++ b/docs/examples/15f769bbd7b5fddeb3353ae726b71b14.asciidoc @@ -0,0 +1,64 @@ +// This file is autogenerated, DO NOT EDIT +// vectors/vector-functions.asciidoc:401 + +[source, python] +---- +resp = client.search( + index="my-index-bit-vectors", + query={ + "script_score": { + "query": { + "match_all": {} + }, + "script": { + "source": "dotProduct(params.query_vector, 'my_dense_vector')", + "params": { + "query_vector": [ + 0.23, + 1.45, + 3.67, + 4.89, + -0.56, + 2.34, + 3.21, + 1.78, + -2.45, + 0.98, + -0.12, + 3.45, + 4.56, + 2.78, + 1.23, + 0.67, + 3.89, + 4.12, + -2.34, + 1.56, + 0.78, + 3.21, + 4.12, + 2.45, + -1.67, + 0.34, + -3.45, + 4.56, + -2.78, + 1.23, + -0.67, + 3.89, + -4.34, + 2.12, + -1.56, + 0.78, + -3.21, + 4.45, + 2.12, + 1.67 + ] + } + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/1637ef51d673b35cc8894ee80cd61c87.asciidoc b/docs/examples/1637ef51d673b35cc8894ee80cd61c87.asciidoc index cb4a24a1a..3701d222a 100644 --- a/docs/examples/1637ef51d673b35cc8894ee80cd61c87.asciidoc +++ b/docs/examples/1637ef51d673b35cc8894ee80cd61c87.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// tab-widgets/cpu-usage.asciidoc:14 +// troubleshooting/common-issues/high-cpu-usage.asciidoc:24 [source, python] ---- diff --git a/docs/examples/16a7ce08b4a6b3af269f27eecc71d664.asciidoc b/docs/examples/16a7ce08b4a6b3af269f27eecc71d664.asciidoc new file mode 100644 index 000000000..1e35ec0d9 --- /dev/null +++ b/docs/examples/16a7ce08b4a6b3af269f27eecc71d664.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// quickstart/getting-started.asciidoc:546 + +[source, python] +---- +resp = client.indices.delete( + index="books", +) +print(resp) + +resp1 = client.indices.delete( + index="my-explicit-mappings-books", +) +print(resp1) +---- diff --git a/docs/examples/17316a81c9dbdd120b7754116bf0461c.asciidoc b/docs/examples/17316a81c9dbdd120b7754116bf0461c.asciidoc new file mode 100644 index 000000000..cd2b37009 --- /dev/null +++ b/docs/examples/17316a81c9dbdd120b7754116bf0461c.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// connector/docs/_connectors-create-native-api-key.asciidoc:12 + +[source, python] +---- +resp = client.security.create_api_key( + name="my-connector-api-key", + role_descriptors={ + "my-connector-connector-role": { + "cluster": [ + "monitor", + "manage_connector" + ], + "indices": [ + { + "names": [ + "my-index_name", + ".search-acl-filter-my-index_name", + ".elastic-connectors*" + ], + "privileges": [ + "all" + ], + "allow_restricted_indices": False + } + ] + } + }, +) +print(resp) +---- diff --git a/docs/examples/1745ac9e6d22a2ffe7ac381f9ba238f9.asciidoc b/docs/examples/1745ac9e6d22a2ffe7ac381f9ba238f9.asciidoc deleted file mode 100644 index 3b131eced..000000000 --- a/docs/examples/1745ac9e6d22a2ffe7ac381f9ba238f9.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/high-cpu-usage.asciidoc:26 - -[source, python] ----- -resp = client.nodes.hot_threads( - node_id="my-node,my-other-node", -) -print(resp) ----- diff --git a/docs/examples/182df084f028479ecbe8d7648ddad892.asciidoc b/docs/examples/182df084f028479ecbe8d7648ddad892.asciidoc index 7a7bde5c4..d6f5ea695 100644 --- a/docs/examples/182df084f028479ecbe8d7648ddad892.asciidoc +++ b/docs/examples/182df084f028479ecbe8d7648ddad892.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ilm/apis/get-status.asciidoc:60 +// 
tab-widgets/troubleshooting/data/start-ilm.asciidoc:84 [source, python] ---- diff --git a/docs/examples/18de6782bd18f4a9baec2feec8c02a8b.asciidoc b/docs/examples/18de6782bd18f4a9baec2feec8c02a8b.asciidoc deleted file mode 100644 index 999873e69..000000000 --- a/docs/examples/18de6782bd18f4a9baec2feec8c02a8b.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// migration/migrate_8_0/migrate_to_java_time.asciidoc:268 - -[source, python] ----- -resp = client.indices.create( - index="my-index-000002", - mappings={ - "properties": { - "datetime": { - "type": "date", - "format": "uuuu/MM/dd HH:mm:ss||uuuu/MM/dd||epoch_millis" - } - } - }, -) -print(resp) ----- diff --git a/docs/examples/191074b2eebd5f74e628c2ada4b6d2e4.asciidoc b/docs/examples/191074b2eebd5f74e628c2ada4b6d2e4.asciidoc new file mode 100644 index 000000000..157634563 --- /dev/null +++ b/docs/examples/191074b2eebd5f74e628c2ada4b6d2e4.asciidoc @@ -0,0 +1,58 @@ +// This file is autogenerated, DO NOT EDIT +// quickstart/full-text-filtering-tutorial.asciidoc:521 + +[source, python] +---- +resp = client.search( + index="cooking_blog", + query={ + "bool": { + "must": [ + { + "term": { + "category.keyword": "Main Course" + } + }, + { + "term": { + "tags": "vegetarian" + } + }, + { + "range": { + "rating": { + "gte": 4.5 + } + } + } + ], + "should": [ + { + "multi_match": { + "query": "curry spicy", + "fields": [ + "title^2", + "description" + ] + } + }, + { + "range": { + "date": { + "gte": "now-1M/d" + } + } + } + ], + "must_not": [ + { + "term": { + "category.keyword": "Dessert" + } + } + ] + } + }, +) +print(resp) +---- diff --git a/docs/examples/192fa1f6f51dfb640e9e15bb5cd7eebc.asciidoc b/docs/examples/192fa1f6f51dfb640e9e15bb5cd7eebc.asciidoc index e33ee65ec..16e0c0f3b 100644 --- a/docs/examples/192fa1f6f51dfb640e9e15bb5cd7eebc.asciidoc +++ b/docs/examples/192fa1f6f51dfb640e9e15bb5cd7eebc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ilm/error-handling.asciidoc:144 +// ilm/error-handling.asciidoc:147 [source, python] ---- diff --git a/docs/examples/193704020a19714dec390452a4e75e8d.asciidoc b/docs/examples/193704020a19714dec390452a4e75e8d.asciidoc new file mode 100644 index 000000000..dc757c835 --- /dev/null +++ b/docs/examples/193704020a19714dec390452a4e75e8d.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// quickstart/getting-started.asciidoc:54 + +[source, python] +---- +resp = client.indices.create( + index="books", +) +print(resp) +---- diff --git a/docs/examples/194bbac15e709174ac85b681f3a3d137.asciidoc b/docs/examples/194bbac15e709174ac85b681f3a3d137.asciidoc index e1a1a265e..9ace291db 100644 --- a/docs/examples/194bbac15e709174ac85b681f3a3d137.asciidoc +++ b/docs/examples/194bbac15e709174ac85b681f3a3d137.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-index-template.asciidoc:186 +// indices/put-index-template.asciidoc:189 [source, python] ---- diff --git a/docs/examples/196aed02b11def364bab84e455c1a073.asciidoc b/docs/examples/196aed02b11def364bab84e455c1a073.asciidoc index 4b37db9ca..8eeb99faf 100644 --- a/docs/examples/196aed02b11def364bab84e455c1a073.asciidoc +++ b/docs/examples/196aed02b11def364bab84e455c1a073.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-index-template.asciidoc:324 +// indices/put-index-template.asciidoc:327 [source, python] ---- diff --git a/docs/examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc 
b/docs/examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc index b41aa69e7..565dfeef0 100644 --- a/docs/examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc +++ b/docs/examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/apis/simulate-ingest.asciidoc:205 +// ingest/apis/simulate-ingest.asciidoc:273 [source, python] ---- diff --git a/docs/examples/19d60e4890cc57151d596326484d9076.asciidoc b/docs/examples/19d60e4890cc57151d596326484d9076.asciidoc deleted file mode 100644 index 62e24dbbd..000000000 --- a/docs/examples/19d60e4890cc57151d596326484d9076.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// ingest/apis/delete-geoip-database.asciidoc:10 - -[source, python] ----- -resp = client.perform_request( - "DELETE", - "/_ingest/geoip/database/my-database-id", -) -print(resp) ----- diff --git a/docs/examples/19f1f9f25933f8e7aba59a10881c648b.asciidoc b/docs/examples/19f1f9f25933f8e7aba59a10881c648b.asciidoc index 3846208d6..0b123e38b 100644 --- a/docs/examples/19f1f9f25933f8e7aba59a10881c648b.asciidoc +++ b/docs/examples/19f1f9f25933f8e7aba59a10881c648b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/semantic-text.asciidoc:22 +// mapping/types/semantic-text.asciidoc:21 [source, python] ---- diff --git a/docs/examples/1a3897cfb4f974c09d0d847baac8aa6d.asciidoc b/docs/examples/1a3897cfb4f974c09d0d847baac8aa6d.asciidoc index 8fdcf6d8a..3935ff8bc 100644 --- a/docs/examples/1a3897cfb4f974c09d0d847baac8aa6d.asciidoc +++ b/docs/examples/1a3897cfb4f974c09d0d847baac8aa6d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/hotspotting.asciidoc:190 +// troubleshooting/common-issues/hotspotting.asciidoc:192 [source, python] ---- diff --git a/docs/examples/1a56df055b94466ca76818e0858752c6.asciidoc b/docs/examples/1a56df055b94466ca76818e0858752c6.asciidoc index e45764af1..d861ad7b6 100644 --- a/docs/examples/1a56df055b94466ca76818e0858752c6.asciidoc +++ b/docs/examples/1a56df055b94466ca76818e0858752c6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// tab-widgets/inference-api/infer-api-task.asciidoc:89 +// tab-widgets/inference-api/infer-api-task.asciidoc:97 [source, python] ---- diff --git a/docs/examples/1a7483796087053ba55029d0dc2ab356.asciidoc b/docs/examples/1a7483796087053ba55029d0dc2ab356.asciidoc new file mode 100644 index 000000000..cd3f38242 --- /dev/null +++ b/docs/examples/1a7483796087053ba55029d0dc2ab356.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// esql/multivalued-fields.asciidoc:187 + +[source, python] +---- +resp = client.index( + index="mv", + refresh=True, + document={ + "a": [ + 2, + None, + 1 + ] + }, +) +print(resp) + +resp1 = client.esql.query( + query="FROM mv | LIMIT 1", +) +print(resp1) +---- diff --git a/docs/examples/1a9e03ce0355872a7db27fedc783fbec.asciidoc b/docs/examples/1a9e03ce0355872a7db27fedc783fbec.asciidoc index e0431da95..c5c8f0d23 100644 --- a/docs/examples/1a9e03ce0355872a7db27fedc783fbec.asciidoc +++ b/docs/examples/1a9e03ce0355872a7db27fedc783fbec.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-google-vertex-ai.asciidoc:125 +// inference/service-google-vertex-ai.asciidoc:145 [source, python] ---- diff --git a/docs/examples/1aa96eeaf63fc967e166d1a2fcdccccc.asciidoc b/docs/examples/1aa96eeaf63fc967e166d1a2fcdccccc.asciidoc index f2acde7ac..6ae6ba615 100644 --- 
a/docs/examples/1aa96eeaf63fc967e166d1a2fcdccccc.asciidoc +++ b/docs/examples/1aa96eeaf63fc967e166d1a2fcdccccc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/params/subobjects.asciidoc:130 +// mapping/params/subobjects.asciidoc:131 [source, python] ---- diff --git a/docs/examples/1b37e2237c9e3aaf84d56cc5c0bdb9ec.asciidoc b/docs/examples/1b37e2237c9e3aaf84d56cc5c0bdb9ec.asciidoc index 827fafa03..6e8bc60ae 100644 --- a/docs/examples/1b37e2237c9e3aaf84d56cc5c0bdb9ec.asciidoc +++ b/docs/examples/1b37e2237c9e3aaf84d56cc5c0bdb9ec.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ilm/error-handling.asciidoc:15 +// ilm/error-handling.asciidoc:18 [source, python] ---- diff --git a/docs/examples/1ba7afe23a26fe9ac7856d8c5bc1059d.asciidoc b/docs/examples/1ba7afe23a26fe9ac7856d8c5bc1059d.asciidoc index dc02df84d..67df96154 100644 --- a/docs/examples/1ba7afe23a26fe9ac7856d8c5bc1059d.asciidoc +++ b/docs/examples/1ba7afe23a26fe9ac7856d8c5bc1059d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// analysis/analyzers/lang-analyzer.asciidoc:1501 +// analysis/analyzers/lang-analyzer.asciidoc:1502 [source, python] ---- diff --git a/docs/examples/be30ea12f605fd61acba689b68e00bbe.asciidoc b/docs/examples/1c330f0fc9eac19d0edeb8c4017b9b93.asciidoc similarity index 91% rename from docs/examples/be30ea12f605fd61acba689b68e00bbe.asciidoc rename to docs/examples/1c330f0fc9eac19d0edeb8c4017b9b93.asciidoc index 936d4c7a2..e222ea879 100644 --- a/docs/examples/be30ea12f605fd61acba689b68e00bbe.asciidoc +++ b/docs/examples/1c330f0fc9eac19d0edeb8c4017b9b93.asciidoc @@ -4,7 +4,7 @@ [source, python] ---- resp = client.ingest.put_pipeline( - id="hugging_face_embeddings", + id="hugging_face_embeddings_pipeline", processors=[ { "inference": { diff --git a/docs/examples/1c9dac4183a3532c91dbd1a46907729b.asciidoc b/docs/examples/1c9dac4183a3532c91dbd1a46907729b.asciidoc new file mode 100644 index 000000000..08cabd096 --- /dev/null +++ b/docs/examples/1c9dac4183a3532c91dbd1a46907729b.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// connector/docs/connectors-API-tutorial.asciidoc:459 + +[source, python] +---- +resp = client.indices.delete( + index="music", +) +print(resp) +---- diff --git a/docs/examples/7c63a1d2fbec5283e913ff39fafd0604.asciidoc b/docs/examples/1cbecd19be22979aefb45b4f160e77ea.asciidoc similarity index 91% rename from docs/examples/7c63a1d2fbec5283e913ff39fafd0604.asciidoc rename to docs/examples/1cbecd19be22979aefb45b4f160e77ea.asciidoc index 8bcc50f7f..7a1d92818 100644 --- a/docs/examples/7c63a1d2fbec5283e913ff39fafd0604.asciidoc +++ b/docs/examples/1cbecd19be22979aefb45b4f160e77ea.asciidoc @@ -4,7 +4,7 @@ [source, python] ---- resp = client.ingest.put_pipeline( - id="google_vertex_ai_embeddings", + id="google_vertex_ai_embeddings_pipeline", processors=[ { "inference": { diff --git a/docs/examples/1d918e206ad8dab916e59183da24d9ec.asciidoc b/docs/examples/1d918e206ad8dab916e59183da24d9ec.asciidoc deleted file mode 100644 index 236c5a03f..000000000 --- a/docs/examples/1d918e206ad8dab916e59183da24d9ec.asciidoc +++ /dev/null @@ -1,13 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// watcher/how-watcher-works.asciidoc:156 - -[source, python] ----- -resp = client.indices.put_settings( - index=".watches", - settings={ - "index.routing.allocation.include.role": "watcher" - }, -) -print(resp) ----- diff --git a/docs/examples/1dadb7efe27b6c0c231eb6535e413bd9.asciidoc 
b/docs/examples/1dadb7efe27b6c0c231eb6535e413bd9.asciidoc index 78c452868..631cc34b0 100644 --- a/docs/examples/1dadb7efe27b6c0c231eb6535e413bd9.asciidoc +++ b/docs/examples/1dadb7efe27b6c0c231eb6535e413bd9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-azure-ai-studio.asciidoc:142 +// inference/service-azure-ai-studio.asciidoc:162 [source, python] ---- diff --git a/docs/examples/1e0b85750d4e63ebbc927d4627c44bf8.asciidoc b/docs/examples/1e0b85750d4e63ebbc927d4627c44bf8.asciidoc index 7083e786e..b4d009c90 100644 --- a/docs/examples/1e0b85750d4e63ebbc927d4627c44bf8.asciidoc +++ b/docs/examples/1e0b85750d4e63ebbc927d4627c44bf8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// how-to/size-your-shards.asciidoc:601 +// how-to/size-your-shards.asciidoc:604 [source, python] ---- diff --git a/docs/examples/1e26353d546d733634187b8c3a7837a7.asciidoc b/docs/examples/1e26353d546d733634187b8c3a7837a7.asciidoc index 588151968..e5f2463f7 100644 --- a/docs/examples/1e26353d546d733634187b8c3a7837a7.asciidoc +++ b/docs/examples/1e26353d546d733634187b8c3a7837a7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/list-connectors-api.asciidoc:100 +// connector/apis/list-connectors-api.asciidoc:101 [source, python] ---- diff --git a/docs/examples/1e547696f54582840040b1aa6661760c.asciidoc b/docs/examples/1e547696f54582840040b1aa6661760c.asciidoc index 3166b147c..ef5d0cf52 100644 --- a/docs/examples/1e547696f54582840040b1aa6661760c.asciidoc +++ b/docs/examples/1e547696f54582840040b1aa6661760c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/rollover-index.asciidoc:226 +// tab-widgets/troubleshooting/data/restore-from-snapshot.asciidoc:400 [source, python] ---- diff --git a/docs/examples/1fb2c77c0988bc6545040b20e3afa7e9.asciidoc b/docs/examples/1fb2c77c0988bc6545040b20e3afa7e9.asciidoc new file mode 100644 index 000000000..14366fbc8 --- /dev/null +++ b/docs/examples/1fb2c77c0988bc6545040b20e3afa7e9.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// connector/docs/dls-e2e-guide.asciidoc:139 + +[source, python] +---- +resp = client.security.create_api_key( + name="john-api-key", + expiration="1d", + role_descriptors={ + "sharepoint-online-role": { + "index": [ + { + "names": [ + "sharepoint-search-application" + ], + "privileges": [ + "read" + ], + "query": { + "template": { + "params": { + "access_control": [ + "john@example.co", + "Engineering Members" + ] + }, + "source": "\n {\n \"bool\": {\n \"should\": [\n {\n \"bool\": {\n \"must_not\": {\n \"exists\": {\n \"field\": \"_allow_access_control\"\n }\n }\n }\n },\n {\n \"terms\": {\n \"_allow_access_control.enum\": {{#toJson}}access_control{{/toJson}}\n }\n }\n ]\n }\n }\n " + } + } + } + ], + "restriction": { + "workflows": [ + "search_application_query" + ] + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/2155c920d7d860f3ee7542f2211b4fec.asciidoc b/docs/examples/2155c920d7d860f3ee7542f2211b4fec.asciidoc index d478f5eb2..b4c25f594 100644 --- a/docs/examples/2155c920d7d860f3ee7542f2211b4fec.asciidoc +++ b/docs/examples/2155c920d7d860f3ee7542f2211b4fec.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/text-expansion-query.asciidoc:18 +// query-dsl/text-expansion-query.asciidoc:25 [source, python] ---- diff --git a/docs/examples/3fab530a2e43807929c0ef3ebf7d268c.asciidoc b/docs/examples/216e24f05cbb82c1718713fbab8623d2.asciidoc similarity index 87% rename from 
docs/examples/3fab530a2e43807929c0ef3ebf7d268c.asciidoc rename to docs/examples/216e24f05cbb82c1718713fbab8623d2.asciidoc index 1d8590c21..9ddf0fe44 100644 --- a/docs/examples/3fab530a2e43807929c0ef3ebf7d268c.asciidoc +++ b/docs/examples/216e24f05cbb82c1718713fbab8623d2.asciidoc @@ -1,11 +1,11 @@ // This file is autogenerated, DO NOT EDIT -// ingest/processors/geoip.asciidoc:134 +// ingest/processors/geoip.asciidoc:136 [source, python] ---- resp = client.ingest.put_pipeline( id="geoip", - description="Add geoip info", + description="Add ip geolocation info", processors=[ { "geoip": { diff --git a/docs/examples/21cd01cb90d3ea1acd0ab22d7edd2c88.asciidoc b/docs/examples/21cd01cb90d3ea1acd0ab22d7edd2c88.asciidoc index 00a052e0f..23c92bf91 100644 --- a/docs/examples/21cd01cb90d3ea1acd0ab22d7edd2c88.asciidoc +++ b/docs/examples/21cd01cb90d3ea1acd0ab22d7edd2c88.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// tab-widgets/inference-api/infer-api-task.asciidoc:154 +// tab-widgets/inference-api/infer-api-task.asciidoc:162 [source, python] ---- diff --git a/docs/examples/983a867c90e63e070518f2f709f659ee.asciidoc b/docs/examples/21d41e8cbd107fbdf0901f885834dafc.asciidoc similarity index 67% rename from docs/examples/983a867c90e63e070518f2f709f659ee.asciidoc rename to docs/examples/21d41e8cbd107fbdf0901f885834dafc.asciidoc index 23a1b1dd5..4ce3834f3 100644 --- a/docs/examples/983a867c90e63e070518f2f709f659ee.asciidoc +++ b/docs/examples/21d41e8cbd107fbdf0901f885834dafc.asciidoc @@ -1,14 +1,20 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/wildcard.asciidoc:141 +// mapping/types/wildcard.asciidoc:139 [source, python] ---- resp = client.indices.create( index="idx", + settings={ + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, mappings={ - "_source": { - "mode": "synthetic" - }, "properties": { "card": { "type": "wildcard" diff --git a/docs/examples/2310d84ebf113f2a3ed14cc53172ae4a.asciidoc b/docs/examples/2310d84ebf113f2a3ed14cc53172ae4a.asciidoc index 79f87f9cc..0e4c70c10 100644 --- a/docs/examples/2310d84ebf113f2a3ed14cc53172ae4a.asciidoc +++ b/docs/examples/2310d84ebf113f2a3ed14cc53172ae4a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/text-expansion-query.asciidoc:93 +// query-dsl/text-expansion-query.asciidoc:100 [source, python] ---- diff --git a/docs/examples/25cb9e1da00dfd971065ce182467434d.asciidoc b/docs/examples/25cb9e1da00dfd971065ce182467434d.asciidoc index 4a396c3df..4b3a463ba 100644 --- a/docs/examples/25cb9e1da00dfd971065ce182467434d.asciidoc +++ b/docs/examples/25cb9e1da00dfd971065ce182467434d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/voting-exclusions.asciidoc:112 +// cluster/voting-exclusions.asciidoc:116 [source, python] ---- diff --git a/docs/examples/2646710ece0c4c843aebeacd370d0396.asciidoc b/docs/examples/2646710ece0c4c843aebeacd370d0396.asciidoc index 41168da02..978eb8a86 100644 --- a/docs/examples/2646710ece0c4c843aebeacd370d0396.asciidoc +++ b/docs/examples/2646710ece0c4c843aebeacd370d0396.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/dense-vector.asciidoc:137 +// mapping/types/dense-vector.asciidoc:142 [source, python] ---- diff --git a/docs/examples/270549e6b062228312c4e7a54a2c2209.asciidoc b/docs/examples/270549e6b062228312c4e7a54a2c2209.asciidoc index 3fdc374f1..9ae9ebbce 100644 --- a/docs/examples/270549e6b062228312c4e7a54a2c2209.asciidoc +++ 
b/docs/examples/270549e6b062228312c4e7a54a2c2209.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/task-queue-backlog.asciidoc:34 +// troubleshooting/common-issues/task-queue-backlog.asciidoc:39 [source, python] ---- diff --git a/docs/examples/dfcdadcf91529d3a399e05684195028e.asciidoc b/docs/examples/2968ffb8135f77ba3a9b876dd4918119.asciidoc similarity index 85% rename from docs/examples/dfcdadcf91529d3a399e05684195028e.asciidoc rename to docs/examples/2968ffb8135f77ba3a9b876dd4918119.asciidoc index 635e058fb..67a616b10 100644 --- a/docs/examples/dfcdadcf91529d3a399e05684195028e.asciidoc +++ b/docs/examples/2968ffb8135f77ba3a9b876dd4918119.asciidoc @@ -11,7 +11,7 @@ resp = client.reindex( }, dest={ "index": "azure-ai-studio-embeddings", - "pipeline": "azure_ai_studio_embeddings" + "pipeline": "azure_ai_studio_embeddings_pipeline" }, ) print(resp) diff --git a/docs/examples/2a5f7e7d6b92c66e52616845146d2820.asciidoc b/docs/examples/2a5f7e7d6b92c66e52616845146d2820.asciidoc index 757d2d7a4..b944107e3 100644 --- a/docs/examples/2a5f7e7d6b92c66e52616845146d2820.asciidoc +++ b/docs/examples/2a5f7e7d6b92c66e52616845146d2820.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// transform/painless-examples.asciidoc:513 +// transform/painless-examples.asciidoc:522 [source, python] ---- diff --git a/docs/examples/2a71e2d7f7179dd76183d30789046808.asciidoc b/docs/examples/2a71e2d7f7179dd76183d30789046808.asciidoc index 46ca2436d..9d9d7f53e 100644 --- a/docs/examples/2a71e2d7f7179dd76183d30789046808.asciidoc +++ b/docs/examples/2a71e2d7f7179dd76183d30789046808.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/multivalued-fields.asciidoc:177 +// esql/multivalued-fields.asciidoc:219 [source, python] ---- diff --git a/docs/examples/2acf75803494fef29f9ca70671aa6be1.asciidoc b/docs/examples/2acf75803494fef29f9ca70671aa6be1.asciidoc index fbba69008..64607d0d5 100644 --- a/docs/examples/2acf75803494fef29f9ca70671aa6be1.asciidoc +++ b/docs/examples/2acf75803494fef29f9ca70671aa6be1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/bulk-delete-roles.asciidoc:95 +// rest-api/security/bulk-delete-roles.asciidoc:94 [source, python] ---- diff --git a/docs/examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc b/docs/examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc index 65c237a2b..e8b76eaa8 100644 --- a/docs/examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc +++ b/docs/examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-rest.asciidoc:381 +// esql/esql-rest.asciidoc:383 [source, python] ---- diff --git a/docs/examples/2b1c560f00d9bcf5caaf56c03f6b5962.asciidoc b/docs/examples/2b1c560f00d9bcf5caaf56c03f6b5962.asciidoc index 5027a5dcb..ed97c5df2 100644 --- a/docs/examples/2b1c560f00d9bcf5caaf56c03f6b5962.asciidoc +++ b/docs/examples/2b1c560f00d9bcf5caaf56c03f6b5962.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/list-connector-sync-jobs-api.asciidoc:78 +// connector/apis/list-connector-sync-jobs-api.asciidoc:79 [source, python] ---- diff --git a/docs/examples/2bb41b0b4876ce98cd0cd8fb6d337f18.asciidoc b/docs/examples/2bb41b0b4876ce98cd0cd8fb6d337f18.asciidoc index 64dfcb7b5..7c611874c 100644 --- a/docs/examples/2bb41b0b4876ce98cd0cd8fb6d337f18.asciidoc +++ b/docs/examples/2bb41b0b4876ce98cd0cd8fb6d337f18.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// 
migration/transient-settings-migration-guide.asciidoc:64 +// migration/migrate_9_0/transient-settings-migration-guide.asciidoc:64 [source, python] ---- diff --git a/docs/examples/2c079d1ae4819a0c206b9e1aa5623523.asciidoc b/docs/examples/2c079d1ae4819a0c206b9e1aa5623523.asciidoc new file mode 100644 index 000000000..fa2d8bedb --- /dev/null +++ b/docs/examples/2c079d1ae4819a0c206b9e1aa5623523.asciidoc @@ -0,0 +1,77 @@ +// This file is autogenerated, DO NOT EDIT +// mapping/types/passthrough.asciidoc:11 + +[source, python] +---- +resp = client.indices.create( + index="my-index-000001", + mappings={ + "properties": { + "attributes": { + "type": "passthrough", + "priority": 10, + "properties": { + "id": { + "type": "keyword" + } + } + } + } + }, +) +print(resp) + +resp1 = client.index( + index="my-index-000001", + id="1", + document={ + "attributes": { + "id": "foo", + "zone": 10 + } + }, +) +print(resp1) + +resp2 = client.search( + index="my-index-000001", + query={ + "bool": { + "must": [ + { + "match": { + "id": "foo" + } + }, + { + "match": { + "zone": 10 + } + } + ] + } + }, +) +print(resp2) + +resp3 = client.search( + index="my-index-000001", + query={ + "bool": { + "must": [ + { + "match": { + "attributes.id": "foo" + } + }, + { + "match": { + "attributes.zone": 10 + } + } + ] + } + }, +) +print(resp3) +---- diff --git a/docs/examples/2c44657adf550b8ade5cf5334106d38b.asciidoc b/docs/examples/2c44657adf550b8ade5cf5334106d38b.asciidoc index 6b4e8588e..baf7c4d24 100644 --- a/docs/examples/2c44657adf550b8ade5cf5334106d38b.asciidoc +++ b/docs/examples/2c44657adf550b8ade5cf5334106d38b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/runtime.asciidoc:1406 +// mapping/runtime.asciidoc:1404 [source, python] ---- diff --git a/docs/examples/2c86840a46242a38cf82024a9321be46.asciidoc b/docs/examples/2c86840a46242a38cf82024a9321be46.asciidoc new file mode 100644 index 000000000..b377f60c6 --- /dev/null +++ b/docs/examples/2c86840a46242a38cf82024a9321be46.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// quickstart/getting-started.asciidoc:362 + +[source, python] +---- +resp = client.indices.create( + index="my-explicit-mappings-books", + mappings={ + "dynamic": False, + "properties": { + "name": { + "type": "text" + }, + "author": { + "type": "text" + }, + "release_date": { + "type": "date", + "format": "yyyy-MM-dd" + }, + "page_count": { + "type": "integer" + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/2d0244c020075595acb625aa5ba8f455.asciidoc b/docs/examples/2d0244c020075595acb625aa5ba8f455.asciidoc new file mode 100644 index 000000000..3b4923e2b --- /dev/null +++ b/docs/examples/2d0244c020075595acb625aa5ba8f455.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// mapping/fields/synthetic-source.asciidoc:260 + +[source, python] +---- +resp = client.index( + index="idx_keep", + id="1", + document={ + "path": { + "to": [ + { + "foo": [ + 3, + 2, + 1 + ] + }, + { + "foo": [ + 30, + 20, + 10 + ] + } + ], + "bar": "baz" + }, + "ids": [ + 200, + 100, + 300, + 100 + ] + }, +) +print(resp) +---- diff --git a/docs/examples/2de6885bacb8769b8f22dce253c96b0c.asciidoc b/docs/examples/2de6885bacb8769b8f22dce253c96b0c.asciidoc index 4b1cfdc52..3aa15bd1d 100644 --- a/docs/examples/2de6885bacb8769b8f22dce253c96b0c.asciidoc +++ b/docs/examples/2de6885bacb8769b8f22dce253c96b0c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/intervals-query.asciidoc:343 +// query-dsl/intervals-query.asciidoc:414 
[source, python] ---- diff --git a/docs/examples/2e7844477b41fcfa9efefee4ec0e7101.asciidoc b/docs/examples/2e7844477b41fcfa9efefee4ec0e7101.asciidoc new file mode 100644 index 000000000..04f8c55ea --- /dev/null +++ b/docs/examples/2e7844477b41fcfa9efefee4ec0e7101.asciidoc @@ -0,0 +1,45 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/search-using-query-rules.asciidoc:241 + +[source, python] +---- +resp = client.search( + index="my-index-000001", + retriever={ + "rule": { + "match_criteria": { + "query_string": "puggles", + "user_country": "us" + }, + "ruleset_ids": [ + "my-ruleset" + ], + "retriever": { + "rrf": { + "retrievers": [ + { + "standard": { + "query": { + "query_string": { + "query": "pugs" + } + } + } + }, + { + "standard": { + "query": { + "query_string": { + "query": "puggles" + } + } + } + } + ] + } + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/2f2fd35905feef0b561c05d70c7064c1.asciidoc b/docs/examples/2f2fd35905feef0b561c05d70c7064c1.asciidoc index 98ad01d8c..d7bf20ff3 100644 --- a/docs/examples/2f2fd35905feef0b561c05d70c7064c1.asciidoc +++ b/docs/examples/2f2fd35905feef0b561c05d70c7064c1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// migration/migrate_8_0/migrate_to_java_time.asciidoc:239 +// mapping/dynamic/templates.asciidoc:570 [source, python] ---- diff --git a/docs/examples/2f67db5e4d6c958258c3d70fb2d0b1c8.asciidoc b/docs/examples/2f67db5e4d6c958258c3d70fb2d0b1c8.asciidoc deleted file mode 100644 index 00612b5df..000000000 --- a/docs/examples/2f67db5e4d6c958258c3d70fb2d0b1c8.asciidoc +++ /dev/null @@ -1,13 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// migration/migrate_8_0/index-setting-changes.asciidoc:48 - -[source, python] ----- -resp = client.indices.put_settings( - index="my-index-000001", - settings={ - "index.merge.policy.max_merge_at_once_explicit": None - }, -) -print(resp) ----- diff --git a/docs/examples/2f98924c3d593ea2b60edb9cef5bee22.asciidoc b/docs/examples/2f98924c3d593ea2b60edb9cef5bee22.asciidoc index 6b434ce62..e811461d4 100644 --- a/docs/examples/2f98924c3d593ea2b60edb9cef5bee22.asciidoc +++ b/docs/examples/2f98924c3d593ea2b60edb9cef5bee22.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// how-to/size-your-shards.asciidoc:483 +// how-to/size-your-shards.asciidoc:484 [source, python] ---- diff --git a/docs/examples/2fa7ded8515b32f26c54394ea598f573.asciidoc b/docs/examples/2fa7ded8515b32f26c54394ea598f573.asciidoc index 4dcbb9d05..a444a667b 100644 --- a/docs/examples/2fa7ded8515b32f26c54394ea598f573.asciidoc +++ b/docs/examples/2fa7ded8515b32f26c54394ea598f573.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/index-templates.asciidoc:120 +// indices/index-templates.asciidoc:121 [source, python] ---- diff --git a/docs/examples/5f8d90515995a5eee189d722abe3b111.asciidoc b/docs/examples/310bdfb0d0d75bac7bff036a3fe51d4d.asciidoc similarity index 91% rename from docs/examples/5f8d90515995a5eee189d722abe3b111.asciidoc rename to docs/examples/310bdfb0d0d75bac7bff036a3fe51d4d.asciidoc index 42081a588..502c80666 100644 --- a/docs/examples/5f8d90515995a5eee189d722abe3b111.asciidoc +++ b/docs/examples/310bdfb0d0d75bac7bff036a3fe51d4d.asciidoc @@ -4,7 +4,7 @@ [source, python] ---- resp = client.ingest.put_pipeline( - id="azure_ai_studio_embeddings", + id="azure_ai_studio_embeddings_pipeline", processors=[ { "inference": { diff --git a/docs/examples/320645d771e952af2a67bb7445c3688d.asciidoc 
b/docs/examples/320645d771e952af2a67bb7445c3688d.asciidoc index f29deb5b7..4afe0c1b7 100644 --- a/docs/examples/320645d771e952af2a67bb7445c3688d.asciidoc +++ b/docs/examples/320645d771e952af2a67bb7445c3688d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// analysis/analyzers/lang-analyzer.asciidoc:1647 +// analysis/analyzers/lang-analyzer.asciidoc:1648 [source, python] ---- diff --git a/docs/examples/327466380bcd55361973b4a96c6dccb2.asciidoc b/docs/examples/327466380bcd55361973b4a96c6dccb2.asciidoc index 307952a06..4d6c43729 100644 --- a/docs/examples/327466380bcd55361973b4a96c6dccb2.asciidoc +++ b/docs/examples/327466380bcd55361973b4a96c6dccb2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// analysis/analyzers/lang-analyzer.asciidoc:1697 +// analysis/analyzers/lang-analyzer.asciidoc:1698 [source, python] ---- diff --git a/docs/examples/d3a558ef226e9dccc1c7c61e1167547f.asciidoc b/docs/examples/334811cfceb6858aeec5b3461717dd63.asciidoc similarity index 85% rename from docs/examples/d3a558ef226e9dccc1c7c61e1167547f.asciidoc rename to docs/examples/334811cfceb6858aeec5b3461717dd63.asciidoc index 0ed88bba1..cf1347fab 100644 --- a/docs/examples/d3a558ef226e9dccc1c7c61e1167547f.asciidoc +++ b/docs/examples/334811cfceb6858aeec5b3461717dd63.asciidoc @@ -1,11 +1,11 @@ // This file is autogenerated, DO NOT EDIT -// ingest/processors/geoip.asciidoc:186 +// ingest/processors/geoip.asciidoc:188 [source, python] ---- resp = client.ingest.put_pipeline( id="geoip", - description="Add geoip info", + description="Add ip geolocation info", processors=[ { "geoip": { diff --git a/docs/examples/339c4e5af9f9069ad9912aa574488b59.asciidoc b/docs/examples/339c4e5af9f9069ad9912aa574488b59.asciidoc new file mode 100644 index 000000000..b027282dd --- /dev/null +++ b/docs/examples/339c4e5af9f9069ad9912aa574488b59.asciidoc @@ -0,0 +1,70 @@ +// This file is autogenerated, DO NOT EDIT +// vectors/vector-functions.asciidoc:342 + +[source, python] +---- +resp = client.indices.create( + index="my-index-bit-vectors", + mappings={ + "properties": { + "my_dense_vector": { + "type": "dense_vector", + "index": False, + "element_type": "bit", + "dims": 40 + } + } + }, +) +print(resp) + +resp1 = client.index( + index="my-index-bit-vectors", + id="1", + document={ + "my_dense_vector": [ + 8, + 5, + -15, + 1, + -7 + ] + }, +) +print(resp1) + +resp2 = client.index( + index="my-index-bit-vectors", + id="2", + document={ + "my_dense_vector": [ + -1, + 115, + -3, + 4, + -128 + ] + }, +) +print(resp2) + +resp3 = client.index( + index="my-index-bit-vectors", + id="3", + document={ + "my_dense_vector": [ + 2, + 18, + -5, + 0, + -124 + ] + }, +) +print(resp3) + +resp4 = client.indices.refresh( + index="my-index-bit-vectors", +) +print(resp4) +---- diff --git a/docs/examples/33d480fc6812ada75756cf5337bc9092.asciidoc b/docs/examples/33d480fc6812ada75756cf5337bc9092.asciidoc index b2a058e49..382153680 100644 --- a/docs/examples/33d480fc6812ada75756cf5337bc9092.asciidoc +++ b/docs/examples/33d480fc6812ada75756cf5337bc9092.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/list-connector-sync-jobs-api.asciidoc:57 +// connector/apis/list-connector-sync-jobs-api.asciidoc:58 [source, python] ---- diff --git a/docs/examples/342ddf9121aeddd82fea2464665e25da.asciidoc b/docs/examples/342ddf9121aeddd82fea2464665e25da.asciidoc index 13694ba05..d2c8c5d77 100644 --- a/docs/examples/342ddf9121aeddd82fea2464665e25da.asciidoc +++ 
b/docs/examples/342ddf9121aeddd82fea2464665e25da.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/create-connector-api.asciidoc:20 +// connector/apis/create-connector-api.asciidoc:21 [source, python] ---- diff --git a/docs/examples/346f28d82acb5427c304aa574fea0008.asciidoc b/docs/examples/346f28d82acb5427c304aa574fea0008.asciidoc index a0683551d..efd1f5771 100644 --- a/docs/examples/346f28d82acb5427c304aa574fea0008.asciidoc +++ b/docs/examples/346f28d82acb5427c304aa574fea0008.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// analysis/analyzers/lang-analyzer.asciidoc:1846 +// analysis/analyzers/lang-analyzer.asciidoc:1847 [source, python] ---- diff --git a/docs/examples/35a272df8c919a12d7c3106a18245748.asciidoc b/docs/examples/35a272df8c919a12d7c3106a18245748.asciidoc index db4d626eb..0a003a7e3 100644 --- a/docs/examples/35a272df8c919a12d7c3106a18245748.asciidoc +++ b/docs/examples/35a272df8c919a12d7c3106a18245748.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/infer-trained-model.asciidoc:849 +// ml/trained-models/apis/infer-trained-model.asciidoc:950 [source, python] ---- diff --git a/docs/examples/365256ebdfa47b449780771d9beba8d9.asciidoc b/docs/examples/365256ebdfa47b449780771d9beba8d9.asciidoc index 0cd8f33d6..ecfda89c9 100644 --- a/docs/examples/365256ebdfa47b449780771d9beba8d9.asciidoc +++ b/docs/examples/365256ebdfa47b449780771d9beba8d9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/check-in-connector-sync-job-api.asciidoc:49 +// connector/apis/check-in-connector-sync-job-api.asciidoc:50 [source, python] ---- diff --git a/docs/examples/65e892a362d940e4a74965f21c15ca09.asciidoc b/docs/examples/36ac0ef9ea63efc431580f7ade8ad53c.asciidoc similarity index 86% rename from docs/examples/65e892a362d940e4a74965f21c15ca09.asciidoc rename to docs/examples/36ac0ef9ea63efc431580f7ade8ad53c.asciidoc index 02927c0a2..5ccef54b7 100644 --- a/docs/examples/65e892a362d940e4a74965f21c15ca09.asciidoc +++ b/docs/examples/36ac0ef9ea63efc431580f7ade8ad53c.asciidoc @@ -11,7 +11,7 @@ resp = client.reindex( }, dest={ "index": "openai-embeddings", - "pipeline": "openai_embeddings" + "pipeline": "openai_embeddings_pipeline" }, ) print(resp) diff --git a/docs/examples/36b86b97feedcf5632824eefc251d6ed.asciidoc b/docs/examples/36b86b97feedcf5632824eefc251d6ed.asciidoc index 22d10734a..d0107a261 100644 --- a/docs/examples/36b86b97feedcf5632824eefc251d6ed.asciidoc +++ b/docs/examples/36b86b97feedcf5632824eefc251d6ed.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// quickstart/getting-started.asciidoc:244 +// quickstart/getting-started.asciidoc:484 [source, python] ---- diff --git a/docs/examples/36d229f734adcdab00be266a7ce038b1.asciidoc b/docs/examples/36d229f734adcdab00be266a7ce038b1.asciidoc index f4c484a27..b2740333f 100644 --- a/docs/examples/36d229f734adcdab00be266a7ce038b1.asciidoc +++ b/docs/examples/36d229f734adcdab00be266a7ce038b1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/dense-vector.asciidoc:375 +// mapping/types/dense-vector.asciidoc:406 [source, python] ---- diff --git a/docs/examples/370b297ed3433577adf53e64f572d89d.asciidoc b/docs/examples/370b297ed3433577adf53e64f572d89d.asciidoc index c294fea7b..56aace56f 100644 --- a/docs/examples/370b297ed3433577adf53e64f572d89d.asciidoc +++ b/docs/examples/370b297ed3433577adf53e64f572d89d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT 
EDIT -// connector/apis/delete-connector-sync-job-api.asciidoc:45 +// connector/apis/delete-connector-sync-job-api.asciidoc:46 [source, python] ---- diff --git a/docs/examples/371962cf63e65c10026177c6a1bad0b6.asciidoc b/docs/examples/371962cf63e65c10026177c6a1bad0b6.asciidoc index b46d30acf..6fbf2785e 100644 --- a/docs/examples/371962cf63e65c10026177c6a1bad0b6.asciidoc +++ b/docs/examples/371962cf63e65c10026177c6a1bad0b6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// slm/apis/slm-start.asciidoc:41 +// tab-widgets/troubleshooting/data/start-slm.asciidoc:63 [source, python] ---- diff --git a/docs/examples/37c73410bf13429279cbc61a413957d8.asciidoc b/docs/examples/37c73410bf13429279cbc61a413957d8.asciidoc index 56fc5156b..b75a76507 100644 --- a/docs/examples/37c73410bf13429279cbc61a413957d8.asciidoc +++ b/docs/examples/37c73410bf13429279cbc61a413957d8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// how-to/size-your-shards.asciidoc:557 +// how-to/size-your-shards.asciidoc:558 [source, python] ---- diff --git a/docs/examples/38ba93890494bfa7beece58dffa44f98.asciidoc b/docs/examples/38ba93890494bfa7beece58dffa44f98.asciidoc index b82fab19a..cf80b3ae8 100644 --- a/docs/examples/38ba93890494bfa7beece58dffa44f98.asciidoc +++ b/docs/examples/38ba93890494bfa7beece58dffa44f98.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/semantic-text.asciidoc:177 +// mapping/types/semantic-text.asciidoc:207 [source, python] ---- diff --git a/docs/examples/398389933901b572a06a752bc780af7c.asciidoc b/docs/examples/398389933901b572a06a752bc780af7c.asciidoc index 8855fa937..f27040fba 100644 --- a/docs/examples/398389933901b572a06a752bc780af7c.asciidoc +++ b/docs/examples/398389933901b572a06a752bc780af7c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-anthropic.asciidoc:111 +// inference/service-anthropic.asciidoc:131 [source, python] ---- diff --git a/docs/examples/3a2f37f8f32b1aa6bcfb252b9e00f904.asciidoc b/docs/examples/3a2f37f8f32b1aa6bcfb252b9e00f904.asciidoc index 8b4e3c4e5..ff19e59b7 100644 --- a/docs/examples/3a2f37f8f32b1aa6bcfb252b9e00f904.asciidoc +++ b/docs/examples/3a2f37f8f32b1aa6bcfb252b9e00f904.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// index-modules.asciidoc:95 +// index-modules.asciidoc:97 [source, python] ---- diff --git a/docs/examples/3a489743e49902df38e3368cae00717a.asciidoc b/docs/examples/3a489743e49902df38e3368cae00717a.asciidoc new file mode 100644 index 000000000..91462454d --- /dev/null +++ b/docs/examples/3a489743e49902df38e3368cae00717a.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// troubleshooting/common-issues/high-cpu-usage.asciidoc:43 + +[source, python] +---- +resp = client.nodes.hot_threads() +print(resp) +---- diff --git a/docs/examples/60d689aae3f8de1e6830329dfd69a6a6.asciidoc b/docs/examples/3a4953663a5a3809b692c27446e16b7f.asciidoc similarity index 85% rename from docs/examples/60d689aae3f8de1e6830329dfd69a6a6.asciidoc rename to docs/examples/3a4953663a5a3809b692c27446e16b7f.asciidoc index 17aa35fd1..5d9373bf9 100644 --- a/docs/examples/60d689aae3f8de1e6830329dfd69a6a6.asciidoc +++ b/docs/examples/3a4953663a5a3809b692c27446e16b7f.asciidoc @@ -11,7 +11,7 @@ resp = client.reindex( }, dest={ "index": "amazon-bedrock-embeddings", - "pipeline": "amazon_bedrock_embeddings" + "pipeline": "amazon_bedrock_embeddings_pipeline" }, ) print(resp) diff --git a/docs/examples/d1a285aa244ec461d68f13e7078a33c0.asciidoc 
b/docs/examples/3ab8f65fcb55a0e3664c55749ec41efd.asciidoc similarity index 93% rename from docs/examples/d1a285aa244ec461d68f13e7078a33c0.asciidoc rename to docs/examples/3ab8f65fcb55a0e3664c55749ec41efd.asciidoc index ee97186fe..292815dbd 100644 --- a/docs/examples/d1a285aa244ec461d68f13e7078a33c0.asciidoc +++ b/docs/examples/3ab8f65fcb55a0e3664c55749ec41efd.asciidoc @@ -32,7 +32,8 @@ resp = client.indices.create( "decimal_digit", "arabic_normalization", "persian_normalization", - "persian_stop" + "persian_stop", + "persian_stem" ] } } diff --git a/docs/examples/3afc6dacf90b42900ab571aad8a61d75.asciidoc b/docs/examples/3afc6dacf90b42900ab571aad8a61d75.asciidoc index 9168398c6..db5b89d75 100644 --- a/docs/examples/3afc6dacf90b42900ab571aad8a61d75.asciidoc +++ b/docs/examples/3afc6dacf90b42900ab571aad8a61d75.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// analysis/analyzers/lang-analyzer.asciidoc:1598 +// analysis/analyzers/lang-analyzer.asciidoc:1599 [source, python] ---- diff --git a/docs/examples/3b6718257421b5419bf4cd6a7303c57e.asciidoc b/docs/examples/3b6718257421b5419bf4cd6a7303c57e.asciidoc deleted file mode 100644 index 0bea327f3..000000000 --- a/docs/examples/3b6718257421b5419bf4cd6a7303c57e.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// ingest/apis/get-geoip-database.asciidoc:55 - -[source, python] ----- -resp = client.perform_request( - "GET", - "/_ingest/geoip/database/my-database-id", -) -print(resp) ----- diff --git a/docs/examples/3c0d0c38e1c819a35a68cdba5ae8ccc4.asciidoc b/docs/examples/3c0d0c38e1c819a35a68cdba5ae8ccc4.asciidoc index d020f99d7..7f0c81b94 100644 --- a/docs/examples/3c0d0c38e1c819a35a68cdba5ae8ccc4.asciidoc +++ b/docs/examples/3c0d0c38e1c819a35a68cdba5ae8ccc4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// tab-widgets/inference-api/infer-api-task.asciidoc:254 +// tab-widgets/inference-api/infer-api-task.asciidoc:262 [source, python] ---- diff --git a/docs/examples/3c345feb7c52fd54bcb5d5505fd8bc3b.asciidoc b/docs/examples/3c345feb7c52fd54bcb5d5505fd8bc3b.asciidoc index e7a52923d..727da4e7c 100644 --- a/docs/examples/3c345feb7c52fd54bcb5d5505fd8bc3b.asciidoc +++ b/docs/examples/3c345feb7c52fd54bcb5d5505fd8bc3b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/infer-trained-model.asciidoc:1008 +// ml/trained-models/apis/infer-trained-model.asciidoc:1109 [source, python] ---- diff --git a/docs/examples/3cd93a48906069709b76420c66930c01.asciidoc b/docs/examples/3cd93a48906069709b76420c66930c01.asciidoc index fffdc26dd..6c3859dd8 100644 --- a/docs/examples/3cd93a48906069709b76420c66930c01.asciidoc +++ b/docs/examples/3cd93a48906069709b76420c66930c01.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// analysis/tokenfilters/stemmer-tokenfilter.asciidoc:265 +// analysis/tokenfilters/stemmer-tokenfilter.asciidoc:264 [source, python] ---- diff --git a/docs/examples/3f2e5132e35b9e8b3203a4a0541cf0d4.asciidoc b/docs/examples/3f2e5132e35b9e8b3203a4a0541cf0d4.asciidoc index 6c2756e0b..a87fc4d6b 100644 --- a/docs/examples/3f2e5132e35b9e8b3203a4a0541cf0d4.asciidoc +++ b/docs/examples/3f2e5132e35b9e8b3203a4a0541cf0d4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ilm/actions/ilm-searchable-snapshot.asciidoc:93 +// ilm/actions/ilm-searchable-snapshot.asciidoc:96 [source, python] ---- diff --git a/docs/examples/3fe4264ace04405989141c43aadfff81.asciidoc 
b/docs/examples/3fe4264ace04405989141c43aadfff81.asciidoc index dc3644d3d..28dd407e9 100644 --- a/docs/examples/3fe4264ace04405989141c43aadfff81.asciidoc +++ b/docs/examples/3fe4264ace04405989141c43aadfff81.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/create-roles.asciidoc:154 +// rest-api/security/create-roles.asciidoc:167 [source, python] ---- diff --git a/docs/examples/405ac843a9156d3cab374e199cac87fb.asciidoc b/docs/examples/405ac843a9156d3cab374e199cac87fb.asciidoc index fa979e0df..15e9b0326 100644 --- a/docs/examples/405ac843a9156d3cab374e199cac87fb.asciidoc +++ b/docs/examples/405ac843a9156d3cab374e199cac87fb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/create-connector-sync-job-api.asciidoc:15 +// connector/apis/create-connector-sync-job-api.asciidoc:16 [source, python] ---- diff --git a/docs/examples/40bd86e400d27e68b8f0ae580c29d32d.asciidoc b/docs/examples/40bd86e400d27e68b8f0ae580c29d32d.asciidoc index 2cb413860..f150be621 100644 --- a/docs/examples/40bd86e400d27e68b8f0ae580c29d32d.asciidoc +++ b/docs/examples/40bd86e400d27e68b8f0ae580c29d32d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// how-to/size-your-shards.asciidoc:278 +// how-to/size-your-shards.asciidoc:279 [source, python] ---- diff --git a/docs/examples/40c3e7bb1fdc125a1ab21bd7d7326694.asciidoc b/docs/examples/40c3e7bb1fdc125a1ab21bd7d7326694.asciidoc index 7aeed7da6..1cbdde427 100644 --- a/docs/examples/40c3e7bb1fdc125a1ab21bd7d7326694.asciidoc +++ b/docs/examples/40c3e7bb1fdc125a1ab21bd7d7326694.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/multivalued-fields.asciidoc:133 +// esql/multivalued-fields.asciidoc:142 [source, python] ---- diff --git a/docs/examples/40f287bf733420bbab134b74c7d0ea5d.asciidoc b/docs/examples/40f287bf733420bbab134b74c7d0ea5d.asciidoc new file mode 100644 index 000000000..003c7f9ea --- /dev/null +++ b/docs/examples/40f287bf733420bbab134b74c7d0ea5d.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/ingest-vectors.asciidoc:68 + +[source, python] +---- +resp = client.index( + index="amazon-reviews", + id="1", + document={ + "review_text": "This product is lifechanging! 
I'm telling all my friends about it.", + "review_vector": [ + 0.1, + 0.2, + 0.3, + 0.4, + 0.5, + 0.6, + 0.7, + 0.8 + ] + }, +) +print(resp) +---- diff --git a/docs/examples/40f97f70e8e743c6a6296c81b920aeb0.asciidoc b/docs/examples/40f97f70e8e743c6a6296c81b920aeb0.asciidoc index 8f4e0432d..06969d9fd 100644 --- a/docs/examples/40f97f70e8e743c6a6296c81b920aeb0.asciidoc +++ b/docs/examples/40f97f70e8e743c6a6296c81b920aeb0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// how-to/size-your-shards.asciidoc:313 +// how-to/size-your-shards.asciidoc:314 [source, python] ---- diff --git a/docs/examples/41175d304e660da2931764f9a4418fd3.asciidoc b/docs/examples/41175d304e660da2931764f9a4418fd3.asciidoc index d58c99075..d37e82323 100644 --- a/docs/examples/41175d304e660da2931764f9a4418fd3.asciidoc +++ b/docs/examples/41175d304e660da2931764f9a4418fd3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-pipeline-api.asciidoc:87 +// connector/apis/update-connector-pipeline-api.asciidoc:88 [source, python] ---- diff --git a/docs/examples/413fdcc7c437775a16bb55b81c2bbe2b.asciidoc b/docs/examples/413fdcc7c437775a16bb55b81c2bbe2b.asciidoc index fa73e0203..a626e7e41 100644 --- a/docs/examples/413fdcc7c437775a16bb55b81c2bbe2b.asciidoc +++ b/docs/examples/413fdcc7c437775a16bb55b81c2bbe2b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/runtime.asciidoc:1618 +// mapping/runtime.asciidoc:1616 [source, python] ---- diff --git a/docs/examples/425eaaf9c7e3b1e77a3474fbab4183b4.asciidoc b/docs/examples/425eaaf9c7e3b1e77a3474fbab4183b4.asciidoc index 3f7f7db24..b6bb4fb2c 100644 --- a/docs/examples/425eaaf9c7e3b1e77a3474fbab4183b4.asciidoc +++ b/docs/examples/425eaaf9c7e3b1e77a3474fbab4183b4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/task-queue-backlog.asciidoc:22 +// troubleshooting/common-issues/task-queue-backlog.asciidoc:25 [source, python] ---- diff --git a/docs/examples/430705509f8367aef92be413f702520b.asciidoc b/docs/examples/430705509f8367aef92be413f702520b.asciidoc index 2bf5117b8..7e2d3af29 100644 --- a/docs/examples/430705509f8367aef92be413f702520b.asciidoc +++ b/docs/examples/430705509f8367aef92be413f702520b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-status-api.asciidoc:75 +// connector/apis/update-connector-status-api.asciidoc:76 [source, python] ---- diff --git a/docs/examples/4310869b97d4224acaa6d66b1e196048.asciidoc b/docs/examples/4310869b97d4224acaa6d66b1e196048.asciidoc index cab2434e0..6b7f6bef2 100644 --- a/docs/examples/4310869b97d4224acaa6d66b1e196048.asciidoc +++ b/docs/examples/4310869b97d4224acaa6d66b1e196048.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/semantic-search-elser.asciidoc:172 +// search/search-your-data/semantic-search-elser.asciidoc:184 [source, python] ---- diff --git a/docs/examples/43d9e314431336a6f084cea76dfd6489.asciidoc b/docs/examples/43d9e314431336a6f084cea76dfd6489.asciidoc new file mode 100644 index 000000000..14c7b4a3c --- /dev/null +++ b/docs/examples/43d9e314431336a6f084cea76dfd6489.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// search/retriever.asciidoc:235 + +[source, python] +---- +resp = client.search( + index="restaurants", + retriever={ + "knn": { + "field": "vector", + "query_vector": [ + 10, + 22, + 77 + ], + "k": 10, + "num_candidates": 10 + } + }, +) +print(resp) 
+---- diff --git a/docs/examples/44198781d164a15be633d4469485a544.asciidoc b/docs/examples/44198781d164a15be633d4469485a544.asciidoc new file mode 100644 index 000000000..db8b96e88 --- /dev/null +++ b/docs/examples/44198781d164a15be633d4469485a544.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// vectors/vector-functions.asciidoc:379 + +[source, python] +---- +resp = client.search( + index="my-index-bit-vectors", + query={ + "script_score": { + "query": { + "match_all": {} + }, + "script": { + "source": "dotProduct(params.query_vector, 'my_dense_vector')", + "params": { + "query_vector": [ + 8, + 5, + -15, + 1, + -7 + ] + } + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/44385b61342e20ea05f254015b2b04d7.asciidoc b/docs/examples/44385b61342e20ea05f254015b2b04d7.asciidoc index 54d047589..e8c93ac34 100644 --- a/docs/examples/44385b61342e20ea05f254015b2b04d7.asciidoc +++ b/docs/examples/44385b61342e20ea05f254015b2b04d7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/bulk-delete-roles.asciidoc:49 +// rest-api/security/bulk-delete-roles.asciidoc:48 [source, python] ---- diff --git a/docs/examples/443e8da9968f1c65f46a2a65a1e1e078.asciidoc b/docs/examples/443e8da9968f1c65f46a2a65a1e1e078.asciidoc index d6f42c8fb..0f02f4bb2 100644 --- a/docs/examples/443e8da9968f1c65f46a2a65a1e1e078.asciidoc +++ b/docs/examples/443e8da9968f1c65f46a2a65a1e1e078.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/set-up-tsds.asciidoc:146 +// data-streams/set-up-tsds.asciidoc:147 [source, python] ---- diff --git a/docs/examples/445f8a6ef75fb43da52990b3a9063c78.asciidoc b/docs/examples/445f8a6ef75fb43da52990b3a9063c78.asciidoc index b91e1f5cf..6473b0542 100644 --- a/docs/examples/445f8a6ef75fb43da52990b3a9063c78.asciidoc +++ b/docs/examples/445f8a6ef75fb43da52990b3a9063c78.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/runtime.asciidoc:1658 +// mapping/runtime.asciidoc:1656 [source, python] ---- diff --git a/docs/examples/44bca3f17d403517af3616754dc795bb.asciidoc b/docs/examples/44bca3f17d403517af3616754dc795bb.asciidoc index 948217540..f7231526e 100644 --- a/docs/examples/44bca3f17d403517af3616754dc795bb.asciidoc +++ b/docs/examples/44bca3f17d403517af3616754dc795bb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/script-score-query.asciidoc:345 +// query-dsl/script-score-query.asciidoc:352 [source, python] ---- diff --git a/docs/examples/49a19615ebe2c013b8321152163478ab.asciidoc b/docs/examples/49a19615ebe2c013b8321152163478ab.asciidoc new file mode 100644 index 000000000..db5e78aa5 --- /dev/null +++ b/docs/examples/49a19615ebe2c013b8321152163478ab.asciidoc @@ -0,0 +1,42 @@ +// This file is autogenerated, DO NOT EDIT +// scripting/fields.asciidoc:92 + +[source, python] +---- +resp = client.index( + index="my-index-000001", + id="1", + refresh=True, + document={ + "text": "quick brown fox" + }, +) +print(resp) + +resp1 = client.index( + index="my-index-000001", + id="2", + refresh=True, + document={ + "text": "quick fox" + }, +) +print(resp1) + +resp2 = client.search( + index="my-index-000001", + query={ + "script_score": { + "query": { + "match": { + "text": "quick brown fox" + } + }, + "script": { + "source": "_termStats.termFreq().getAverage()" + } + } + }, +) +print(resp2) +---- diff --git a/docs/examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc b/docs/examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc new file mode 100644 index 
000000000..46d369d7e --- /dev/null +++ b/docs/examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// inference/stream-inference.asciidoc:78 + +[source, python] +---- +resp = client.inference.stream_inference( + task_type="completion", + inference_id="openai-completion", + body={ + "input": "What is Elastic?" + }, +) +print(resp) +---- diff --git a/docs/examples/4bef98a2dac575a50ee0783c2269f1db.asciidoc b/docs/examples/4bef98a2dac575a50ee0783c2269f1db.asciidoc index 2058b87cf..3a7b0bcd6 100644 --- a/docs/examples/4bef98a2dac575a50ee0783c2269f1db.asciidoc +++ b/docs/examples/4bef98a2dac575a50ee0783c2269f1db.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/dense-vector.asciidoc:469 +// mapping/types/dense-vector.asciidoc:500 [source, python] ---- diff --git a/docs/examples/4c9350ed09b28f00e297ebe73c3b95a2.asciidoc b/docs/examples/4c9350ed09b28f00e297ebe73c3b95a2.asciidoc index f8f432fcb..df7d893d3 100644 --- a/docs/examples/4c9350ed09b28f00e297ebe73c3b95a2.asciidoc +++ b/docs/examples/4c9350ed09b28f00e297ebe73c3b95a2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-elasticsearch.asciidoc:144 +// inference/service-elasticsearch.asciidoc:195 [source, python] ---- diff --git a/docs/examples/4c95d54b32df4dc49e9762b6c1ae2c05.asciidoc b/docs/examples/4c95d54b32df4dc49e9762b6c1ae2c05.asciidoc index 20d5098b5..da1513b91 100644 --- a/docs/examples/4c95d54b32df4dc49e9762b6c1ae2c05.asciidoc +++ b/docs/examples/4c95d54b32df4dc49e9762b6c1ae2c05.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/text.asciidoc:356 +// mapping/types/text.asciidoc:368 [source, python] ---- diff --git a/docs/examples/4ca15672fc5ab1d80a127d086b6d2837.asciidoc b/docs/examples/4ca15672fc5ab1d80a127d086b6d2837.asciidoc index 11b34e97f..77d2d7733 100644 --- a/docs/examples/4ca15672fc5ab1d80a127d086b6d2837.asciidoc +++ b/docs/examples/4ca15672fc5ab1d80a127d086b6d2837.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/allocation-explain.asciidoc:443 +// cluster/allocation-explain.asciidoc:447 [source, python] ---- diff --git a/docs/examples/4da0cb8693e9ceceee2ba3b558014bbf.asciidoc b/docs/examples/4da0cb8693e9ceceee2ba3b558014bbf.asciidoc new file mode 100644 index 000000000..7ea6ba0c0 --- /dev/null +++ b/docs/examples/4da0cb8693e9ceceee2ba3b558014bbf.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// connector/docs/connectors-sharepoint-online.asciidoc:934 + +[source, python] +---- +resp = client.update_by_query( + index="INDEX_NAME", + conflicts="proceed", + query={ + "bool": { + "filter": [ + { + "match": { + "object_type": "drive_item" + } + }, + { + "exists": { + "field": "file" + } + }, + { + "range": { + "lastModifiedDateTime": { + "lte": "now-180d" + } + } + } + ] + } + }, + script={ + "source": "ctx._source.body = ''", + "lang": "painless" + }, +) +print(resp) +---- diff --git a/docs/examples/4dab4c5168047ba596af1beb0e55b845.asciidoc b/docs/examples/4dab4c5168047ba596af1beb0e55b845.asciidoc index 275db14c9..314cff18a 100644 --- a/docs/examples/4dab4c5168047ba596af1beb0e55b845.asciidoc +++ b/docs/examples/4dab4c5168047ba596af1beb0e55b845.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// migration/transient-settings-migration-guide.asciidoc:82 +// migration/migrate_9_0/transient-settings-migration-guide.asciidoc:82 [source, python] ---- diff --git 
a/docs/examples/a53ff77d83222c0e76453e630d64787e.asciidoc b/docs/examples/4dc151eebefd484a28aed1a175743364.asciidoc similarity index 92% rename from docs/examples/a53ff77d83222c0e76453e630d64787e.asciidoc rename to docs/examples/4dc151eebefd484a28aed1a175743364.asciidoc index 52aeed34e..37d5cc81d 100644 --- a/docs/examples/a53ff77d83222c0e76453e630d64787e.asciidoc +++ b/docs/examples/4dc151eebefd484a28aed1a175743364.asciidoc @@ -4,7 +4,7 @@ [source, python] ---- resp = client.ingest.put_pipeline( - id="openai_embeddings", + id="openai_embeddings_pipeline", processors=[ { "inference": { diff --git a/docs/examples/4e3414fc712b16311f9e433dd366f49d.asciidoc b/docs/examples/4e3414fc712b16311f9e433dd366f49d.asciidoc index 0335ed397..f39150a79 100644 --- a/docs/examples/4e3414fc712b16311f9e433dd366f49d.asciidoc +++ b/docs/examples/4e3414fc712b16311f9e433dd366f49d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/delete-inference.asciidoc:70 +// inference/delete-inference.asciidoc:64 [source, python] ---- diff --git a/docs/examples/4ed946065faa92f9950f04e402676a97.asciidoc b/docs/examples/4ed946065faa92f9950f04e402676a97.asciidoc index 47af87b3b..60d9e2c87 100644 --- a/docs/examples/4ed946065faa92f9950f04e402676a97.asciidoc +++ b/docs/examples/4ed946065faa92f9950f04e402676a97.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/info.asciidoc:200 +// rest-api/info.asciidoc:204 [source, python] ---- diff --git a/docs/examples/c9373ff5ed6b026173428fbb92ca2d9f.asciidoc b/docs/examples/4eeded40f30949e359714a5bb6c88612.asciidoc similarity index 87% rename from docs/examples/c9373ff5ed6b026173428fbb92ca2d9f.asciidoc rename to docs/examples/4eeded40f30949e359714a5bb6c88612.asciidoc index 8f1d14d52..b5bb3b50f 100644 --- a/docs/examples/c9373ff5ed6b026173428fbb92ca2d9f.asciidoc +++ b/docs/examples/4eeded40f30949e359714a5bb6c88612.asciidoc @@ -11,7 +11,7 @@ resp = client.reindex( }, dest={ "index": "elser-embeddings", - "pipeline": "elser_embeddings" + "pipeline": "elser_embeddings_pipeline" }, ) print(resp) diff --git a/docs/examples/4f6694ef147a73b1163bde3c13779d26.asciidoc b/docs/examples/4f6694ef147a73b1163bde3c13779d26.asciidoc new file mode 100644 index 000000000..0167e378e --- /dev/null +++ b/docs/examples/4f6694ef147a73b1163bde3c13779d26.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// troubleshooting/common-issues/rejected-requests.asciidoc:58 + +[source, python] +---- +resp = client.nodes.stats( + human=True, + filter_path="nodes.*.indexing_pressure", +) +print(resp) +---- diff --git a/docs/examples/4fcca1687d7b2cf08de526539fea5a76.asciidoc b/docs/examples/4fcca1687d7b2cf08de526539fea5a76.asciidoc index 49ed59db1..1abf2a2a7 100644 --- a/docs/examples/4fcca1687d7b2cf08de526539fea5a76.asciidoc +++ b/docs/examples/4fcca1687d7b2cf08de526539fea5a76.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/text-expansion-query.asciidoc:112 +// query-dsl/text-expansion-query.asciidoc:119 [source, python] ---- diff --git a/docs/examples/50d9c0508ddb0fc5ba5a893eec219dd8.asciidoc b/docs/examples/50d9c0508ddb0fc5ba5a893eec219dd8.asciidoc index 13c958321..98b732223 100644 --- a/docs/examples/50d9c0508ddb0fc5ba5a893eec219dd8.asciidoc +++ b/docs/examples/50d9c0508ddb0fc5ba5a893eec219dd8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/fields/synthetic-source.asciidoc:125 +// mapping/fields/synthetic-source.asciidoc:136 [source, python] ---- diff --git 
a/docs/examples/50ddf374cfa8128538ea092ee98b723d.asciidoc b/docs/examples/50ddf374cfa8128538ea092ee98b723d.asciidoc deleted file mode 100644 index e5951558a..000000000 --- a/docs/examples/50ddf374cfa8128538ea092ee98b723d.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/task-queue-backlog.asciidoc:45 - -[source, python] ----- -resp = client.tasks.list( - filter_path="nodes.*.tasks", -) -print(resp) ----- diff --git a/docs/examples/51b44224feee6e2e5974824334474c77.asciidoc b/docs/examples/51b44224feee6e2e5974824334474c77.asciidoc index 134b309bb..9aada93d3 100644 --- a/docs/examples/51b44224feee6e2e5974824334474c77.asciidoc +++ b/docs/examples/51b44224feee6e2e5974824334474c77.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/repository-s3.asciidoc:342 +// snapshot-restore/repository-s3.asciidoc:364 [source, python] ---- diff --git a/docs/examples/529671ffaf7cc75fe83a81d729788be4.asciidoc b/docs/examples/529671ffaf7cc75fe83a81d729788be4.asciidoc new file mode 100644 index 000000000..69a3e2868 --- /dev/null +++ b/docs/examples/529671ffaf7cc75fe83a81d729788be4.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// connector/docs/connectors-known-issues.asciidoc:124 + +[source, python] +---- +resp = client.update( + index=".elastic-connectors", + id="connector_id", + doc={ + "configuration": { + "field_a": { + "type": "str", + "value": "" + }, + "field_b": { + "type": "bool", + "value": False + }, + "field_c": { + "type": "int", + "value": 1 + }, + "field_d": { + "type": "list", + "value": "a,b" + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/52b71aa4ae6563abae78cd20ff06d1e9.asciidoc b/docs/examples/52b71aa4ae6563abae78cd20ff06d1e9.asciidoc index 8121426ba..2c3ecbaf6 100644 --- a/docs/examples/52b71aa4ae6563abae78cd20ff06d1e9.asciidoc +++ b/docs/examples/52b71aa4ae6563abae78cd20ff06d1e9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/hotspotting.asciidoc:142 +// troubleshooting/common-issues/hotspotting.asciidoc:144 [source, python] ---- diff --git a/docs/examples/52f4c5eb08d39f98e2e2f5527ece9731.asciidoc b/docs/examples/52f4c5eb08d39f98e2e2f5527ece9731.asciidoc index 4e861160e..282fd0579 100644 --- a/docs/examples/52f4c5eb08d39f98e2e2f5527ece9731.asciidoc +++ b/docs/examples/52f4c5eb08d39f98e2e2f5527ece9731.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-alibabacloud-ai-search.asciidoc:154 +// inference/service-alibabacloud-ai-search.asciidoc:204 [source, python] ---- diff --git a/docs/examples/533087d787b48878a0bf3fa8d0851b64.asciidoc b/docs/examples/533087d787b48878a0bf3fa8d0851b64.asciidoc deleted file mode 100644 index ee9fdaefd..000000000 --- a/docs/examples/533087d787b48878a0bf3fa8d0851b64.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// ingest/apis/delete-geoip-database.asciidoc:52 - -[source, python] ----- -resp = client.perform_request( - "DELETE", - "/_ingest/geoip/database/example-database-id", -) -print(resp) ----- diff --git a/docs/examples/5457c94f0039c6b95c7f9f305d0c6b58.asciidoc b/docs/examples/5457c94f0039c6b95c7f9f305d0c6b58.asciidoc index 95e608a13..81c231f99 100644 --- a/docs/examples/5457c94f0039c6b95c7f9f305d0c6b58.asciidoc +++ b/docs/examples/5457c94f0039c6b95c7f9f305d0c6b58.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/nodes-stats.asciidoc:2528 +// 
cluster/nodes-stats.asciidoc:2532 [source, python] ---- diff --git a/docs/examples/3a7a6ab88a49b484fafb10c8eb09b562.asciidoc b/docs/examples/548a9b6f447bb820380c1c23e57c18c3.asciidoc similarity index 92% rename from docs/examples/3a7a6ab88a49b484fafb10c8eb09b562.asciidoc rename to docs/examples/548a9b6f447bb820380c1c23e57c18c3.asciidoc index 3ee86992c..46022c5f3 100644 --- a/docs/examples/3a7a6ab88a49b484fafb10c8eb09b562.asciidoc +++ b/docs/examples/548a9b6f447bb820380c1c23e57c18c3.asciidoc @@ -4,7 +4,7 @@ [source, python] ---- resp = client.ingest.put_pipeline( - id="cohere_embeddings", + id="cohere_embeddings_pipeline", processors=[ { "inference": { diff --git a/docs/examples/54a47b5d07e7bfbea75c77f35eaae18d.asciidoc b/docs/examples/54a47b5d07e7bfbea75c77f35eaae18d.asciidoc new file mode 100644 index 000000000..474cc58ef --- /dev/null +++ b/docs/examples/54a47b5d07e7bfbea75c77f35eaae18d.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// connector/docs/connectors-known-issues.asciidoc:77 + +[source, python] +---- +resp = client.indices.put_mapping( + index=".elastic-connectors-sync-jobs-v1", + properties={ + "job_type": { + "type": "keyword" + } + }, +) +print(resp) +---- diff --git a/docs/examples/55085e6a2891040b6ac696561d0787c8.asciidoc b/docs/examples/55085e6a2891040b6ac696561d0787c8.asciidoc new file mode 100644 index 000000000..7e29fa416 --- /dev/null +++ b/docs/examples/55085e6a2891040b6ac696561d0787c8.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// mapping/types/passthrough.asciidoc:93 + +[source, python] +---- +resp = client.indices.create( + index="my-index-000002", + mappings={ + "properties": { + "attributes": { + "type": "passthrough", + "priority": 10, + "properties": { + "id": { + "type": "keyword" + } + } + }, + "resource.attributes": { + "type": "passthrough", + "priority": 20, + "properties": { + "id": { + "type": "keyword" + } + } + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/b468d0124dc485385a34504d5b7af82a.asciidoc b/docs/examples/551467688d8c701315d0a371850a4056.asciidoc similarity index 85% rename from docs/examples/b468d0124dc485385a34504d5b7af82a.asciidoc rename to docs/examples/551467688d8c701315d0a371850a4056.asciidoc index a613d0c3d..6cd149a4a 100644 --- a/docs/examples/b468d0124dc485385a34504d5b7af82a.asciidoc +++ b/docs/examples/551467688d8c701315d0a371850a4056.asciidoc @@ -11,7 +11,7 @@ resp = client.reindex( }, dest={ "index": "hugging-face-embeddings", - "pipeline": "hugging_face_embeddings" + "pipeline": "hugging_face_embeddings_pipeline" }, ) print(resp) diff --git a/docs/examples/97f260817b60f3deb7f7034d7dee7e12.asciidoc b/docs/examples/551799fef2f86e393db83a967e4a30d1.asciidoc similarity index 75% rename from docs/examples/97f260817b60f3deb7f7034d7dee7e12.asciidoc rename to docs/examples/551799fef2f86e393db83a967e4a30d1.asciidoc index 09cf33165..e2b93fb61 100644 --- a/docs/examples/97f260817b60f3deb7f7034d7dee7e12.asciidoc +++ b/docs/examples/551799fef2f86e393db83a967e4a30d1.asciidoc @@ -1,14 +1,20 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/aggregate-metric-double.asciidoc:267 +// mapping/types/aggregate-metric-double.asciidoc:264 [source, python] ---- resp = client.indices.create( index="idx", + settings={ + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, mappings={ - "_source": { - "mode": "synthetic" - }, "properties": { "agg_metric": { "type": "aggregate_metric_double", diff --git 
a/docs/examples/1b60ad542abb511cbd926ac8c55b609c.asciidoc b/docs/examples/565386eee0951865a684e41fab53b40c.asciidoc similarity index 84% rename from docs/examples/1b60ad542abb511cbd926ac8c55b609c.asciidoc rename to docs/examples/565386eee0951865a684e41fab53b40c.asciidoc index 4e3a57eb3..449bbfbe6 100644 --- a/docs/examples/1b60ad542abb511cbd926ac8c55b609c.asciidoc +++ b/docs/examples/565386eee0951865a684e41fab53b40c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-elser.asciidoc:141 +// inference/service-elser.asciidoc:162 [source, python] ---- @@ -13,7 +13,8 @@ resp = client.inference.put( "enabled": True, "min_number_of_allocations": 3, "max_number_of_allocations": 10 - } + }, + "num_threads": 1 } }, ) diff --git a/docs/examples/5db5349162a4fbe74bffb646926a2495.asciidoc b/docs/examples/56da252798b8e7b006738428aa1a7f4c.asciidoc similarity index 67% rename from docs/examples/5db5349162a4fbe74bffb646926a2495.asciidoc rename to docs/examples/56da252798b8e7b006738428aa1a7f4c.asciidoc index 43631c2cb..f53776159 100644 --- a/docs/examples/5db5349162a4fbe74bffb646926a2495.asciidoc +++ b/docs/examples/56da252798b8e7b006738428aa1a7f4c.asciidoc @@ -1,14 +1,20 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/range.asciidoc:357 +// mapping/types/range.asciidoc:373 [source, python] ---- resp = client.indices.create( index="idx", + settings={ + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, mappings={ - "_source": { - "mode": "synthetic" - }, "properties": { "my_range": { "type": "long_range" diff --git a/docs/examples/57e0bbab98f17d5b564d1ea146a55fe4.asciidoc b/docs/examples/57e0bbab98f17d5b564d1ea146a55fe4.asciidoc index 79a2d5fcf..398c41932 100644 --- a/docs/examples/57e0bbab98f17d5b564d1ea146a55fe4.asciidoc +++ b/docs/examples/57e0bbab98f17d5b564d1ea146a55fe4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-index-template.asciidoc:218 +// indices/put-index-template.asciidoc:221 [source, python] ---- diff --git a/docs/examples/584f502cf840134f2db5f39e2483ced1.asciidoc b/docs/examples/584f502cf840134f2db5f39e2483ced1.asciidoc index 925fc1eb0..cb0cd262e 100644 --- a/docs/examples/584f502cf840134f2db5f39e2483ced1.asciidoc +++ b/docs/examples/584f502cf840134f2db5f39e2483ced1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// analysis/analyzers/lang-analyzer.asciidoc:1453 +// analysis/analyzers/lang-analyzer.asciidoc:1454 [source, python] ---- diff --git a/docs/examples/58dd26afc919722e21358c91e112b27a.asciidoc b/docs/examples/58dd26afc919722e21358c91e112b27a.asciidoc new file mode 100644 index 000000000..a96a8752d --- /dev/null +++ b/docs/examples/58dd26afc919722e21358c91e112b27a.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// quickstart/full-text-filtering-tutorial.asciidoc:459 + +[source, python] +---- +resp = client.search( + index="cooking_blog", + query={ + "range": { + "date": { + "gte": "2023-05-01", + "lte": "2023-05-31" + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/58f6b72009512851843c7b7a20e9504a.asciidoc b/docs/examples/58f6b72009512851843c7b7a20e9504a.asciidoc new file mode 100644 index 000000000..3fd25391d --- /dev/null +++ b/docs/examples/58f6b72009512851843c7b7a20e9504a.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// mapping/types/semantic-text.asciidoc:42 + +[source, python] +---- +resp = client.indices.create( + index="my-index-000002", + mappings={ + "properties": { + 
"inference_field": { + "type": "semantic_text", + "inference_id": "my-elser-endpoint-for-ingest", + "search_inference_id": "my-elser-endpoint-for-search" + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/59d736a4d064ed2013c7ead8e32e0998.asciidoc b/docs/examples/59d736a4d064ed2013c7ead8e32e0998.asciidoc index f825daf85..e0e4ba3c9 100644 --- a/docs/examples/59d736a4d064ed2013c7ead8e32e0998.asciidoc +++ b/docs/examples/59d736a4d064ed2013c7ead8e32e0998.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-openai.asciidoc:137 +// inference/service-openai.asciidoc:157 [source, python] ---- diff --git a/docs/examples/804cdf477ec829740e3d045140400c3b.asciidoc b/docs/examples/5a70db31f587b7ffed5e9bc1445430cb.asciidoc similarity index 86% rename from docs/examples/804cdf477ec829740e3d045140400c3b.asciidoc rename to docs/examples/5a70db31f587b7ffed5e9bc1445430cb.asciidoc index 61607f892..157fc4da6 100644 --- a/docs/examples/804cdf477ec829740e3d045140400c3b.asciidoc +++ b/docs/examples/5a70db31f587b7ffed5e9bc1445430cb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/semantic-search-semantic-text.asciidoc:68 +// search/search-your-data/semantic-text-hybrid-search:56 [source, python] ---- diff --git a/docs/examples/5b2a13366bd4e1ab4b25d04d360570dc.asciidoc b/docs/examples/5b2a13366bd4e1ab4b25d04d360570dc.asciidoc index 01a6a6ea8..e47c2638d 100644 --- a/docs/examples/5b2a13366bd4e1ab4b25d04d360570dc.asciidoc +++ b/docs/examples/5b2a13366bd4e1ab4b25d04d360570dc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-component-template.asciidoc:252 +// indices/put-component-template.asciidoc:255 [source, python] ---- diff --git a/docs/examples/5bba213a7f543190139d1a69ab2ed076.asciidoc b/docs/examples/5bba213a7f543190139d1a69ab2ed076.asciidoc new file mode 100644 index 000000000..02874e925 --- /dev/null +++ b/docs/examples/5bba213a7f543190139d1a69ab2ed076.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// esql/esql-across-clusters.asciidoc:301 + +[source, python] +---- +resp = client.esql.async_query( + format="json", + body={ + "query": "\n FROM cluster_one:my-index*,cluster_two:logs*\n | STATS COUNT(http.response.status_code) BY user.id\n | LIMIT 2\n ", + "include_ccs_metadata": True + }, +) +print(resp) +---- diff --git a/docs/examples/5ceb734e3affe00e2cdc29af748d95bf.asciidoc b/docs/examples/5ceb734e3affe00e2cdc29af748d95bf.asciidoc new file mode 100644 index 000000000..b873d1d6e --- /dev/null +++ b/docs/examples/5ceb734e3affe00e2cdc29af748d95bf.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// inference/inference-apis.asciidoc:98 + +[source, python] +---- +resp = client.inference.put( + task_type="sparse_embedding", + inference_id="small_chunk_size", + inference_config={ + "service": "elasticsearch", + "service_settings": { + "num_allocations": 1, + "num_threads": 1 + }, + "chunking_settings": { + "strategy": "sentence", + "max_chunk_size": 100, + "sentence_overlap": 0 + } + }, +) +print(resp) +---- diff --git a/docs/examples/0bef1fdefeb2956d60d52d3f38397cad.asciidoc b/docs/examples/5cf12cc4f98d98dc79bead7e6556679c.asciidoc similarity index 55% rename from docs/examples/0bef1fdefeb2956d60d52d3f38397cad.asciidoc rename to docs/examples/5cf12cc4f98d98dc79bead7e6556679c.asciidoc index 8cedec16e..25d500e11 100644 --- a/docs/examples/0bef1fdefeb2956d60d52d3f38397cad.asciidoc +++ b/docs/examples/5cf12cc4f98d98dc79bead7e6556679c.asciidoc @@ 
-5,9 +5,13 @@ ---- resp = client.indices.create( index="idx", - mappings={ - "_source": { - "mode": "synthetic" + settings={ + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } } }, ) diff --git a/docs/examples/1d827ae674970692643ea81991e5396e.asciidoc b/docs/examples/5daf8ede198be9b118da5bee9896cb00.asciidoc similarity index 75% rename from docs/examples/1d827ae674970692643ea81991e5396e.asciidoc rename to docs/examples/5daf8ede198be9b118da5bee9896cb00.asciidoc index faaa8e48a..ddad0eab9 100644 --- a/docs/examples/1d827ae674970692643ea81991e5396e.asciidoc +++ b/docs/examples/5daf8ede198be9b118da5bee9896cb00.asciidoc @@ -1,14 +1,20 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/flattened.asciidoc:334 +// mapping/types/flattened.asciidoc:333 [source, python] ---- resp = client.indices.create( index="idx", + settings={ + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, mappings={ - "_source": { - "mode": "synthetic" - }, "properties": { "flattened": { "type": "flattened" diff --git a/docs/examples/5deeed427f35cbaee4b8ddc45002a9d7.asciidoc b/docs/examples/5deeed427f35cbaee4b8ddc45002a9d7.asciidoc index a5c9ef373..5b7e50ced 100644 --- a/docs/examples/5deeed427f35cbaee4b8ddc45002a9d7.asciidoc +++ b/docs/examples/5deeed427f35cbaee4b8ddc45002a9d7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/bulk-delete-roles.asciidoc:72 +// rest-api/security/bulk-delete-roles.asciidoc:71 [source, python] ---- diff --git a/docs/examples/4b113c7f475cfe484a150ddbb8e6c5c7.asciidoc b/docs/examples/5e021307d331a4483a5aa2198168451b.asciidoc similarity index 61% rename from docs/examples/4b113c7f475cfe484a150ddbb8e6c5c7.asciidoc rename to docs/examples/5e021307d331a4483a5aa2198168451b.asciidoc index c50d2f691..1c6919af5 100644 --- a/docs/examples/4b113c7f475cfe484a150ddbb8e6c5c7.asciidoc +++ b/docs/examples/5e021307d331a4483a5aa2198168451b.asciidoc @@ -1,10 +1,10 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/create-roles.asciidoc:170 +// rest-api/security/create-roles.asciidoc:183 [source, python] ---- resp = client.security.put_role( - name="role_with_remote_indices", + name="only_remote_access_role", remote_indices=[ { "clusters": [ @@ -20,6 +20,16 @@ resp = client.security.put_role( ] } ], + remote_cluster=[ + { + "clusters": [ + "my_remote" + ], + "privileges": [ + "monitor_stats" + ] + } + ], ) print(resp) ---- diff --git a/docs/examples/60d3f9a99cc91b43aaa7524a9a74dba0.asciidoc b/docs/examples/60d3f9a99cc91b43aaa7524a9a74dba0.asciidoc new file mode 100644 index 000000000..bf47c1f20 --- /dev/null +++ b/docs/examples/60d3f9a99cc91b43aaa7524a9a74dba0.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// troubleshooting/common-issues/rejected-requests.asciidoc:43 + +[source, python] +---- +resp = client.nodes.stats( + metric="breaker", +) +print(resp) +---- diff --git a/docs/examples/62d3c8fccb11471bdc12555c1a7777f2.asciidoc b/docs/examples/62d3c8fccb11471bdc12555c1a7777f2.asciidoc index 2c535f041..e772c2097 100644 --- a/docs/examples/62d3c8fccb11471bdc12555c1a7777f2.asciidoc +++ b/docs/examples/62d3c8fccb11471bdc12555c1a7777f2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/fields/synthetic-source.asciidoc:89 +// mapping/fields/synthetic-source.asciidoc:100 [source, python] ---- diff --git a/docs/examples/6329fb2840a4373ff6d342f2653247cb.asciidoc b/docs/examples/6329fb2840a4373ff6d342f2653247cb.asciidoc new file mode 100644 index 
000000000..d5b3150a1 --- /dev/null +++ b/docs/examples/6329fb2840a4373ff6d342f2653247cb.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// quickstart/getting-started.asciidoc:299 + +[source, python] +---- +resp = client.indices.get_mapping( + index="books", +) +print(resp) +---- diff --git a/docs/examples/636ee2066450605247ec1f68d04b8ee4.asciidoc b/docs/examples/636ee2066450605247ec1f68d04b8ee4.asciidoc index f80d11e67..bf1dc4f69 100644 --- a/docs/examples/636ee2066450605247ec1f68d04b8ee4.asciidoc +++ b/docs/examples/636ee2066450605247ec1f68d04b8ee4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/runtime.asciidoc:1467 +// mapping/runtime.asciidoc:1465 [source, python] ---- diff --git a/docs/examples/63a53fcb0717ae9033a679cbfc932851.asciidoc b/docs/examples/63a53fcb0717ae9033a679cbfc932851.asciidoc new file mode 100644 index 000000000..15c170aba --- /dev/null +++ b/docs/examples/63a53fcb0717ae9033a679cbfc932851.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// inference/service-alibabacloud-ai-search.asciidoc:168 + +[source, python] +---- +resp = client.inference.put( + task_type="completion", + inference_id="alibabacloud_ai_search_completion", + inference_config={ + "service": "alibabacloud-ai-search", + "service_settings": { + "host": "default-j01.platform-cn-shanghai.opensearch.aliyuncs.com", + "api_key": "{{API_KEY}}", + "service_id": "ops-qwen-turbo", + "workspace": "default" + } + }, +) +print(resp) +---- diff --git a/docs/examples/63bf3480627a89b4b4ede4150e1d6bc0.asciidoc b/docs/examples/63bf3480627a89b4b4ede4150e1d6bc0.asciidoc index 5560f6733..c7d05b973 100644 --- a/docs/examples/63bf3480627a89b4b4ede4150e1d6bc0.asciidoc +++ b/docs/examples/63bf3480627a89b4b4ede4150e1d6bc0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/bulk-create-roles.asciidoc:106 +// rest-api/security/bulk-create-roles.asciidoc:119 [source, python] ---- diff --git a/docs/examples/640621cea39cdeeb76fbc95bff31a18d.asciidoc b/docs/examples/640621cea39cdeeb76fbc95bff31a18d.asciidoc index 7d20c7a29..34660de39 100644 --- a/docs/examples/640621cea39cdeeb76fbc95bff31a18d.asciidoc +++ b/docs/examples/640621cea39cdeeb76fbc95bff31a18d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-last-sync-api.asciidoc:115 +// connector/apis/update-connector-last-sync-api.asciidoc:116 [source, python] ---- diff --git a/docs/examples/640dbeecb736bd25f6f2b392b76a7531.asciidoc b/docs/examples/640dbeecb736bd25f6f2b392b76a7531.asciidoc new file mode 100644 index 000000000..2df817d3f --- /dev/null +++ b/docs/examples/640dbeecb736bd25f6f2b392b76a7531.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// cluster/stats.asciidoc:1902 + +[source, python] +---- +resp = client.cluster.stats( + include_remotes=True, +) +print(resp) +---- diff --git a/docs/examples/643e19c3b6ac1134554dd890e2249c2b.asciidoc b/docs/examples/643e19c3b6ac1134554dd890e2249c2b.asciidoc index 8a0fb1610..0c085cc7e 100644 --- a/docs/examples/643e19c3b6ac1134554dd890e2249c2b.asciidoc +++ b/docs/examples/643e19c3b6ac1134554dd890e2249c2b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/logs.asciidoc:26 +// data-streams/logs.asciidoc:18 [source, python] ---- diff --git a/docs/examples/64a79861225553799b26e118d7851dcc.asciidoc b/docs/examples/64a79861225553799b26e118d7851dcc.asciidoc index 13d42948d..d9c6bb56c 100644 --- 
a/docs/examples/64a79861225553799b26e118d7851dcc.asciidoc +++ b/docs/examples/64a79861225553799b26e118d7851dcc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ilm/error-handling.asciidoc:57 +// ilm/error-handling.asciidoc:60 [source, python] ---- diff --git a/docs/examples/6606d46685d10377b996b5f20f1229b5.asciidoc b/docs/examples/6606d46685d10377b996b5f20f1229b5.asciidoc index 9e0132018..793512aa2 100644 --- a/docs/examples/6606d46685d10377b996b5f20f1229b5.asciidoc +++ b/docs/examples/6606d46685d10377b996b5f20f1229b5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-index-name-api.asciidoc:75 +// connector/apis/update-connector-index-name-api.asciidoc:76 [source, python] ---- diff --git a/docs/examples/67154a4837cf996a9a9c3e61d6e9d1b3.asciidoc b/docs/examples/67154a4837cf996a9a9c3e61d6e9d1b3.asciidoc deleted file mode 100644 index 647b80bda..000000000 --- a/docs/examples/67154a4837cf996a9a9c3e61d6e9d1b3.asciidoc +++ /dev/null @@ -1,15 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// migration/migrate_8_0/migrate_to_java_time.asciidoc:289 - -[source, python] ----- -resp = client.reindex( - source={ - "index": "my-index-000001" - }, - dest={ - "index": "my-index-000002" - }, -) -print(resp) ----- diff --git a/docs/examples/67bab07fda27ef77e3bc948211051a33.asciidoc b/docs/examples/67bab07fda27ef77e3bc948211051a33.asciidoc index d7b22ab98..e7072b9dc 100644 --- a/docs/examples/67bab07fda27ef77e3bc948211051a33.asciidoc +++ b/docs/examples/67bab07fda27ef77e3bc948211051a33.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/hotspotting.asciidoc:154 +// troubleshooting/common-issues/hotspotting.asciidoc:156 [source, python] ---- diff --git a/docs/examples/69541f0bb81ab3797926bb2a00607cda.asciidoc b/docs/examples/69541f0bb81ab3797926bb2a00607cda.asciidoc new file mode 100644 index 000000000..72ee5b9f0 --- /dev/null +++ b/docs/examples/69541f0bb81ab3797926bb2a00607cda.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// search/retriever.asciidoc:512 + +[source, python] +---- +resp = client.inference.put( + task_type="rerank", + inference_id="my-msmarco-minilm-model", + inference_config={ + "service": "elasticsearch", + "service_settings": { + "num_allocations": 1, + "num_threads": 1, + "model_id": "cross-encoder__ms-marco-minilm-l-6-v2" + } + }, +) +print(resp) +---- diff --git a/docs/examples/69ab708fe65a75f870223d2289c3d171.asciidoc b/docs/examples/69ab708fe65a75f870223d2289c3d171.asciidoc index 342a15224..9bacae628 100644 --- a/docs/examples/69ab708fe65a75f870223d2289c3d171.asciidoc +++ b/docs/examples/69ab708fe65a75f870223d2289c3d171.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/processors/redact.asciidoc:106 +// ingest/processors/redact.asciidoc:107 [source, python] ---- diff --git a/docs/examples/a69c7c3412af73758f629e76263063b5.asciidoc b/docs/examples/6b6e275efe3d2aafe0fc3443f2c96868.asciidoc similarity index 84% rename from docs/examples/a69c7c3412af73758f629e76263063b5.asciidoc rename to docs/examples/6b6e275efe3d2aafe0fc3443f2c96868.asciidoc index 8ffb9afbe..c10420e51 100644 --- a/docs/examples/a69c7c3412af73758f629e76263063b5.asciidoc +++ b/docs/examples/6b6e275efe3d2aafe0fc3443f2c96868.asciidoc @@ -11,7 +11,7 @@ resp = client.reindex( }, dest={ "index": "google-vertex-ai-embeddings", - "pipeline": "google_vertex_ai_embeddings" + "pipeline": "google_vertex_ai_embeddings_pipeline" }, ) print(resp) diff 
--git a/docs/examples/6b8c5c8145c287c4fc535fa57ccf95a7.asciidoc b/docs/examples/6b8c5c8145c287c4fc535fa57ccf95a7.asciidoc index 0b217472d..5afe00179 100644 --- a/docs/examples/6b8c5c8145c287c4fc535fa57ccf95a7.asciidoc +++ b/docs/examples/6b8c5c8145c287c4fc535fa57ccf95a7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/list-connector-sync-jobs-api.asciidoc:64 +// connector/apis/list-connector-sync-jobs-api.asciidoc:65 [source, python] ---- diff --git a/docs/examples/10535507a9735fcf06600444b9067d4c.asciidoc b/docs/examples/6cb1dae368c945ecf7c9ec332a5743a2.asciidoc similarity index 74% rename from docs/examples/10535507a9735fcf06600444b9067d4c.asciidoc rename to docs/examples/6cb1dae368c945ecf7c9ec332a5743a2.asciidoc index 475eb922f..553747119 100644 --- a/docs/examples/10535507a9735fcf06600444b9067d4c.asciidoc +++ b/docs/examples/6cb1dae368c945ecf7c9ec332a5743a2.asciidoc @@ -1,14 +1,20 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/text.asciidoc:184 +// mapping/types/text.asciidoc:180 [source, python] ---- resp = client.indices.create( index="idx", + settings={ + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, mappings={ - "_source": { - "mode": "synthetic" - }, "properties": { "text": { "type": "text", diff --git a/docs/examples/6db118771354792646229e7a3c30c7e9.asciidoc b/docs/examples/6db118771354792646229e7a3c30c7e9.asciidoc index ad36969c7..26dd39276 100644 --- a/docs/examples/6db118771354792646229e7a3c30c7e9.asciidoc +++ b/docs/examples/6db118771354792646229e7a3c30c7e9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/runtime.asciidoc:993 +// mapping/runtime.asciidoc:991 [source, python] ---- diff --git a/docs/examples/6dcd3916679f6aa64f79524c75991ebd.asciidoc b/docs/examples/6dcd3916679f6aa64f79524c75991ebd.asciidoc index 10fcf33a0..3e1bbdaa0 100644 --- a/docs/examples/6dcd3916679f6aa64f79524c75991ebd.asciidoc +++ b/docs/examples/6dcd3916679f6aa64f79524c75991ebd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-rest.asciidoc:245 +// esql/esql-rest.asciidoc:247 [source, python] ---- diff --git a/docs/examples/6ddd4e657efbf45def430a6419825796.asciidoc b/docs/examples/6ddd4e657efbf45def430a6419825796.asciidoc index b7861383a..f3e419d1f 100644 --- a/docs/examples/6ddd4e657efbf45def430a6419825796.asciidoc +++ b/docs/examples/6ddd4e657efbf45def430a6419825796.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-azure-ai-studio.asciidoc:159 +// inference/service-azure-ai-studio.asciidoc:179 [source, python] ---- diff --git a/docs/examples/6e6b78e6b689a5d6aa637271b6d084e2.asciidoc b/docs/examples/6e6b78e6b689a5d6aa637271b6d084e2.asciidoc new file mode 100644 index 000000000..798e0b46a --- /dev/null +++ b/docs/examples/6e6b78e6b689a5d6aa637271b6d084e2.asciidoc @@ -0,0 +1,52 @@ +// This file is autogenerated, DO NOT EDIT +// search/retriever.asciidoc:328 + +[source, python] +---- +resp = client.search( + index="movies", + retriever={ + "rrf": { + "retrievers": [ + { + "standard": { + "query": { + "sparse_vector": { + "field": "plot_embedding", + "inference_id": "my-elser-model", + "query": "films that explore psychological depths" + } + } + } + }, + { + "standard": { + "query": { + "multi_match": { + "query": "crime", + "fields": [ + "plot", + "title" + ] + } + } + } + }, + { + "knn": { + "field": "vector", + "query_vector": [ + 10, + 22, + 77 + ], + "k": 10, + "num_candidates": 10 + } + } + ] + } + }, +) 
+print(resp) +---- diff --git a/docs/examples/6f8bdca97e43aac75e32de655aa4314a.asciidoc b/docs/examples/6f8bdca97e43aac75e32de655aa4314a.asciidoc new file mode 100644 index 000000000..31751614e --- /dev/null +++ b/docs/examples/6f8bdca97e43aac75e32de655aa4314a.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// connector/docs/connectors-API-tutorial.asciidoc:450 + +[source, python] +---- +resp = client.connector.delete( + connector_id="my-connector-id&delete_sync_jobs=true", +) +print(resp) +---- diff --git a/docs/examples/7163346755400594d1dd7e445aa19ff0.asciidoc b/docs/examples/7163346755400594d1dd7e445aa19ff0.asciidoc new file mode 100644 index 000000000..914d14320 --- /dev/null +++ b/docs/examples/7163346755400594d1dd7e445aa19ff0.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// connector/docs/connectors-API-tutorial.asciidoc:426 + +[source, python] +---- +resp = client.search( + index="music", +) +print(resp) +---- diff --git a/docs/examples/8bf51fd50195b46bacbf872f460ebec2.asciidoc b/docs/examples/71998bb300ac2a58419b0772cdc1c586.asciidoc similarity index 70% rename from docs/examples/8bf51fd50195b46bacbf872f460ebec2.asciidoc rename to docs/examples/71998bb300ac2a58419b0772cdc1c586.asciidoc index 550e81a85..0b9cdbc9f 100644 --- a/docs/examples/8bf51fd50195b46bacbf872f460ebec2.asciidoc +++ b/docs/examples/71998bb300ac2a58419b0772cdc1c586.asciidoc @@ -1,14 +1,20 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/version.asciidoc:86 +// mapping/types/version.asciidoc:85 [source, python] ---- resp = client.indices.create( index="idx", + settings={ + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, mappings={ - "_source": { - "mode": "synthetic" - }, "properties": { "versions": { "type": "version" diff --git a/docs/examples/71c629c44bf3c542a0daacbfc253c4b0.asciidoc b/docs/examples/71c629c44bf3c542a0daacbfc253c4b0.asciidoc index 4644d9566..bd0b9c3ca 100644 --- a/docs/examples/71c629c44bf3c542a0daacbfc253c4b0.asciidoc +++ b/docs/examples/71c629c44bf3c542a0daacbfc253c4b0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/stats.asciidoc:1812 +// cluster/stats.asciidoc:1895 [source, python] ---- diff --git a/docs/examples/72ae3851160fcf02b8e2cdfd4e57d238.asciidoc b/docs/examples/72ae3851160fcf02b8e2cdfd4e57d238.asciidoc index 570c50a35..55eef3143 100644 --- a/docs/examples/72ae3851160fcf02b8e2cdfd4e57d238.asciidoc +++ b/docs/examples/72ae3851160fcf02b8e2cdfd4e57d238.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ilm/apis/start.asciidoc:75 +// tab-widgets/troubleshooting/data/start-ilm.asciidoc:66 [source, python] ---- diff --git a/docs/examples/741180473ba526219578ad0422f4fe81.asciidoc b/docs/examples/741180473ba526219578ad0422f4fe81.asciidoc index bb11e7c3e..328eb727d 100644 --- a/docs/examples/741180473ba526219578ad0422f4fe81.asciidoc +++ b/docs/examples/741180473ba526219578ad0422f4fe81.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-features-api.asciidoc:90 +// connector/apis/update-connector-features-api.asciidoc:91 [source, python] ---- diff --git a/docs/examples/7429b16221fe741fd31b0584786dd0b0.asciidoc b/docs/examples/7429b16221fe741fd31b0584786dd0b0.asciidoc index a124435ab..e9d165184 100644 --- a/docs/examples/7429b16221fe741fd31b0584786dd0b0.asciidoc +++ b/docs/examples/7429b16221fe741fd31b0584786dd0b0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// 
inference/post-inference.asciidoc:248 +// inference/post-inference.asciidoc:243 [source, python] ---- diff --git a/docs/examples/744aeb2af40f519e430e21e004e3c3b7.asciidoc b/docs/examples/744aeb2af40f519e430e21e004e3c3b7.asciidoc index 8cf092fe3..73bd36aa9 100644 --- a/docs/examples/744aeb2af40f519e430e21e004e3c3b7.asciidoc +++ b/docs/examples/744aeb2af40f519e430e21e004e3c3b7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/multivalued-fields.asciidoc:91 +// esql/multivalued-fields.asciidoc:97 [source, python] ---- diff --git a/docs/examples/a4ec42130f3c75fc9d1d5f7cb6222cd5.asciidoc b/docs/examples/745864ef2427188241a4702b94ea57be.asciidoc similarity index 94% rename from docs/examples/a4ec42130f3c75fc9d1d5f7cb6222cd5.asciidoc rename to docs/examples/745864ef2427188241a4702b94ea57be.asciidoc index 8f5158ccc..f2b580fb5 100644 --- a/docs/examples/a4ec42130f3c75fc9d1d5f7cb6222cd5.asciidoc +++ b/docs/examples/745864ef2427188241a4702b94ea57be.asciidoc @@ -11,7 +11,7 @@ resp = client.search( "filter": { "range": { "price": { - "to": "500" + "lte": "500" } } } diff --git a/docs/examples/746e0a1cb5984f2672963b363505c7b3.asciidoc b/docs/examples/746e0a1cb5984f2672963b363505c7b3.asciidoc index 265bb0fb3..5c56c2f3f 100644 --- a/docs/examples/746e0a1cb5984f2672963b363505c7b3.asciidoc +++ b/docs/examples/746e0a1cb5984f2672963b363505c7b3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/date.asciidoc:189 +// mapping/types/date.asciidoc:188 [source, python] ---- diff --git a/docs/examples/7471e97aaaf21c3a200abdd89f15c3cc.asciidoc b/docs/examples/7471e97aaaf21c3a200abdd89f15c3cc.asciidoc index 63f567ecd..471f96714 100644 --- a/docs/examples/7471e97aaaf21c3a200abdd89f15c3cc.asciidoc +++ b/docs/examples/7471e97aaaf21c3a200abdd89f15c3cc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/intervals-query.asciidoc:312 +// query-dsl/intervals-query.asciidoc:383 [source, python] ---- diff --git a/docs/examples/35b686d9d9e915d0dea7a4251781767d.asciidoc b/docs/examples/750ac969f9a05567f5cdf4f93d6244b6.asciidoc similarity index 95% rename from docs/examples/35b686d9d9e915d0dea7a4251781767d.asciidoc rename to docs/examples/750ac969f9a05567f5cdf4f93d6244b6.asciidoc index 02508226f..455c5f508 100644 --- a/docs/examples/35b686d9d9e915d0dea7a4251781767d.asciidoc +++ b/docs/examples/750ac969f9a05567f5cdf4f93d6244b6.asciidoc @@ -4,7 +4,6 @@ [source, python] ---- resp = client.cluster.reroute( - metric="none", commands=[ { "allocate_empty_primary": { diff --git a/docs/examples/7594a9a85c8511701e281974cbc253e1.asciidoc b/docs/examples/7594a9a85c8511701e281974cbc253e1.asciidoc index 1425a25e1..3e348d90c 100644 --- a/docs/examples/7594a9a85c8511701e281974cbc253e1.asciidoc +++ b/docs/examples/7594a9a85c8511701e281974cbc253e1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// tab-widgets/inference-api/infer-api-task.asciidoc:228 +// tab-widgets/inference-api/infer-api-task.asciidoc:236 [source, python] ---- diff --git a/docs/examples/76c73b54f3f1e5cb1c0fcccd7c3fd18e.asciidoc b/docs/examples/76c73b54f3f1e5cb1c0fcccd7c3fd18e.asciidoc new file mode 100644 index 000000000..de9e27866 --- /dev/null +++ b/docs/examples/76c73b54f3f1e5cb1c0fcccd7c3fd18e.asciidoc @@ -0,0 +1,87 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/ingest-vectors.asciidoc:86 + +[source, python] +---- +resp = client.bulk( + operations=[ + { + "index": { + "_index": "amazon-reviews", + "_id": "2" + } + }, + { + "review_text": 
"This product is amazing! I love it.", + "review_vector": [ + 0.1, + 0.2, + 0.3, + 0.4, + 0.5, + 0.6, + 0.7, + 0.8 + ] + }, + { + "index": { + "_index": "amazon-reviews", + "_id": "3" + } + }, + { + "review_text": "This product is terrible. I hate it.", + "review_vector": [ + 0.8, + 0.7, + 0.6, + 0.5, + 0.4, + 0.3, + 0.2, + 0.1 + ] + }, + { + "index": { + "_index": "amazon-reviews", + "_id": "4" + } + }, + { + "review_text": "This product is great. I can do anything with it.", + "review_vector": [ + 0.1, + 0.2, + 0.3, + 0.4, + 0.5, + 0.6, + 0.7, + 0.8 + ] + }, + { + "index": { + "_index": "amazon-reviews", + "_id": "5" + } + }, + { + "review_text": "This product has ruined my life and the lives of my family and friends.", + "review_vector": [ + 0.8, + 0.7, + 0.6, + 0.5, + 0.4, + 0.3, + 0.2, + 0.1 + ] + } + ], +) +print(resp) +---- diff --git a/docs/examples/77082b1ffaae9ac52dfc133fa597baa7.asciidoc b/docs/examples/77082b1ffaae9ac52dfc133fa597baa7.asciidoc new file mode 100644 index 000000000..c2b612309 --- /dev/null +++ b/docs/examples/77082b1ffaae9ac52dfc133fa597baa7.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// quickstart/full-text-filtering-tutorial.asciidoc:241 + +[source, python] +---- +resp = client.search( + index="cooking_blog", + query={ + "match": { + "description": { + "query": "fluffy pancakes", + "operator": "and" + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/7709a48020a6cefbbe547fb944541cdb.asciidoc b/docs/examples/7709a48020a6cefbbe547fb944541cdb.asciidoc index 10db24405..7e83448bc 100644 --- a/docs/examples/7709a48020a6cefbbe547fb944541cdb.asciidoc +++ b/docs/examples/7709a48020a6cefbbe547fb944541cdb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/dense-vector.asciidoc:392 +// mapping/types/dense-vector.asciidoc:423 [source, python] ---- diff --git a/docs/examples/7752b677825523bfb0c38ad9325a6d47.asciidoc b/docs/examples/7752b677825523bfb0c38ad9325a6d47.asciidoc index 3d71c943a..75baf555d 100644 --- a/docs/examples/7752b677825523bfb0c38ad9325a6d47.asciidoc +++ b/docs/examples/7752b677825523bfb0c38ad9325a6d47.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/delete-connector-api.asciidoc:69 +// connector/apis/delete-connector-api.asciidoc:70 [source, python] ---- diff --git a/docs/examples/776b553df0e507c96dbdbaedecaca0cc.asciidoc b/docs/examples/776b553df0e507c96dbdbaedecaca0cc.asciidoc index 3794702c4..95f751ddd 100644 --- a/docs/examples/776b553df0e507c96dbdbaedecaca0cc.asciidoc +++ b/docs/examples/776b553df0e507c96dbdbaedecaca0cc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/infer-trained-model.asciidoc:880 +// ml/trained-models/apis/infer-trained-model.asciidoc:981 [source, python] ---- diff --git a/docs/examples/77b90f6787195767b6da60d8532714b4.asciidoc b/docs/examples/77b90f6787195767b6da60d8532714b4.asciidoc index 5590d6b84..8e749b938 100644 --- a/docs/examples/77b90f6787195767b6da60d8532714b4.asciidoc +++ b/docs/examples/77b90f6787195767b6da60d8532714b4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-azure-openai.asciidoc:121 +// inference/service-azure-openai.asciidoc:141 [source, python] ---- diff --git a/docs/examples/77d0780c5faea4c9ec51a322a6811b3b.asciidoc b/docs/examples/77d0780c5faea4c9ec51a322a6811b3b.asciidoc index 80d2beda2..35a1df0e3 100644 --- a/docs/examples/77d0780c5faea4c9ec51a322a6811b3b.asciidoc +++ 
b/docs/examples/77d0780c5faea4c9ec51a322a6811b3b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/runtime.asciidoc:1311 +// mapping/runtime.asciidoc:1309 [source, python] ---- diff --git a/docs/examples/78176cd6f570e1534bb40b19e6e900b6.asciidoc b/docs/examples/78176cd6f570e1534bb40b19e6e900b6.asciidoc index 62ff4fba8..e0ae96d2b 100644 --- a/docs/examples/78176cd6f570e1534bb40b19e6e900b6.asciidoc +++ b/docs/examples/78176cd6f570e1534bb40b19e6e900b6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/alias.asciidoc:87 +// cat/alias.asciidoc:85 [source, python] ---- diff --git a/docs/examples/84490ee2c6c07dbd2101ce2e3751e1aa.asciidoc b/docs/examples/7888c509774a2abfe82ca370c43d8789.asciidoc similarity index 86% rename from docs/examples/84490ee2c6c07dbd2101ce2e3751e1aa.asciidoc rename to docs/examples/7888c509774a2abfe82ca370c43d8789.asciidoc index d25d4f72b..9856bdaa3 100644 --- a/docs/examples/84490ee2c6c07dbd2101ce2e3751e1aa.asciidoc +++ b/docs/examples/7888c509774a2abfe82ca370c43d8789.asciidoc @@ -11,7 +11,7 @@ resp = client.reindex( }, dest={ "index": "cohere-embeddings", - "pipeline": "cohere_embeddings" + "pipeline": "cohere_embeddings_pipeline" }, ) print(resp) diff --git a/docs/examples/79ff4e7fa5c004226d05d7e2bfb5dc1e.asciidoc b/docs/examples/79ff4e7fa5c004226d05d7e2bfb5dc1e.asciidoc new file mode 100644 index 000000000..ac63f53fb --- /dev/null +++ b/docs/examples/79ff4e7fa5c004226d05d7e2bfb5dc1e.asciidoc @@ -0,0 +1,51 @@ +// This file is autogenerated, DO NOT EDIT +// mapping/types/passthrough.asciidoc:134 + +[source, python] +---- +resp = client.indices.put_index_template( + name="my-metrics", + index_patterns=[ + "metrics-mymetrics-*" + ], + priority=200, + data_stream={}, + template={ + "settings": { + "index.mode": "time_series" + }, + "mappings": { + "properties": { + "attributes": { + "type": "passthrough", + "priority": 10, + "time_series_dimension": True, + "properties": { + "host.name": { + "type": "keyword" + } + } + }, + "cpu": { + "type": "integer", + "time_series_metric": "counter" + } + } + } + }, +) +print(resp) + +resp1 = client.index( + index="metrics-mymetrics-test", + document={ + "@timestamp": "2020-01-01T00:00:00.000Z", + "attributes": { + "host.name": "foo", + "zone": "bar" + }, + "cpu": 10 + }, +) +print(resp1) +---- diff --git a/docs/examples/7a0eb2222fe282d3aab66e12feff2a3b.asciidoc b/docs/examples/7a0eb2222fe282d3aab66e12feff2a3b.asciidoc index 77928735e..a829a70e7 100644 --- a/docs/examples/7a0eb2222fe282d3aab66e12feff2a3b.asciidoc +++ b/docs/examples/7a0eb2222fe282d3aab66e12feff2a3b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/runtime.asciidoc:834 +// mapping/runtime.asciidoc:832 [source, python] ---- diff --git a/docs/examples/7a27336a61284d079f3cc3994cf927d1.asciidoc b/docs/examples/7a27336a61284d079f3cc3994cf927d1.asciidoc new file mode 100644 index 000000000..5115db85e --- /dev/null +++ b/docs/examples/7a27336a61284d079f3cc3994cf927d1.asciidoc @@ -0,0 +1,58 @@ +// This file is autogenerated, DO NOT EDIT +// connector/docs/dls-overview.asciidoc:283 + +[source, python] +---- +resp = client.security.create_api_key( + name="my-api-key", + role_descriptors={ + "role-source1": { + "indices": [ + { + "names": [ + "source1" + ], + "privileges": [ + "read" + ], + "query": { + "template": { + "params": { + "access_control": [ + "example.user@example.com", + "source1-user-group" + ] + } + }, + "source": "..." 
+ } + } + ] + }, + "role-source2": { + "indices": [ + { + "names": [ + "source2" + ], + "privileges": [ + "read" + ], + "query": { + "template": { + "params": { + "access_control": [ + "example.user@example.com", + "source2-user-group" + ] + } + }, + "source": "..." + } + } + ] + } + }, +) +print(resp) +---- diff --git a/docs/examples/7a32f44a1511ecb0d3f0b0ff2aca5c44.asciidoc b/docs/examples/7a32f44a1511ecb0d3f0b0ff2aca5c44.asciidoc deleted file mode 100644 index 984cd88e4..000000000 --- a/docs/examples/7a32f44a1511ecb0d3f0b0ff2aca5c44.asciidoc +++ /dev/null @@ -1,23 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// migration/migrate_8_0/migrate_to_java_time.asciidoc:305 - -[source, python] ----- -resp = client.indices.update_aliases( - actions=[ - { - "remove": { - "index": "my-index-000001", - "alias": "my-index" - } - }, - { - "add": { - "index": "my-index-000002", - "alias": "my-index" - } - } - ], -) -print(resp) ----- diff --git a/docs/examples/7af1f62b0cf496cbf593d83d30b472cc.asciidoc b/docs/examples/7af1f62b0cf496cbf593d83d30b472cc.asciidoc new file mode 100644 index 000000000..b98d052b9 --- /dev/null +++ b/docs/examples/7af1f62b0cf496cbf593d83d30b472cc.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// connector/docs/connectors-API-tutorial.asciidoc:226 + +[source, python] +---- +resp = client.security.create_api_key( + name="music-connector", + role_descriptors={ + "music-connector-role": { + "cluster": [ + "monitor", + "manage_connector" + ], + "indices": [ + { + "names": [ + "music", + ".search-acl-filter-music", + ".elastic-connectors*" + ], + "privileges": [ + "all" + ], + "allow_restricted_indices": False + } + ] + } + }, +) +print(resp) +---- diff --git a/docs/examples/7b9691bd34a02dd859562eb927f175e0.asciidoc b/docs/examples/7b9691bd34a02dd859562eb927f175e0.asciidoc new file mode 100644 index 000000000..0fd593aaf --- /dev/null +++ b/docs/examples/7b9691bd34a02dd859562eb927f175e0.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// inference/service-elasticsearch.asciidoc:132 + +[source, python] +---- +resp = client.inference.put( + task_type="sparse_embedding", + inference_id="my-elser-model", + inference_config={ + "service": "elasticsearch", + "service_settings": { + "adaptive_allocations": { + "enabled": True, + "min_number_of_allocations": 1, + "max_number_of_allocations": 10 + }, + "num_threads": 1, + "model_id": ".elser_model_2" + } + }, +) +print(resp) +---- diff --git a/docs/examples/7c8f207e43115ea8f20d2298be5aaebc.asciidoc b/docs/examples/7c8f207e43115ea8f20d2298be5aaebc.asciidoc deleted file mode 100644 index 106984537..000000000 --- a/docs/examples/7c8f207e43115ea8f20d2298be5aaebc.asciidoc +++ /dev/null @@ -1,39 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// ingest/apis/simulate-ingest.asciidoc:56 - -[source, python] ----- -resp = client.simulate.ingest( - body={ - "docs": [ - { - "_index": "my-index", - "_id": "id", - "_source": { - "foo": "bar" - } - }, - { - "_index": "my-index", - "_id": "id", - "_source": { - "foo": "rab" - } - } - ], - "pipeline_substitutions": { - "my-pipeline": { - "processors": [ - { - "set": { - "field": "field3", - "value": "value3" - } - } - ] - } - } - }, -) -print(resp) ----- diff --git a/docs/examples/f3574cfee3971d98417b8dc574a91be0.asciidoc b/docs/examples/7d3a74fe0ba3fe95d1c3275365ff9315.asciidoc similarity index 76% rename from docs/examples/f3574cfee3971d98417b8dc574a91be0.asciidoc rename to docs/examples/7d3a74fe0ba3fe95d1c3275365ff9315.asciidoc index 
0d23bbd06..c812891e4 100644 --- a/docs/examples/f3574cfee3971d98417b8dc574a91be0.asciidoc +++ b/docs/examples/7d3a74fe0ba3fe95d1c3275365ff9315.asciidoc @@ -1,14 +1,20 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/flattened.asciidoc:367 +// mapping/types/flattened.asciidoc:374 [source, python] ---- resp = client.indices.create( index="idx", + settings={ + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, mappings={ - "_source": { - "mode": "synthetic" - }, "properties": { "flattened": { "type": "flattened" diff --git a/docs/examples/7db09cab02d71f3a10d91071216d80fc.asciidoc b/docs/examples/7db09cab02d71f3a10d91071216d80fc.asciidoc new file mode 100644 index 000000000..cd720284a --- /dev/null +++ b/docs/examples/7db09cab02d71f3a10d91071216d80fc.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/ingest-vectors.asciidoc:108 + +[source, python] +---- +resp = client.search( + index="amazon-reviews", + retriever={ + "knn": { + "field": "review_vector", + "query_vector": [ + 0.1, + 0.2, + 0.3, + 0.4, + 0.5, + 0.6, + 0.7, + 0.8 + ], + "k": 2, + "num_candidates": 5 + } + }, +) +print(resp) +---- diff --git a/docs/examples/7db798942cf2d334456e30ef5fcb801b.asciidoc b/docs/examples/7db798942cf2d334456e30ef5fcb801b.asciidoc new file mode 100644 index 000000000..b91fa55a9 --- /dev/null +++ b/docs/examples/7db798942cf2d334456e30ef5fcb801b.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// quickstart/full-text-filtering-tutorial.asciidoc:161 + +[source, python] +---- +resp = client.search( + index="cooking_blog", + query={ + "match": { + "description": { + "query": "fluffy pancakes" + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/7f1fade93225f8cf6000b93334d76ce4.asciidoc b/docs/examples/7f1fade93225f8cf6000b93334d76ce4.asciidoc new file mode 100644 index 000000000..a2b9f8e5c --- /dev/null +++ b/docs/examples/7f1fade93225f8cf6000b93334d76ce4.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// ingest/processors/ip-location.asciidoc:188 + +[source, python] +---- +resp = client.ingest.put_pipeline( + id="ip_location", + description="Add ip geolocation info", + processors=[ + { + "ip_location": { + "field": "ip" + } + } + ], +) +print(resp) + +resp1 = client.index( + index="my-index-000001", + id="my_id", + pipeline="ip_location", + document={ + "ip": "80.231.5.0" + }, +) +print(resp1) + +resp2 = client.get( + index="my-index-000001", + id="my_id", +) +print(resp2) +---- diff --git a/docs/examples/7fde3ff91c4a2e7080444af37d5cd287.asciidoc b/docs/examples/7fde3ff91c4a2e7080444af37d5cd287.asciidoc index 1e29997a4..0c25a55f4 100644 --- a/docs/examples/7fde3ff91c4a2e7080444af37d5cd287.asciidoc +++ b/docs/examples/7fde3ff91c4a2e7080444af37d5cd287.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-rest.asciidoc:286 +// esql/esql-rest.asciidoc:288 [source, python] ---- diff --git a/docs/examples/981b331db1404b39c1a612a135e4e76d.asciidoc b/docs/examples/8080cd9e24a8785728ce7c372ec4acf1.asciidoc similarity index 50% rename from docs/examples/981b331db1404b39c1a612a135e4e76d.asciidoc rename to docs/examples/8080cd9e24a8785728ce7c372ec4acf1.asciidoc index 6f3574e63..f6290d1e7 100644 --- a/docs/examples/981b331db1404b39c1a612a135e4e76d.asciidoc +++ b/docs/examples/8080cd9e24a8785728ce7c372ec4acf1.asciidoc @@ -1,17 +1,14 @@ // This file is autogenerated, DO NOT EDIT -// ingest/apis/put-geoip-database.asciidoc:15 +// 
watcher/how-watcher-works.asciidoc:159 [source, python] ---- resp = client.perform_request( "PUT", - "/_ingest/geoip/database/my-database-id", + "/_watcher/settings", headers={"Content-Type": "application/json"}, body={ - "name": "GeoIP2-Domain", - "maxmind": { - "account_id": "1025402" - } + "index.routing.allocation.include.role": "watcher" }, ) print(resp) diff --git a/docs/examples/82844ef45e11c0eece100d3109db3182.asciidoc b/docs/examples/82844ef45e11c0eece100d3109db3182.asciidoc index 67b526d67..8886c646e 100644 --- a/docs/examples/82844ef45e11c0eece100d3109db3182.asciidoc +++ b/docs/examples/82844ef45e11c0eece100d3109db3182.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-amazon-bedrock.asciidoc:154 +// inference/service-amazon-bedrock.asciidoc:174 [source, python] ---- diff --git a/docs/examples/828f0045747fde4888a947bb99e190e3.asciidoc b/docs/examples/828f0045747fde4888a947bb99e190e3.asciidoc new file mode 100644 index 000000000..c14704b5a --- /dev/null +++ b/docs/examples/828f0045747fde4888a947bb99e190e3.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// search/retriever.asciidoc:601 + +[source, python] +---- +resp = client.search( + index="movies", + retriever={ + "rule": { + "match_criteria": { + "query_string": "harry potter" + }, + "ruleset_ids": [ + "my-ruleset" + ], + "retriever": { + "standard": { + "query": { + "query_string": { + "query": "harry potter" + } + } + } + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/9aa2327ae315c39f2bce2bd22e0deb1b.asciidoc b/docs/examples/8417d8d35ec5fc5665dfb2f95d6d1101.asciidoc similarity index 92% rename from docs/examples/9aa2327ae315c39f2bce2bd22e0deb1b.asciidoc rename to docs/examples/8417d8d35ec5fc5665dfb2f95d6d1101.asciidoc index f342a73d5..b772d4988 100644 --- a/docs/examples/9aa2327ae315c39f2bce2bd22e0deb1b.asciidoc +++ b/docs/examples/8417d8d35ec5fc5665dfb2f95d6d1101.asciidoc @@ -17,7 +17,7 @@ resp = client.search( { "range": { "result.execution_time": { - "from": "now-10s" + "gte": "now-10s" } } } diff --git a/docs/examples/84237aa9da49ab4b4c4e2b21d2548df2.asciidoc b/docs/examples/84237aa9da49ab4b4c4e2b21d2548df2.asciidoc index 17a2d2fcc..ca3a5b11e 100644 --- a/docs/examples/84237aa9da49ab4b4c4e2b21d2548df2.asciidoc +++ b/docs/examples/84237aa9da49ab4b4c4e2b21d2548df2.asciidoc @@ -3,9 +3,8 @@ [source, python] ---- -resp = client.snapshot.create( - repository="my_repository", - snapshot="_verify_integrity", +resp = client.snapshot.repository_verify_integrity( + name="my_repository", ) print(resp) ---- diff --git a/docs/examples/84243213614fe64930b1d430704afb29.asciidoc b/docs/examples/84243213614fe64930b1d430704afb29.asciidoc index 15574d907..ab64c7049 100644 --- a/docs/examples/84243213614fe64930b1d430704afb29.asciidoc +++ b/docs/examples/84243213614fe64930b1d430704afb29.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/runtime.asciidoc:1016 +// mapping/runtime.asciidoc:1014 [source, python] ---- diff --git a/docs/examples/84c69fb07050f0e89720007a6507a221.asciidoc b/docs/examples/84c69fb07050f0e89720007a6507a221.asciidoc index c46ae806d..e89a8c84b 100644 --- a/docs/examples/84c69fb07050f0e89720007a6507a221.asciidoc +++ b/docs/examples/84c69fb07050f0e89720007a6507a221.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/high-cpu-usage.asciidoc:95 +// troubleshooting/common-issues/high-cpu-usage.asciidoc:114 [source, python] ---- diff --git 
a/docs/examples/84ef9fe951c6d3caa7438238a5b23319.asciidoc b/docs/examples/84ef9fe951c6d3caa7438238a5b23319.asciidoc new file mode 100644 index 000000000..0ece7ba7c --- /dev/null +++ b/docs/examples/84ef9fe951c6d3caa7438238a5b23319.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// quickstart/full-text-filtering-tutorial.asciidoc:487 + +[source, python] +---- +resp = client.search( + index="cooking_blog", + query={ + "term": { + "author.keyword": "Maria Rodriguez" + } + }, +) +print(resp) +---- diff --git a/docs/examples/853fc710cea79fb4e1a85fb6d149f9c5.asciidoc b/docs/examples/853fc710cea79fb4e1a85fb6d149f9c5.asciidoc new file mode 100644 index 000000000..70473d84b --- /dev/null +++ b/docs/examples/853fc710cea79fb4e1a85fb6d149f9c5.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// search/retriever.asciidoc:640 + +[source, python] +---- +resp = client.search( + index="movies", + retriever={ + "rule": { + "match_criteria": { + "query_string": "harry potter" + }, + "ruleset_ids": [ + "my-ruleset" + ], + "retriever": { + "rrf": { + "retrievers": [ + { + "standard": { + "query": { + "query_string": { + "query": "sorcerer's stone" + } + } + } + }, + { + "standard": { + "query": { + "query_string": { + "query": "chamber of secrets" + } + } + } + } + ] + } + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/8566f5ecf4ae14802ba63c8cc7c629f8.asciidoc b/docs/examples/8566f5ecf4ae14802ba63c8cc7c629f8.asciidoc index dfffb62b0..cf1c0fa6c 100644 --- a/docs/examples/8566f5ecf4ae14802ba63c8cc7c629f8.asciidoc +++ b/docs/examples/8566f5ecf4ae14802ba63c8cc7c629f8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// tab-widgets/inference-api/infer-api-task.asciidoc:208 +// tab-widgets/inference-api/infer-api-task.asciidoc:216 [source, python] ---- diff --git a/docs/examples/8593715fcc70315a0816b435551258e0.asciidoc b/docs/examples/8593715fcc70315a0816b435551258e0.asciidoc index d3df5ebad..743b90211 100644 --- a/docs/examples/8593715fcc70315a0816b435551258e0.asciidoc +++ b/docs/examples/8593715fcc70315a0816b435551258e0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/semantic-text.asciidoc:155 +// mapping/types/semantic-text.asciidoc:185 [source, python] ---- diff --git a/docs/examples/85f9fc6f98e8573efed9b034e853d5ae.asciidoc b/docs/examples/85f9fc6f98e8573efed9b034e853d5ae.asciidoc new file mode 100644 index 000000000..b7610f139 --- /dev/null +++ b/docs/examples/85f9fc6f98e8573efed9b034e853d5ae.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// inference/service-elasticsearch.asciidoc:248 + +[source, python] +---- +resp = client.inference.put( + task_type="sparse_embedding", + inference_id="use_existing_deployment", + inference_config={ + "service": "elasticsearch", + "service_settings": { + "deployment_id": ".elser_model_2" + } + }, +) +print(resp) +---- diff --git a/docs/examples/8619bd17bbfe33490b1f277007f654db.asciidoc b/docs/examples/8619bd17bbfe33490b1f277007f654db.asciidoc index 57c7e9c05..03d8b42f7 100644 --- a/docs/examples/8619bd17bbfe33490b1f277007f654db.asciidoc +++ b/docs/examples/8619bd17bbfe33490b1f277007f654db.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-cohere.asciidoc:188 +// inference/service-cohere.asciidoc:208 [source, python] ---- diff --git a/docs/examples/861f5f61409dc87f3671293b87839ff7.asciidoc b/docs/examples/861f5f61409dc87f3671293b87839ff7.asciidoc index b5cbc6f3d..23dd310c0 100644 --- 
a/docs/examples/861f5f61409dc87f3671293b87839ff7.asciidoc +++ b/docs/examples/861f5f61409dc87f3671293b87839ff7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/stats.asciidoc:1450 +// cluster/stats.asciidoc:1533 [source, python] ---- diff --git a/docs/examples/87457bb3467484bec3e9df4e25942ba6.asciidoc b/docs/examples/87457bb3467484bec3e9df4e25942ba6.asciidoc index e2e6764d2..484399a22 100644 --- a/docs/examples/87457bb3467484bec3e9df4e25942ba6.asciidoc +++ b/docs/examples/87457bb3467484bec3e9df4e25942ba6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/multivalued-fields.asciidoc:225 +// esql/multivalued-fields.asciidoc:269 [source, python] ---- diff --git a/docs/examples/113ac8466084ee6ac4ed272e342dc468.asciidoc b/docs/examples/894fce12d8f0d01e4c4083885a0c0077.asciidoc similarity index 86% rename from docs/examples/113ac8466084ee6ac4ed272e342dc468.asciidoc rename to docs/examples/894fce12d8f0d01e4c4083885a0c0077.asciidoc index dde6989d5..2b80551c1 100644 --- a/docs/examples/113ac8466084ee6ac4ed272e342dc468.asciidoc +++ b/docs/examples/894fce12d8f0d01e4c4083885a0c0077.asciidoc @@ -11,7 +11,7 @@ resp = client.reindex( }, dest={ "index": "mistral-embeddings", - "pipeline": "mistral_embeddings" + "pipeline": "mistral_embeddings_pipeline" }, ) print(resp) diff --git a/docs/examples/8a1b6eae4893c5dd27b3d81fd8d70f5b.asciidoc b/docs/examples/8a1b6eae4893c5dd27b3d81fd8d70f5b.asciidoc index f670391bd..8a6c13fbf 100644 --- a/docs/examples/8a1b6eae4893c5dd27b3d81fd8d70f5b.asciidoc +++ b/docs/examples/8a1b6eae4893c5dd27b3d81fd8d70f5b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/semantic-search-inference.asciidoc:89 +// search/search-your-data/semantic-search-elser.asciidoc:157 [source, python] ---- diff --git a/docs/examples/8b8b6aac2111b2d8b93758ac737e6543.asciidoc b/docs/examples/8b8b6aac2111b2d8b93758ac737e6543.asciidoc new file mode 100644 index 000000000..55f7d68aa --- /dev/null +++ b/docs/examples/8b8b6aac2111b2d8b93758ac737e6543.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// mapping/fields/synthetic-source.asciidoc:231 + +[source, python] +---- +resp = client.indices.create( + index="idx_keep", + settings={ + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, + mappings={ + "properties": { + "path": { + "type": "object", + "synthetic_source_keep": "all" + }, + "ids": { + "type": "integer", + "synthetic_source_keep": "arrays" + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/8bf1e7a6d529547906ba8b1d6501fa0c.asciidoc b/docs/examples/8bf1e7a6d529547906ba8b1d6501fa0c.asciidoc index 74dc43129..1c44b7e5c 100644 --- a/docs/examples/8bf1e7a6d529547906ba8b1d6501fa0c.asciidoc +++ b/docs/examples/8bf1e7a6d529547906ba8b1d6501fa0c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/set-connector-sync-job-error-api.asciidoc:56 +// connector/apis/set-connector-sync-job-error-api.asciidoc:57 [source, python] ---- diff --git a/docs/examples/8c92c5e87facbae8dc4f58376ec21815.asciidoc b/docs/examples/8c92c5e87facbae8dc4f58376ec21815.asciidoc index 1d0d3cbd6..144aad3e1 100644 --- a/docs/examples/8c92c5e87facbae8dc4f58376ec21815.asciidoc +++ b/docs/examples/8c92c5e87facbae8dc4f58376ec21815.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/runtime.asciidoc:1040 +// mapping/runtime.asciidoc:1038 [source, python] ---- diff --git 
a/docs/examples/bdb671866e2f0195f8dfbdb7f20bf591.asciidoc b/docs/examples/8cad5d95a0e7c103f08be53d0b172558.asciidoc similarity index 56% rename from docs/examples/bdb671866e2f0195f8dfbdb7f20bf591.asciidoc rename to docs/examples/8cad5d95a0e7c103f08be53d0b172558.asciidoc index f398c9d7c..d27833994 100644 --- a/docs/examples/bdb671866e2f0195f8dfbdb7f20bf591.asciidoc +++ b/docs/examples/8cad5d95a0e7c103f08be53d0b172558.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/semantic-search-semantic-text.asciidoc:34 +// search/search-your-data/semantic-text-hybrid-search:20 [source, python] ---- @@ -9,7 +9,11 @@ resp = client.inference.put( inference_config={ "service": "elser", "service_settings": { - "num_allocations": 1, + "adaptive_allocations": { + "enabled": True, + "min_number_of_allocations": 3, + "max_number_of_allocations": 10 + }, "num_threads": 1 } }, diff --git a/docs/examples/8d05862be1f9e7edaba162b1888b5677.asciidoc b/docs/examples/8d05862be1f9e7edaba162b1888b5677.asciidoc new file mode 100644 index 000000000..0433809f5 --- /dev/null +++ b/docs/examples/8d05862be1f9e7edaba162b1888b5677.asciidoc @@ -0,0 +1,61 @@ +// This file is autogenerated, DO NOT EDIT +// quickstart/full-text-filtering-tutorial.asciidoc:50 + +[source, python] +---- +resp = client.indices.put_mapping( + index="cooking_blog", + properties={ + "title": { + "type": "text", + "analyzer": "standard", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "description": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "author": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "date": { + "type": "date", + "format": "yyyy-MM-dd" + }, + "category": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "tags": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "rating": { + "type": "float" + } + }, +) +print(resp) +---- diff --git a/docs/examples/8d9b04f2a97f4229dec9e620126de049.asciidoc b/docs/examples/8d9b04f2a97f4229dec9e620126de049.asciidoc index 846594fa0..e175422ed 100644 --- a/docs/examples/8d9b04f2a97f4229dec9e620126de049.asciidoc +++ b/docs/examples/8d9b04f2a97f4229dec9e620126de049.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/repository-s3.asciidoc:580 +// snapshot-restore/repository-s3.asciidoc:602 [source, python] ---- diff --git a/docs/examples/8de6fed6ba2b94ce6a12ce076be2b4d7.asciidoc b/docs/examples/8de6fed6ba2b94ce6a12ce076be2b4d7.asciidoc index 98d0c6ac7..30b9b54bb 100644 --- a/docs/examples/8de6fed6ba2b94ce6a12ce076be2b4d7.asciidoc +++ b/docs/examples/8de6fed6ba2b94ce6a12ce076be2b4d7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/segments.asciidoc:127 +// cat/segments.asciidoc:126 [source, python] ---- diff --git a/docs/examples/8e2bbef535fef688d397e60e09aefa7f.asciidoc b/docs/examples/8e2bbef535fef688d397e60e09aefa7f.asciidoc index b6b5fa22c..ad334ccf3 100644 --- a/docs/examples/8e2bbef535fef688d397e60e09aefa7f.asciidoc +++ b/docs/examples/8e2bbef535fef688d397e60e09aefa7f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/hotspotting.asciidoc:200 +// troubleshooting/common-issues/hotspotting.asciidoc:202 [source, python] ---- diff --git a/docs/examples/8e89fee0be6a436c4e3d7c152659c47e.asciidoc b/docs/examples/8e89fee0be6a436c4e3d7c152659c47e.asciidoc index 5d065d20d..30432d61e 
100644 --- a/docs/examples/8e89fee0be6a436c4e3d7c152659c47e.asciidoc +++ b/docs/examples/8e89fee0be6a436c4e3d7c152659c47e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-scheduling-api.asciidoc:89 +// connector/apis/update-connector-scheduling-api.asciidoc:90 [source, python] ---- diff --git a/docs/examples/8f4a7f68f2ca3698abdf20026a2d8c5f.asciidoc b/docs/examples/8f4a7f68f2ca3698abdf20026a2d8c5f.asciidoc index 3567b8751..73ac49610 100644 --- a/docs/examples/8f4a7f68f2ca3698abdf20026a2d8c5f.asciidoc +++ b/docs/examples/8f4a7f68f2ca3698abdf20026a2d8c5f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/tasks.asciidoc:153 +// troubleshooting/common-issues/high-cpu-usage.asciidoc:77 [source, python] ---- diff --git a/docs/examples/90083d93e46fad2524755b8d4d1306fc.asciidoc b/docs/examples/90083d93e46fad2524755b8d4d1306fc.asciidoc index f2fd9f349..dfe32a284 100644 --- a/docs/examples/90083d93e46fad2524755b8d4d1306fc.asciidoc +++ b/docs/examples/90083d93e46fad2524755b8d4d1306fc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/set-connector-sync-job-stats-api.asciidoc:74 +// connector/apis/set-connector-sync-job-stats-api.asciidoc:75 [source, python] ---- diff --git a/docs/examples/9138550002cb26ab64918cce427963b8.asciidoc b/docs/examples/9138550002cb26ab64918cce427963b8.asciidoc index ce568686c..10a10eebc 100644 --- a/docs/examples/9138550002cb26ab64918cce427963b8.asciidoc +++ b/docs/examples/9138550002cb26ab64918cce427963b8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-index-template.asciidoc:268 +// indices/put-index-template.asciidoc:271 [source, python] ---- diff --git a/docs/examples/9169d19a80175ec94f80865d0f9bef4c.asciidoc b/docs/examples/9169d19a80175ec94f80865d0f9bef4c.asciidoc new file mode 100644 index 000000000..17091127e --- /dev/null +++ b/docs/examples/9169d19a80175ec94f80865d0f9bef4c.asciidoc @@ -0,0 +1,43 @@ +// This file is autogenerated, DO NOT EDIT +// search/retriever.asciidoc:279 + +[source, python] +---- +resp = client.search( + index="restaurants", + retriever={ + "rrf": { + "retrievers": [ + { + "standard": { + "query": { + "multi_match": { + "query": "Austria", + "fields": [ + "city", + "region" + ] + } + } + } + }, + { + "knn": { + "field": "vector", + "query_vector": [ + 10, + 22, + 77 + ], + "k": 10, + "num_candidates": 10 + } + } + ], + "rank_constant": 1, + "rank_window_size": 50 + } + }, +) +print(resp) +---- diff --git a/docs/examples/927b20a221f975b75d1227b67d0eb7e2.asciidoc b/docs/examples/927b20a221f975b75d1227b67d0eb7e2.asciidoc index 5c64ffb1a..5983ceb0a 100644 --- a/docs/examples/927b20a221f975b75d1227b67d0eb7e2.asciidoc +++ b/docs/examples/927b20a221f975b75d1227b67d0eb7e2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-rest.asciidoc:265 +// esql/esql-rest.asciidoc:267 [source, python] ---- diff --git a/docs/examples/92fa6608673cec5a2ed568a07e80d36b.asciidoc b/docs/examples/92fa6608673cec5a2ed568a07e80d36b.asciidoc index a109e86cc..d82a846d5 100644 --- a/docs/examples/92fa6608673cec5a2ed568a07e80d36b.asciidoc +++ b/docs/examples/92fa6608673cec5a2ed568a07e80d36b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/runtime.asciidoc:1551 +// mapping/runtime.asciidoc:1549 [source, python] ---- diff --git a/docs/examples/9313f534e1aa266cde7d4af74665497f.asciidoc b/docs/examples/9313f534e1aa266cde7d4af74665497f.asciidoc new file mode 100644 
index 000000000..8be94a953 --- /dev/null +++ b/docs/examples/9313f534e1aa266cde7d4af74665497f.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// connector/docs/connectors-zoom.asciidoc:219 + +[source, python] +---- +resp = client.connector.put( + connector_id="my-{service-name-stub}-connector", + index_name="my-elasticsearch-index", + name="Content synced from {service-name}", + service_type="{service-name-stub}", +) +print(resp) +---- diff --git a/docs/examples/935566d5426d44ade486a49ec5289741.asciidoc b/docs/examples/935566d5426d44ade486a49ec5289741.asciidoc index 9b2c52446..920cadc2f 100644 --- a/docs/examples/935566d5426d44ade486a49ec5289741.asciidoc +++ b/docs/examples/935566d5426d44ade486a49ec5289741.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/semantic-search-semantic-text.asciidoc:128 +// search/search-your-data/semantic-text-hybrid-search:113 [source, python] ---- diff --git a/docs/examples/9382f022086c692ba05efb0acae65946.asciidoc b/docs/examples/9382f022086c692ba05efb0acae65946.asciidoc index e93dffce6..782786730 100644 --- a/docs/examples/9382f022086c692ba05efb0acae65946.asciidoc +++ b/docs/examples/9382f022086c692ba05efb0acae65946.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/fields/synthetic-source.asciidoc:59 +// mapping/fields/synthetic-source.asciidoc:70 [source, python] ---- diff --git a/docs/examples/95414139c7b1203e3c2d99a354415801.asciidoc b/docs/examples/95414139c7b1203e3c2d99a354415801.asciidoc index 711c56dca..54d9b2266 100644 --- a/docs/examples/95414139c7b1203e3c2d99a354415801.asciidoc +++ b/docs/examples/95414139c7b1203e3c2d99a354415801.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/recovery.asciidoc:84 +// cat/recovery.asciidoc:83 [source, python] ---- diff --git a/docs/examples/7e5bee18e61d950e823782da1b733903.asciidoc b/docs/examples/968fb5b92aa65af09544f7c002b0953e.asciidoc similarity index 87% rename from docs/examples/7e5bee18e61d950e823782da1b733903.asciidoc rename to docs/examples/968fb5b92aa65af09544f7c002b0953e.asciidoc index 5fee705ec..134fe03d9 100644 --- a/docs/examples/7e5bee18e61d950e823782da1b733903.asciidoc +++ b/docs/examples/968fb5b92aa65af09544f7c002b0953e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/semantic-search-semantic-text.asciidoc:173 +// search/search-your-data/semantic-search-semantic-text.asciidoc:185 [source, python] ---- @@ -7,7 +7,7 @@ resp = client.search( index="semantic-embeddings", query={ "semantic": { - "field": "semantic_text", + "field": "content", "query": "How to avoid muscle soreness while running?" 
} }, diff --git a/docs/examples/971fd23adb81bb5842c7750e0379336a.asciidoc b/docs/examples/971fd23adb81bb5842c7750e0379336a.asciidoc new file mode 100644 index 000000000..ffc1c6da8 --- /dev/null +++ b/docs/examples/971fd23adb81bb5842c7750e0379336a.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// search/retriever.asciidoc:528 + +[source, python] +---- +resp = client.search( + index="movies", + retriever={ + "text_similarity_reranker": { + "retriever": { + "standard": { + "query": { + "match": { + "genre": "drama" + } + } + } + }, + "field": "plot", + "inference_id": "my-msmarco-minilm-model", + "inference_text": "films that explore psychological depths" + } + }, +) +print(resp) +---- diff --git a/docs/examples/986f892bfa4dfdf1da8455fdf84a4b0c.asciidoc b/docs/examples/986f892bfa4dfdf1da8455fdf84a4b0c.asciidoc index 627cbc6bb..07f78c0cd 100644 --- a/docs/examples/986f892bfa4dfdf1da8455fdf84a4b0c.asciidoc +++ b/docs/examples/986f892bfa4dfdf1da8455fdf84a4b0c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-alibabacloud-ai-search.asciidoc:136 +// inference/service-alibabacloud-ai-search.asciidoc:222 [source, python] ---- diff --git a/docs/examples/99160b7c3c3fc1fac98aeb426dbcb3cb.asciidoc b/docs/examples/99160b7c3c3fc1fac98aeb426dbcb3cb.asciidoc index f50ea9075..c20218f95 100644 --- a/docs/examples/99160b7c3c3fc1fac98aeb426dbcb3cb.asciidoc +++ b/docs/examples/99160b7c3c3fc1fac98aeb426dbcb3cb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// scripting/fields.asciidoc:171 +// scripting/fields.asciidoc:244 [source, python] ---- diff --git a/docs/examples/99803d7b111b862c0c82e9908e549b16.asciidoc b/docs/examples/99803d7b111b862c0c82e9908e549b16.asciidoc index ceee23cce..3c30cc23d 100644 --- a/docs/examples/99803d7b111b862c0c82e9908e549b16.asciidoc +++ b/docs/examples/99803d7b111b862c0c82e9908e549b16.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-mistral.asciidoc:87 +// inference/service-mistral.asciidoc:107 [source, python] ---- diff --git a/docs/examples/998c8479c8704bca0e121d5969859517.asciidoc b/docs/examples/998c8479c8704bca0e121d5969859517.asciidoc new file mode 100644 index 000000000..c7501ec38 --- /dev/null +++ b/docs/examples/998c8479c8704bca0e121d5969859517.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// connector/docs/connectors-API-tutorial.asciidoc:417 + +[source, python] +---- +resp = client.count( + index="music", +) +print(resp) +---- diff --git a/docs/examples/9a05cc10eea1251e23b82a4549913536.asciidoc b/docs/examples/9a05cc10eea1251e23b82a4549913536.asciidoc index 29eb24242..fde02f78f 100644 --- a/docs/examples/9a05cc10eea1251e23b82a4549913536.asciidoc +++ b/docs/examples/9a05cc10eea1251e23b82a4549913536.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/hotspotting.asciidoc:102 +// troubleshooting/common-issues/hotspotting.asciidoc:104 [source, python] ---- diff --git a/docs/examples/9a203aae3e1412d919546276fb52a5ca.asciidoc b/docs/examples/9a203aae3e1412d919546276fb52a5ca.asciidoc index 57789d3dd..8c93ab505 100644 --- a/docs/examples/9a203aae3e1412d919546276fb52a5ca.asciidoc +++ b/docs/examples/9a203aae3e1412d919546276fb52a5ca.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-cohere.asciidoc:170 +// inference/service-cohere.asciidoc:190 [source, python] ---- diff --git a/docs/examples/9ad0864bcd665b63551e944653d32423.asciidoc 
b/docs/examples/9ad0864bcd665b63551e944653d32423.asciidoc new file mode 100644 index 000000000..a6d007b6a --- /dev/null +++ b/docs/examples/9ad0864bcd665b63551e944653d32423.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/semantic-text-hybrid-search:155 + +[source, python] +---- +resp = client.search( + index="semantic-embeddings", + retriever={ + "rrf": { + "retrievers": [ + { + "standard": { + "query": { + "match": { + "content": "How to avoid muscle soreness while running?" + } + } + } + }, + { + "standard": { + "query": { + "semantic": { + "field": "semantic_text", + "query": "How to avoid muscle soreness while running?" + } + } + } + } + ] + } + }, +) +print(resp) +---- diff --git a/docs/examples/9ae268058c0ea32ef8926568e011c728.asciidoc b/docs/examples/9ae268058c0ea32ef8926568e011c728.asciidoc index f0c74cd8a..d885d8879 100644 --- a/docs/examples/9ae268058c0ea32ef8926568e011c728.asciidoc +++ b/docs/examples/9ae268058c0ea32ef8926568e011c728.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-features-api.asciidoc:122 +// connector/apis/update-connector-features-api.asciidoc:123 [source, python] ---- diff --git a/docs/examples/2cd8439db5054c93c49f1bf50433e1bb.asciidoc b/docs/examples/9aedc45f83e022732789e8d796f5a43c.asciidoc similarity index 96% rename from docs/examples/2cd8439db5054c93c49f1bf50433e1bb.asciidoc rename to docs/examples/9aedc45f83e022732789e8d796f5a43c.asciidoc index 2b48fd10d..3d8050779 100644 --- a/docs/examples/2cd8439db5054c93c49f1bf50433e1bb.asciidoc +++ b/docs/examples/9aedc45f83e022732789e8d796f5a43c.asciidoc @@ -4,7 +4,6 @@ [source, python] ---- resp = client.cluster.reroute( - metric="none", commands=[ { "move": { diff --git a/docs/examples/9b0f34d122a4b348dc86df7410d6ebb6.asciidoc b/docs/examples/9b0f34d122a4b348dc86df7410d6ebb6.asciidoc index bb2afba6c..34e4efa64 100644 --- a/docs/examples/9b0f34d122a4b348dc86df7410d6ebb6.asciidoc +++ b/docs/examples/9b0f34d122a4b348dc86df7410d6ebb6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/cancel-connector-sync-job-api.asciidoc:50 +// connector/apis/cancel-connector-sync-job-api.asciidoc:51 [source, python] ---- diff --git a/docs/examples/9bd5a470ee6d2b4a1f5280adc39675d2.asciidoc b/docs/examples/9bd5a470ee6d2b4a1f5280adc39675d2.asciidoc new file mode 100644 index 000000000..404ca1548 --- /dev/null +++ b/docs/examples/9bd5a470ee6d2b4a1f5280adc39675d2.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// connector/docs/connectors-mysql.asciidoc:503 + +[source, python] +---- +resp = client.update( + index=".elastic-connectors", + id="connector_id", + doc={ + "configuration": { + "tables": { + "type": "list", + "value": "*" + }, + "ssl_enabled": { + "type": "bool", + "value": False + }, + "ssl_ca": { + "type": "str", + "value": "" + }, + "fetch_size": { + "type": "int", + "value": 50 + }, + "retry_count": { + "type": "int", + "value": 3 + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/9c021836acf7c0370e289f611325868d.asciidoc b/docs/examples/9c021836acf7c0370e289f611325868d.asciidoc index 9fc291d61..81f2363e6 100644 --- a/docs/examples/9c021836acf7c0370e289f611325868d.asciidoc +++ b/docs/examples/9c021836acf7c0370e289f611325868d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-configuration-api.asciidoc:308 +// connector/apis/update-connector-configuration-api.asciidoc:309 [source, python] ---- diff 
--git a/docs/examples/5b86d54900e2c4c043a54ca7ae2df0f0.asciidoc b/docs/examples/9c2ce0132e4527077443f007d27b1158.asciidoc similarity index 67% rename from docs/examples/5b86d54900e2c4c043a54ca7ae2df0f0.asciidoc rename to docs/examples/9c2ce0132e4527077443f007d27b1158.asciidoc index 31492b541..64eb514f0 100644 --- a/docs/examples/5b86d54900e2c4c043a54ca7ae2df0f0.asciidoc +++ b/docs/examples/9c2ce0132e4527077443f007d27b1158.asciidoc @@ -1,14 +1,20 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/flattened.asciidoc:407 +// mapping/types/flattened.asciidoc:422 [source, python] ---- resp = client.indices.create( index="idx", + settings={ + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, mappings={ - "_source": { - "mode": "synthetic" - }, "properties": { "flattened": { "type": "flattened" diff --git a/docs/examples/9cb150d67dfa0947f29aa809bcc93c6e.asciidoc b/docs/examples/9cb150d67dfa0947f29aa809bcc93c6e.asciidoc index 6efd75246..ab22d2d4f 100644 --- a/docs/examples/9cb150d67dfa0947f29aa809bcc93c6e.asciidoc +++ b/docs/examples/9cb150d67dfa0947f29aa809bcc93c6e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// datatiers.asciidoc:241 +// datatiers.asciidoc:240 [source, python] ---- diff --git a/docs/examples/9cbb097e5498a9fde39e3b1d3b62a4d2.asciidoc b/docs/examples/9cbb097e5498a9fde39e3b1d3b62a4d2.asciidoc index 15d03e6ab..70dfa8833 100644 --- a/docs/examples/9cbb097e5498a9fde39e3b1d3b62a4d2.asciidoc +++ b/docs/examples/9cbb097e5498a9fde39e3b1d3b62a4d2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/infer-trained-model.asciidoc:945 +// ml/trained-models/apis/infer-trained-model.asciidoc:1046 [source, python] ---- diff --git a/docs/examples/9d396afad93782699d7a929578c85284.asciidoc b/docs/examples/9d396afad93782699d7a929578c85284.asciidoc index 5c4822152..e869a6a92 100644 --- a/docs/examples/9d396afad93782699d7a929578c85284.asciidoc +++ b/docs/examples/9d396afad93782699d7a929578c85284.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// tab-widgets/inference-api/infer-api-task.asciidoc:184 +// tab-widgets/inference-api/infer-api-task.asciidoc:192 [source, python] ---- diff --git a/docs/examples/9d66cb59711f24e6b4ff85608c9b5a1b.asciidoc b/docs/examples/9d66cb59711f24e6b4ff85608c9b5a1b.asciidoc new file mode 100644 index 000000000..dfe0eb762 --- /dev/null +++ b/docs/examples/9d66cb59711f24e6b4ff85608c9b5a1b.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// troubleshooting/common-issues/task-queue-backlog.asciidoc:51 + +[source, python] +---- +resp = client.tasks.list( + pretty=True, + human=True, + detailed=True, +) +print(resp) +---- diff --git a/docs/examples/9de4edafd22a8b9cb557632b2c8779cd.asciidoc b/docs/examples/9de4edafd22a8b9cb557632b2c8779cd.asciidoc index a05788985..f0d7cb7d0 100644 --- a/docs/examples/9de4edafd22a8b9cb557632b2c8779cd.asciidoc +++ b/docs/examples/9de4edafd22a8b9cb557632b2c8779cd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-rest.asciidoc:306 +// esql/esql-rest.asciidoc:308 [source, python] ---- diff --git a/docs/examples/9f16fca9813304e398ee052aa857dbcd.asciidoc b/docs/examples/9f16fca9813304e398ee052aa857dbcd.asciidoc index f56b0349b..ecef289d9 100644 --- a/docs/examples/9f16fca9813304e398ee052aa857dbcd.asciidoc +++ b/docs/examples/9f16fca9813304e398ee052aa857dbcd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-openai.asciidoc:121 +// 
inference/service-openai.asciidoc:141 [source, python] ---- diff --git a/docs/examples/9f3341489fefd38c4e439c29f6dcb86c.asciidoc b/docs/examples/9f3341489fefd38c4e439c29f6dcb86c.asciidoc index 76040c776..f73aece74 100644 --- a/docs/examples/9f3341489fefd38c4e439c29f6dcb86c.asciidoc +++ b/docs/examples/9f3341489fefd38c4e439c29f6dcb86c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/terms-set-query.asciidoc:214 +// query-dsl/terms-set-query.asciidoc:224 [source, python] ---- diff --git a/docs/examples/a1070cf2f5969d42d71cda057223f152.asciidoc b/docs/examples/a1070cf2f5969d42d71cda057223f152.asciidoc index 5c00da449..2de683ba9 100644 --- a/docs/examples/a1070cf2f5969d42d71cda057223f152.asciidoc +++ b/docs/examples/a1070cf2f5969d42d71cda057223f152.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// how-to/size-your-shards.asciidoc:247 +// how-to/size-your-shards.asciidoc:248 [source, python] ---- diff --git a/docs/examples/f38262ef72f73816ec35fa4c9c85760d.asciidoc b/docs/examples/a1b668795243398f5bc40bcc9bead884.asciidoc similarity index 81% rename from docs/examples/f38262ef72f73816ec35fa4c9c85760d.asciidoc rename to docs/examples/a1b668795243398f5bc40bcc9bead884.asciidoc index 9de3a3deb..56d50d1b9 100644 --- a/docs/examples/f38262ef72f73816ec35fa4c9c85760d.asciidoc +++ b/docs/examples/a1b668795243398f5bc40bcc9bead884.asciidoc @@ -5,10 +5,16 @@ ---- resp = client.indices.create( index="idx", + settings={ + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, mappings={ - "_source": { - "mode": "synthetic" - }, "properties": { "my_range": { "type": "long_range" diff --git a/docs/examples/a1dda7e7c01be96a4acf7b725d70385f.asciidoc b/docs/examples/a1dda7e7c01be96a4acf7b725d70385f.asciidoc new file mode 100644 index 000000000..a603b404b --- /dev/null +++ b/docs/examples/a1dda7e7c01be96a4acf7b725d70385f.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// search/retriever.asciidoc:448 + +[source, python] +---- +resp = client.search( + index="index", + retriever={ + "text_similarity_reranker": { + "retriever": { + "standard": { + "query": { + "match_phrase": { + "text": "landmark in Paris" + } + } + } + }, + "field": "text", + "inference_id": "my-cohere-rerank-model", + "inference_text": "Most famous landmark in Paris", + "rank_window_size": 100, + "min_score": 0.5 + } + }, +) +print(resp) +---- diff --git a/docs/examples/a2b2ce031120dac49b5120b26eea8758.asciidoc b/docs/examples/a2b2ce031120dac49b5120b26eea8758.asciidoc index d90984516..4a33279d1 100644 --- a/docs/examples/a2b2ce031120dac49b5120b26eea8758.asciidoc +++ b/docs/examples/a2b2ce031120dac49b5120b26eea8758.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/indices.asciidoc:109 +// cat/indices.asciidoc:113 [source, python] ---- diff --git a/docs/examples/a3646b59da66b9ab68bdbc8dc2e6a9be.asciidoc b/docs/examples/a3646b59da66b9ab68bdbc8dc2e6a9be.asciidoc new file mode 100644 index 000000000..67acc59e9 --- /dev/null +++ b/docs/examples/a3646b59da66b9ab68bdbc8dc2e6a9be.asciidoc @@ -0,0 +1,32 @@ +// This file is autogenerated, DO NOT EDIT +// search/retriever.asciidoc:144 + +[source, python] +---- +resp = client.search( + index="restaurants", + retriever={ + "standard": { + "query": { + "bool": { + "should": [ + { + "match": { + "region": "Austria" + } + } + ], + "filter": [ + { + "term": { + "year": "2019" + } + } + ] + } + } + } + }, +) +print(resp) +---- diff --git 
a/docs/examples/a3779f21f132787c48681bfb50453592.asciidoc b/docs/examples/a3779f21f132787c48681bfb50453592.asciidoc new file mode 100644 index 000000000..6b0436b24 --- /dev/null +++ b/docs/examples/a3779f21f132787c48681bfb50453592.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// ingest/processors/ip-location.asciidoc:85 + +[source, python] +---- +resp = client.ingest.put_pipeline( + id="ip_location", + description="Add ip geolocation info", + processors=[ + { + "ip_location": { + "field": "ip" + } + } + ], +) +print(resp) + +resp1 = client.index( + index="my-index-000001", + id="my_id", + pipeline="ip_location", + document={ + "ip": "89.160.20.128" + }, +) +print(resp1) + +resp2 = client.get( + index="my-index-000001", + id="my_id", +) +print(resp2) +---- diff --git a/docs/examples/a4a3c3cd09efa75168dab90105afb2e9.asciidoc b/docs/examples/a4a3c3cd09efa75168dab90105afb2e9.asciidoc index cc4db4e1d..bcd4b081f 100644 --- a/docs/examples/a4a3c3cd09efa75168dab90105afb2e9.asciidoc +++ b/docs/examples/a4a3c3cd09efa75168dab90105afb2e9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/get-inference.asciidoc:73 +// inference/get-inference.asciidoc:68 [source, python] ---- diff --git a/docs/examples/a4ee2214d621bcfaf768c46d21325958.asciidoc b/docs/examples/a4ee2214d621bcfaf768c46d21325958.asciidoc index 7c2c97f72..b00e53429 100644 --- a/docs/examples/a4ee2214d621bcfaf768c46d21325958.asciidoc +++ b/docs/examples/a4ee2214d621bcfaf768c46d21325958.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// tab-widgets/inference-api/infer-api-task.asciidoc:66 +// tab-widgets/inference-api/infer-api-task.asciidoc:74 [source, python] ---- diff --git a/docs/examples/a594f05459d9eecc8050c73fc8da336f.asciidoc b/docs/examples/a594f05459d9eecc8050c73fc8da336f.asciidoc index c714c6bd4..60d534ece 100644 --- a/docs/examples/a594f05459d9eecc8050c73fc8da336f.asciidoc +++ b/docs/examples/a594f05459d9eecc8050c73fc8da336f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// tab-widgets/inference-api/infer-api-task.asciidoc:121 +// tab-widgets/inference-api/infer-api-task.asciidoc:129 [source, python] ---- diff --git a/docs/examples/14a49c13c399840e64c00b487aa820c9.asciidoc b/docs/examples/a5aeb2c8bdf91f6146026ec8edc476b6.asciidoc similarity index 67% rename from docs/examples/14a49c13c399840e64c00b487aa820c9.asciidoc rename to docs/examples/a5aeb2c8bdf91f6146026ec8edc476b6.asciidoc index 733cdfc77..48329183c 100644 --- a/docs/examples/14a49c13c399840e64c00b487aa820c9.asciidoc +++ b/docs/examples/a5aeb2c8bdf91f6146026ec8edc476b6.asciidoc @@ -1,14 +1,20 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/date_nanos.asciidoc:160 +// mapping/types/date_nanos.asciidoc:155 [source, python] ---- resp = client.indices.create( index="idx", + settings={ + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, mappings={ - "_source": { - "mode": "synthetic" - }, "properties": { "date": { "type": "date_nanos" diff --git a/docs/examples/a75765e3fb130421dde6c3c2f12e8acb.asciidoc b/docs/examples/a75765e3fb130421dde6c3c2f12e8acb.asciidoc index 4642ae26e..aff792fea 100644 --- a/docs/examples/a75765e3fb130421dde6c3c2f12e8acb.asciidoc +++ b/docs/examples/a75765e3fb130421dde6c3c2f12e8acb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/claim-connector-sync-job-api.asciidoc:62 +// connector/apis/claim-connector-sync-job-api.asciidoc:63 [source, python] ---- diff --git 
a/docs/examples/82eff1d681a5d0d1538ef011bb32ab9a.asciidoc b/docs/examples/a769d696bf12f5e9de4b3250646d250c.asciidoc similarity index 83% rename from docs/examples/82eff1d681a5d0d1538ef011bb32ab9a.asciidoc rename to docs/examples/a769d696bf12f5e9de4b3250646d250c.asciidoc index 87c70a6f3..dff590d07 100644 --- a/docs/examples/82eff1d681a5d0d1538ef011bb32ab9a.asciidoc +++ b/docs/examples/a769d696bf12f5e9de4b3250646d250c.asciidoc @@ -11,7 +11,7 @@ resp = client.reindex( }, dest={ "index": "alibabacloud-ai-search-embeddings", - "pipeline": "alibabacloud_ai_search_embeddings" + "pipeline": "alibabacloud_ai_search_embeddings_pipeline" }, ) print(resp) diff --git a/docs/examples/2826510e4aeb1c0d8dc43d317ed7624a.asciidoc b/docs/examples/a7d814caf2a995d2aeadecc3495011be.asciidoc similarity index 67% rename from docs/examples/2826510e4aeb1c0d8dc43d317ed7624a.asciidoc rename to docs/examples/a7d814caf2a995d2aeadecc3495011be.asciidoc index 088c4262a..5801198e8 100644 --- a/docs/examples/2826510e4aeb1c0d8dc43d317ed7624a.asciidoc +++ b/docs/examples/a7d814caf2a995d2aeadecc3495011be.asciidoc @@ -1,14 +1,20 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/boolean.asciidoc:249 +// mapping/types/boolean.asciidoc:248 [source, python] ---- resp = client.indices.create( index="idx", + settings={ + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, mappings={ - "_source": { - "mode": "synthetic" - }, "properties": { "bool": { "type": "boolean" diff --git a/docs/examples/794d9a321b944347d2a8834a07b5eb22.asciidoc b/docs/examples/a8dff54362184b2732b9bd248cf6df8a.asciidoc similarity index 66% rename from docs/examples/794d9a321b944347d2a8834a07b5eb22.asciidoc rename to docs/examples/a8dff54362184b2732b9bd248cf6df8a.asciidoc index 45c76d191..2881a6771 100644 --- a/docs/examples/794d9a321b944347d2a8834a07b5eb22.asciidoc +++ b/docs/examples/a8dff54362184b2732b9bd248cf6df8a.asciidoc @@ -1,14 +1,20 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/range.asciidoc:394 +// mapping/types/range.asciidoc:418 [source, python] ---- resp = client.indices.create( index="idx", + settings={ + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, mappings={ - "_source": { - "mode": "synthetic" - }, "properties": { "my_range": { "type": "integer_range" diff --git a/docs/examples/a9280b55a7284952f604ec7bece712f6.asciidoc b/docs/examples/a9280b55a7284952f604ec7bece712f6.asciidoc index 2b8beb63d..b67cae301 100644 --- a/docs/examples/a9280b55a7284952f604ec7bece712f6.asciidoc +++ b/docs/examples/a9280b55a7284952f604ec7bece712f6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/runtime.asciidoc:1188 +// mapping/runtime.asciidoc:1186 [source, python] ---- diff --git a/docs/examples/0ba5acede9d43af424e85428e7d35420.asciidoc b/docs/examples/a95ae76fca7c3e273e4bd10323b3caa6.asciidoc similarity index 91% rename from docs/examples/0ba5acede9d43af424e85428e7d35420.asciidoc rename to docs/examples/a95ae76fca7c3e273e4bd10323b3caa6.asciidoc index b1c6f894b..20be5e1e3 100644 --- a/docs/examples/0ba5acede9d43af424e85428e7d35420.asciidoc +++ b/docs/examples/a95ae76fca7c3e273e4bd10323b3caa6.asciidoc @@ -4,7 +4,7 @@ [source, python] ---- resp = client.ingest.put_pipeline( - id="azure_openai_embeddings", + id="azure_openai_embeddings_pipeline", processors=[ { "inference": { diff --git a/docs/examples/a960b43e720b4934edb74ab4b085ca77.asciidoc b/docs/examples/a960b43e720b4934edb74ab4b085ca77.asciidoc index 6bf107489..d55601076 100644 --- 
a/docs/examples/a960b43e720b4934edb74ab4b085ca77.asciidoc +++ b/docs/examples/a960b43e720b4934edb74ab4b085ca77.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/list-connectors-api.asciidoc:78 +// connector/apis/list-connectors-api.asciidoc:79 [source, python] ---- diff --git a/docs/examples/a999b5661bebb802bbbfe04faacf1971.asciidoc b/docs/examples/a999b5661bebb802bbbfe04faacf1971.asciidoc index b8c02fe91..77df3aaa7 100644 --- a/docs/examples/a999b5661bebb802bbbfe04faacf1971.asciidoc +++ b/docs/examples/a999b5661bebb802bbbfe04faacf1971.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// how-to/size-your-shards.asciidoc:510 +// how-to/size-your-shards.asciidoc:511 [source, python] ---- diff --git a/docs/examples/aa676d54a59dee87ecd28bcc1edce59b.asciidoc b/docs/examples/aa676d54a59dee87ecd28bcc1edce59b.asciidoc index f921fe239..1e934487f 100644 --- a/docs/examples/aa676d54a59dee87ecd28bcc1edce59b.asciidoc +++ b/docs/examples/aa676d54a59dee87ecd28bcc1edce59b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-alibabacloud-ai-search.asciidoc:172 +// inference/service-alibabacloud-ai-search.asciidoc:186 [source, python] ---- diff --git a/docs/examples/aa814309ad5f1630886ba75255b444f5.asciidoc b/docs/examples/aa814309ad5f1630886ba75255b444f5.asciidoc new file mode 100644 index 000000000..d542f9477 --- /dev/null +++ b/docs/examples/aa814309ad5f1630886ba75255b444f5.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// troubleshooting/common-issues/task-queue-backlog.asciidoc:80 + +[source, python] +---- +resp = client.cluster.pending_tasks() +print(resp) +---- diff --git a/docs/examples/aab810de3314d5e11bd564ea096785b8.asciidoc b/docs/examples/aab810de3314d5e11bd564ea096785b8.asciidoc new file mode 100644 index 000000000..0fb61ac72 --- /dev/null +++ b/docs/examples/aab810de3314d5e11bd564ea096785b8.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// quickstart/full-text-filtering-tutorial.asciidoc:428 + +[source, python] +---- +resp = client.search( + index="cooking_blog", + query={ + "bool": { + "filter": [ + { + "term": { + "category.keyword": "Breakfast" + } + } + ] + } + }, +) +print(resp) +---- diff --git a/docs/examples/aaba346e0becdf12db13658296e0b8a1.asciidoc b/docs/examples/aaba346e0becdf12db13658296e0b8a1.asciidoc index e775fb30d..7a2fb484b 100644 --- a/docs/examples/aaba346e0becdf12db13658296e0b8a1.asciidoc +++ b/docs/examples/aaba346e0becdf12db13658296e0b8a1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ilm/error-handling.asciidoc:38 +// ilm/error-handling.asciidoc:41 [source, python] ---- diff --git a/docs/examples/0e5db64154a722a5cbdb84b588ce2ce8.asciidoc b/docs/examples/aad7d80990a6a3c391ff555ce09ae9dc.asciidoc similarity index 65% rename from docs/examples/0e5db64154a722a5cbdb84b588ce2ce8.asciidoc rename to docs/examples/aad7d80990a6a3c391ff555ce09ae9dc.asciidoc index 07bbf1dbd..adf825bb3 100644 --- a/docs/examples/0e5db64154a722a5cbdb84b588ce2ce8.asciidoc +++ b/docs/examples/aad7d80990a6a3c391ff555ce09ae9dc.asciidoc @@ -1,14 +1,20 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/numeric.asciidoc:287 +// mapping/types/numeric.asciidoc:295 [source, python] ---- resp = client.indices.create( index="idx", + settings={ + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, mappings={ - "_source": { - "mode": "synthetic" - }, "properties": { "f": { "type": "scaled_float", diff --git 
a/docs/examples/ab24bfdfd8c1c7b3044b21a3b4684370.asciidoc b/docs/examples/ab24bfdfd8c1c7b3044b21a3b4684370.asciidoc index 3d433144a..538069d22 100644 --- a/docs/examples/ab24bfdfd8c1c7b3044b21a3b4684370.asciidoc +++ b/docs/examples/ab24bfdfd8c1c7b3044b21a3b4684370.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// scripting/fields.asciidoc:94 +// scripting/fields.asciidoc:167 [source, python] ---- diff --git a/docs/examples/f9cb2547ab04461a12bfd25a35be5f96.asciidoc b/docs/examples/ac5b91aa75696f9880451c9439fd9eec.asciidoc similarity index 73% rename from docs/examples/f9cb2547ab04461a12bfd25a35be5f96.asciidoc rename to docs/examples/ac5b91aa75696f9880451c9439fd9eec.asciidoc index ebf203e52..84f62b3bf 100644 --- a/docs/examples/f9cb2547ab04461a12bfd25a35be5f96.asciidoc +++ b/docs/examples/ac5b91aa75696f9880451c9439fd9eec.asciidoc @@ -1,14 +1,20 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/range.asciidoc:429 +// mapping/types/range.asciidoc:461 [source, python] ---- resp = client.indices.create( index="idx", + settings={ + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, mappings={ - "_source": { - "mode": "synthetic" - }, "properties": { "my_range": { "type": "date_range" diff --git a/docs/examples/ac9fe9b64891095bcf84066f719b3dc4.asciidoc b/docs/examples/ac9fe9b64891095bcf84066f719b3dc4.asciidoc index 2b0c50721..c65434081 100644 --- a/docs/examples/ac9fe9b64891095bcf84066f719b3dc4.asciidoc +++ b/docs/examples/ac9fe9b64891095bcf84066f719b3dc4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/repository-source-only.asciidoc:38 +// snapshot-restore/repository-source-only.asciidoc:41 [source, python] ---- diff --git a/docs/examples/8f0a3d7b5fbdf5351750a23c493cc078.asciidoc b/docs/examples/acc44366a9908684b2c8c2b119a4fb2b.asciidoc similarity index 56% rename from docs/examples/8f0a3d7b5fbdf5351750a23c493cc078.asciidoc rename to docs/examples/acc44366a9908684b2c8c2b119a4fb2b.asciidoc index 769097bfb..daf9f50cf 100644 --- a/docs/examples/8f0a3d7b5fbdf5351750a23c493cc078.asciidoc +++ b/docs/examples/acc44366a9908684b2c8c2b119a4fb2b.asciidoc @@ -1,15 +1,19 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/search-using-query-rules.asciidoc:200 +// search/search-your-data/search-using-query-rules.asciidoc:202 [source, python] ---- resp = client.search( index="my-index-000001", - query={ + retriever={ "rule": { - "organic": { - "query_string": { - "query": "puggles" + "retriever": { + "standard": { + "query": { + "query_string": { + "query": "puggles" + } + } } }, "match_criteria": { diff --git a/docs/examples/add82cbe7cd95c4be5ce1c9958f2f208.asciidoc b/docs/examples/add82cbe7cd95c4be5ce1c9958f2f208.asciidoc new file mode 100644 index 000000000..a6715d19e --- /dev/null +++ b/docs/examples/add82cbe7cd95c4be5ce1c9958f2f208.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// quickstart/full-text-filtering-tutorial.asciidoc:335 + +[source, python] +---- +resp = client.search( + index="cooking_blog", + query={ + "multi_match": { + "query": "vegetarian curry", + "fields": [ + "title^3", + "description^2", + "tags" + ] + } + }, +) +print(resp) +---- diff --git a/docs/examples/ae3473adaf1515afcf7773f26c018e5c.asciidoc b/docs/examples/ae3473adaf1515afcf7773f26c018e5c.asciidoc new file mode 100644 index 000000000..262513d43 --- /dev/null +++ b/docs/examples/ae3473adaf1515afcf7773f26c018e5c.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT 
+// connector/docs/connectors-zoom.asciidoc:60 + +[source, python] +---- +resp = client.connector.put( + connector_id="my-{service-name-stub}-connector", + index_name="my-elasticsearch-index", + name="Content synced from {service-name}", + service_type="{service-name-stub}", + is_native=True, +) +print(resp) +---- diff --git a/docs/examples/afef5cac988592b97ae289ab39c2f437.asciidoc b/docs/examples/afef5cac988592b97ae289ab39c2f437.asciidoc index b669c17e3..681761170 100644 --- a/docs/examples/afef5cac988592b97ae289ab39c2f437.asciidoc +++ b/docs/examples/afef5cac988592b97ae289ab39c2f437.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/text.asciidoc:295 +// mapping/types/text.asciidoc:307 [source, python] ---- diff --git a/docs/examples/b577e7e7eb5ce9d16cb582356e2cc45c.asciidoc b/docs/examples/b0ee6f19875fe5bad8aab02d60e3532c.asciidoc similarity index 85% rename from docs/examples/b577e7e7eb5ce9d16cb582356e2cc45c.asciidoc rename to docs/examples/b0ee6f19875fe5bad8aab02d60e3532c.asciidoc index 8a8e061a5..610238476 100644 --- a/docs/examples/b577e7e7eb5ce9d16cb582356e2cc45c.asciidoc +++ b/docs/examples/b0ee6f19875fe5bad8aab02d60e3532c.asciidoc @@ -1,11 +1,11 @@ // This file is autogenerated, DO NOT EDIT -// ingest/processors/geoip.asciidoc:83 +// ingest/processors/geoip.asciidoc:85 [source, python] ---- resp = client.ingest.put_pipeline( id="geoip", - description="Add geoip info", + description="Add ip geolocation info", processors=[ { "geoip": { diff --git a/docs/examples/b0fe9a7c8e519995258786be4bef36c4.asciidoc b/docs/examples/b0fe9a7c8e519995258786be4bef36c4.asciidoc index 90b70da87..6ef842f01 100644 --- a/docs/examples/b0fe9a7c8e519995258786be4bef36c4.asciidoc +++ b/docs/examples/b0fe9a7c8e519995258786be4bef36c4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/semantic-search-inference.asciidoc:97 +// search/search-your-data/semantic-search-elser.asciidoc:170 [source, python] ---- diff --git a/docs/examples/b11a0675e49df0709be693297ca73a2c.asciidoc b/docs/examples/b11a0675e49df0709be693297ca73a2c.asciidoc index 8eab9b4d7..73aa660e5 100644 --- a/docs/examples/b11a0675e49df0709be693297ca73a2c.asciidoc +++ b/docs/examples/b11a0675e49df0709be693297ca73a2c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/info.asciidoc:193 +// rest-api/info.asciidoc:197 [source, python] ---- diff --git a/docs/examples/c1bb395546102279296534522061829f.asciidoc b/docs/examples/b3479ee4586c15020549afae58d94d65.asciidoc similarity index 71% rename from docs/examples/c1bb395546102279296534522061829f.asciidoc rename to docs/examples/b3479ee4586c15020549afae58d94d65.asciidoc index 2df09de38..2ae2dff35 100644 --- a/docs/examples/c1bb395546102279296534522061829f.asciidoc +++ b/docs/examples/b3479ee4586c15020549afae58d94d65.asciidoc @@ -1,14 +1,20 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/geo-point.asciidoc:229 +// mapping/types/geo-point.asciidoc:225 [source, python] ---- resp = client.indices.create( index="idx", + settings={ + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, mappings={ - "_source": { - "mode": "synthetic" - }, "properties": { "point": { "type": "geo_point" diff --git a/docs/examples/b3756e700d0f6c7e8919003bdf26bc8f.asciidoc b/docs/examples/b3756e700d0f6c7e8919003bdf26bc8f.asciidoc index 03ee84804..3f151a158 100644 --- a/docs/examples/b3756e700d0f6c7e8919003bdf26bc8f.asciidoc +++ b/docs/examples/b3756e700d0f6c7e8919003bdf26bc8f.asciidoc 
@@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/delete-desired-balance.asciidoc:21 +// troubleshooting/troubleshooting-unbalanced-cluster.asciidoc:72 [source, python] ---- diff --git a/docs/examples/36063ff9a318dba7bb0be3a230655dc8.asciidoc b/docs/examples/b3cd07f02059165fd62a2f148be3dc58.asciidoc similarity index 73% rename from docs/examples/36063ff9a318dba7bb0be3a230655dc8.asciidoc rename to docs/examples/b3cd07f02059165fd62a2f148be3dc58.asciidoc index 6ece85e77..be7f57a49 100644 --- a/docs/examples/36063ff9a318dba7bb0be3a230655dc8.asciidoc +++ b/docs/examples/b3cd07f02059165fd62a2f148be3dc58.asciidoc @@ -5,10 +5,16 @@ ---- resp = client.indices.create( index="idx", + settings={ + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, mappings={ - "_source": { - "mode": "synthetic" - }, "properties": { "long": { "type": "long" diff --git a/docs/examples/51390ca10aa22d7104e8970f09ea4512.asciidoc b/docs/examples/b3f442a7d9eb391121dcab991787f9d6.asciidoc similarity index 73% rename from docs/examples/51390ca10aa22d7104e8970f09ea4512.asciidoc rename to docs/examples/b3f442a7d9eb391121dcab991787f9d6.asciidoc index 83ee3f394..0f1dbf105 100644 --- a/docs/examples/51390ca10aa22d7104e8970f09ea4512.asciidoc +++ b/docs/examples/b3f442a7d9eb391121dcab991787f9d6.asciidoc @@ -5,10 +5,16 @@ ---- resp = client.indices.create( index="idx", + settings={ + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, mappings={ - "_source": { - "mode": "synthetic" - }, "properties": { "binary": { "type": "binary", diff --git a/docs/examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc b/docs/examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc index 9b704644e..93191cd7e 100644 --- a/docs/examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc +++ b/docs/examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/post-inference.asciidoc:106 +// inference/post-inference.asciidoc:101 [source, python] ---- diff --git a/docs/examples/b583bf8d3a2f49d633aa2cfed5606418.asciidoc b/docs/examples/b583bf8d3a2f49d633aa2cfed5606418.asciidoc index 104cdbd91..b7d77e2f8 100644 --- a/docs/examples/b583bf8d3a2f49d633aa2cfed5606418.asciidoc +++ b/docs/examples/b583bf8d3a2f49d633aa2cfed5606418.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-component-template.asciidoc:186 +// indices/put-component-template.asciidoc:189 [source, python] ---- diff --git a/docs/examples/b6a6aa9ba20e9a019371ae268488833f.asciidoc b/docs/examples/b6a6aa9ba20e9a019371ae268488833f.asciidoc index 16ae792b9..7a0389714 100644 --- a/docs/examples/b6a6aa9ba20e9a019371ae268488833f.asciidoc +++ b/docs/examples/b6a6aa9ba20e9a019371ae268488833f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// modules/cluster/remote-clusters-migration.asciidoc:96 +// modules/cluster/remote-clusters-migration.asciidoc:97 [source, python] ---- diff --git a/docs/examples/b6a7ffd2003c38f4aa321f067d162be5.asciidoc b/docs/examples/b6a7ffd2003c38f4aa321f067d162be5.asciidoc index 5bcfbbfb9..f75a30daa 100644 --- a/docs/examples/b6a7ffd2003c38f4aa321f067d162be5.asciidoc +++ b/docs/examples/b6a7ffd2003c38f4aa321f067d162be5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/semantic-search-elser.asciidoc:248 +// search/search-your-data/semantic-search-elser.asciidoc:260 [source, python] ---- diff --git a/docs/examples/b6f690896001f8f9ad5bf24e1304a552.asciidoc 
b/docs/examples/b6f690896001f8f9ad5bf24e1304a552.asciidoc index 9d4e68d83..5b63289c6 100644 --- a/docs/examples/b6f690896001f8f9ad5bf24e1304a552.asciidoc +++ b/docs/examples/b6f690896001f8f9ad5bf24e1304a552.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/dense-vector.asciidoc:158 +// mapping/types/dense-vector.asciidoc:163 [source, python] ---- diff --git a/docs/examples/b8400dbe39215705060500f0e569f452.asciidoc b/docs/examples/b8400dbe39215705060500f0e569f452.asciidoc new file mode 100644 index 000000000..b2122a3d6 --- /dev/null +++ b/docs/examples/b8400dbe39215705060500f0e569f452.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// connector/docs/connectors-API-tutorial.asciidoc:312 + +[source, python] +---- +resp = client.connector.get( + connector_id="my-connector-id", +) +print(resp) +---- diff --git a/docs/examples/b9ba66209b7fcc111a7bcef0b3e00052.asciidoc b/docs/examples/b9ba66209b7fcc111a7bcef0b3e00052.asciidoc new file mode 100644 index 000000000..32a6c03a0 --- /dev/null +++ b/docs/examples/b9ba66209b7fcc111a7bcef0b3e00052.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// mapping/types/passthrough.asciidoc:77 + +[source, python] +---- +resp = client.index( + index="my-index-000001", + id="1", + document={ + "attributes": { + "id": "foo" + }, + "id": "bar" + }, +) +print(resp) +---- diff --git a/docs/examples/fe6a21b4a6b33cd6abc522947d6f3ea2.asciidoc b/docs/examples/ba650046f9063f6c43d76f47e0f94403.asciidoc similarity index 67% rename from docs/examples/fe6a21b4a6b33cd6abc522947d6f3ea2.asciidoc rename to docs/examples/ba650046f9063f6c43d76f47e0f94403.asciidoc index f6aaa06b4..9b94f645f 100644 --- a/docs/examples/fe6a21b4a6b33cd6abc522947d6f3ea2.asciidoc +++ b/docs/examples/ba650046f9063f6c43d76f47e0f94403.asciidoc @@ -1,14 +1,20 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/date.asciidoc:249 +// mapping/types/date.asciidoc:244 [source, python] ---- resp = client.indices.create( index="idx", + settings={ + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, mappings={ - "_source": { - "mode": "synthetic" - }, "properties": { "date": { "type": "date" diff --git a/docs/examples/bb5a67e3d2d9cd3016e487e627769fe8.asciidoc b/docs/examples/bb5a67e3d2d9cd3016e487e627769fe8.asciidoc new file mode 100644 index 000000000..94bea00eb --- /dev/null +++ b/docs/examples/bb5a67e3d2d9cd3016e487e627769fe8.asciidoc @@ -0,0 +1,105 @@ +// This file is autogenerated, DO NOT EDIT +// quickstart/full-text-filtering-tutorial.asciidoc:129 + +[source, python] +---- +resp = client.bulk( + index="cooking_blog", + refresh="wait_for", + operations=[ + { + "index": { + "_id": "1" + } + }, + { + "title": "Perfect Pancakes: A Fluffy Breakfast Delight", + "description": "Learn the secrets to making the fluffiest pancakes, so amazing you won't believe your tastebuds. This recipe uses buttermilk and a special folding technique to create light, airy pancakes that are perfect for lazy Sunday mornings.", + "author": "Maria Rodriguez", + "date": "2023-05-01", + "category": "Breakfast", + "tags": [ + "pancakes", + "breakfast", + "easy recipes" + ], + "rating": 4.8 + }, + { + "index": { + "_id": "2" + } + }, + { + "title": "Spicy Thai Green Curry: A Vegetarian Adventure", + "description": "Dive into the flavors of Thailand with this vibrant green curry. Packed with vegetables and aromatic herbs, this dish is both healthy and satisfying. 
Don't worry about the heat - you can easily adjust the spice level to your liking.", + "author": "Liam Chen", + "date": "2023-05-05", + "category": "Main Course", + "tags": [ + "thai", + "vegetarian", + "curry", + "spicy" + ], + "rating": 4.6 + }, + { + "index": { + "_id": "3" + } + }, + { + "title": "Classic Beef Stroganoff: A Creamy Comfort Food", + "description": "Indulge in this rich and creamy beef stroganoff. Tender strips of beef in a savory mushroom sauce, served over a bed of egg noodles. It's the ultimate comfort food for chilly evenings.", + "author": "Emma Watson", + "date": "2023-05-10", + "category": "Main Course", + "tags": [ + "beef", + "pasta", + "comfort food" + ], + "rating": 4.7 + }, + { + "index": { + "_id": "4" + } + }, + { + "title": "Vegan Chocolate Avocado Mousse", + "description": "Discover the magic of avocado in this rich, vegan chocolate mousse. Creamy, indulgent, and secretly healthy, it's the perfect guilt-free dessert for chocolate lovers.", + "author": "Alex Green", + "date": "2023-05-15", + "category": "Dessert", + "tags": [ + "vegan", + "chocolate", + "avocado", + "healthy dessert" + ], + "rating": 4.5 + }, + { + "index": { + "_id": "5" + } + }, + { + "title": "Crispy Oven-Fried Chicken", + "description": "Get that perfect crunch without the deep fryer! This oven-fried chicken recipe delivers crispy, juicy results every time. A healthier take on the classic comfort food.", + "author": "Maria Rodriguez", + "date": "2023-05-20", + "category": "Main Course", + "tags": [ + "chicken", + "oven-fried", + "healthy" + ], + "rating": 4.9 + } + ], +) +print(resp) +---- diff --git a/docs/examples/bc01aee2ab2ce1690986374bd836e1c7.asciidoc b/docs/examples/bc01aee2ab2ce1690986374bd836e1c7.asciidoc new file mode 100644 index 000000000..f1a16aa7d --- /dev/null +++ b/docs/examples/bc01aee2ab2ce1690986374bd836e1c7.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// quickstart/full-text-filtering-tutorial.asciidoc:317 + +[source, python] +---- +resp = client.search( + index="cooking_blog", + query={ + "multi_match": { + "query": "vegetarian curry", + "fields": [ + "title", + "description", + "tags" + ] + } + }, +) +print(resp) +---- diff --git a/docs/examples/bcc75fc01b45e482638c65b8fbdf09fa.asciidoc b/docs/examples/bcc75fc01b45e482638c65b8fbdf09fa.asciidoc index 0c0ab5ce9..7d635906f 100644 --- a/docs/examples/bcc75fc01b45e482638c65b8fbdf09fa.asciidoc +++ b/docs/examples/bcc75fc01b45e482638c65b8fbdf09fa.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// quickstart/getting-started.asciidoc:227 +// quickstart/getting-started.asciidoc:419 [source, python] ---- diff --git a/docs/examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc b/docs/examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc index 0672fc36d..4b72e7551 100644 --- a/docs/examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc +++ b/docs/examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/apis/simulate-ingest.asciidoc:278 +// ingest/apis/simulate-ingest.asciidoc:346 [source, python] ---- diff --git a/docs/examples/bd5bd5d8b3d81241335fe1e5747080ac.asciidoc b/docs/examples/bd5bd5d8b3d81241335fe1e5747080ac.asciidoc index 93a5c32dc..95a630fbe 100644 --- a/docs/examples/bd5bd5d8b3d81241335fe1e5747080ac.asciidoc +++ b/docs/examples/bd5bd5d8b3d81241335fe1e5747080ac.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ilm/error-handling.asciidoc:118 +// ilm/error-handling.asciidoc:121 [source, python] 
---- diff --git a/docs/examples/bd68666ca2e0be12f7624016317a62bc.asciidoc b/docs/examples/bd68666ca2e0be12f7624016317a62bc.asciidoc index bdab504e3..b9451d30c 100644 --- a/docs/examples/bd68666ca2e0be12f7624016317a62bc.asciidoc +++ b/docs/examples/bd68666ca2e0be12f7624016317a62bc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/nodes-stats.asciidoc:2563 +// cluster/nodes-stats.asciidoc:2567 [source, python] ---- diff --git a/docs/examples/63d1c07d22a3ca3b0ec6d950547c011c.asciidoc b/docs/examples/bdaf00d791706d7fde25fd65d3735b94.asciidoc similarity index 67% rename from docs/examples/63d1c07d22a3ca3b0ec6d950547c011c.asciidoc rename to docs/examples/bdaf00d791706d7fde25fd65d3735b94.asciidoc index cc72b2cce..08227226e 100644 --- a/docs/examples/63d1c07d22a3ca3b0ec6d950547c011c.asciidoc +++ b/docs/examples/bdaf00d791706d7fde25fd65d3735b94.asciidoc @@ -1,14 +1,20 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/keyword.asciidoc:189 +// mapping/types/keyword.asciidoc:184 [source, python] ---- resp = client.indices.create( index="idx", + settings={ + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, mappings={ - "_source": { - "mode": "synthetic" - }, "properties": { "kwd": { "type": "keyword" diff --git a/docs/examples/be5c5a9c25901737585e4fff9195da3c.asciidoc b/docs/examples/be5c5a9c25901737585e4fff9195da3c.asciidoc index 0959081b9..ab6ef34d1 100644 --- a/docs/examples/be5c5a9c25901737585e4fff9195da3c.asciidoc +++ b/docs/examples/be5c5a9c25901737585e4fff9195da3c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/dense-vector.asciidoc:406 +// mapping/types/dense-vector.asciidoc:437 [source, python] ---- diff --git a/docs/examples/be9836fe55c5fada404a2adc1663d832.asciidoc b/docs/examples/be9836fe55c5fada404a2adc1663d832.asciidoc index 8a151046b..0e75ab6c5 100644 --- a/docs/examples/be9836fe55c5fada404a2adc1663d832.asciidoc +++ b/docs/examples/be9836fe55c5fada404a2adc1663d832.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/runtime.asciidoc:1437 +// mapping/runtime.asciidoc:1435 [source, python] ---- diff --git a/docs/examples/befa73a8a419fcf3b7798548b54a20bf.asciidoc b/docs/examples/befa73a8a419fcf3b7798548b54a20bf.asciidoc new file mode 100644 index 000000000..4f5c79684 --- /dev/null +++ b/docs/examples/befa73a8a419fcf3b7798548b54a20bf.asciidoc @@ -0,0 +1,47 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/knn-search.asciidoc:1167 + +[source, python] +---- +resp = client.search( + index="my-index", + size=10, + knn={ + "query_vector": [ + 0.04283529, + 0.85670587, + -0.51402352, + 0 + ], + "field": "my_int4_vector", + "k": 20, + "num_candidates": 50 + }, + rescore={ + "window_size": 20, + "query": { + "rescore_query": { + "script_score": { + "query": { + "match_all": {} + }, + "script": { + "source": "(dotProduct(params.queryVector, 'my_int4_vector') + 1.0)", + "params": { + "queryVector": [ + 0.04283529, + 0.85670587, + -0.51402352, + 0 + ] + } + } + } + }, + "query_weight": 0, + "rescore_query_weight": 1 + } + }, +) +print(resp) +---- diff --git a/docs/examples/bf1de9fa1b825fa875d27fa08821a6d1.asciidoc b/docs/examples/bf1de9fa1b825fa875d27fa08821a6d1.asciidoc index 767c6efdf..11de217e5 100644 --- a/docs/examples/bf1de9fa1b825fa875d27fa08821a6d1.asciidoc +++ b/docs/examples/bf1de9fa1b825fa875d27fa08821a6d1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-across-clusters.asciidoc:114 +// 
esql/esql-across-clusters.asciidoc:122 [source, python] ---- diff --git a/docs/examples/c00c9412609832ebceb9e786dd9542df.asciidoc b/docs/examples/c00c9412609832ebceb9e786dd9542df.asciidoc index ea55b13c7..b6b337eac 100644 --- a/docs/examples/c00c9412609832ebceb9e786dd9542df.asciidoc +++ b/docs/examples/c00c9412609832ebceb9e786dd9542df.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-name-description-api.asciidoc:79 +// connector/apis/update-connector-name-description-api.asciidoc:80 [source, python] ---- diff --git a/docs/examples/c02c2916b97b6fa7db82dbc7f0378310.asciidoc b/docs/examples/c02c2916b97b6fa7db82dbc7f0378310.asciidoc index bf854843c..d50b44d64 100644 --- a/docs/examples/c02c2916b97b6fa7db82dbc7f0378310.asciidoc +++ b/docs/examples/c02c2916b97b6fa7db82dbc7f0378310.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/retrievers-reranking/semantic-reranking.asciidoc:109 +// reranking/semantic-reranking.asciidoc:105 [source, python] ---- diff --git a/docs/examples/c067182d385f59ce5952fb9a716fbf05.asciidoc b/docs/examples/c067182d385f59ce5952fb9a716fbf05.asciidoc index 4a33d82c0..317a4d06d 100644 --- a/docs/examples/c067182d385f59ce5952fb9a716fbf05.asciidoc +++ b/docs/examples/c067182d385f59ce5952fb9a716fbf05.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/post-calendar-event.asciidoc:63 +// ml/anomaly-detection/apis/post-calendar-event.asciidoc:79 [source, python] ---- diff --git a/docs/examples/c0ddfb2e6315f5bcf0d3ef414b5bbed3.asciidoc b/docs/examples/c0ddfb2e6315f5bcf0d3ef414b5bbed3.asciidoc index d005a58f0..e41d19d84 100644 --- a/docs/examples/c0ddfb2e6315f5bcf0d3ef414b5bbed3.asciidoc +++ b/docs/examples/c0ddfb2e6315f5bcf0d3ef414b5bbed3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-configuration-api.asciidoc:335 +// connector/apis/update-connector-configuration-api.asciidoc:336 [source, python] ---- diff --git a/docs/examples/c12d6e962f083c728f9397932f05202e.asciidoc b/docs/examples/c12d6e962f083c728f9397932f05202e.asciidoc index b3e1432ec..89fab2035 100644 --- a/docs/examples/c12d6e962f083c728f9397932f05202e.asciidoc +++ b/docs/examples/c12d6e962f083c728f9397932f05202e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/list-connector-sync-jobs-api.asciidoc:71 +// connector/apis/list-connector-sync-jobs-api.asciidoc:72 [source, python] ---- diff --git a/docs/examples/c18100d62ed31bc9e05f62900156e6a8.asciidoc b/docs/examples/c18100d62ed31bc9e05f62900156e6a8.asciidoc index 0be644e92..0011dff0f 100644 --- a/docs/examples/c18100d62ed31bc9e05f62900156e6a8.asciidoc +++ b/docs/examples/c18100d62ed31bc9e05f62900156e6a8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/list-connectors-api.asciidoc:92 +// connector/apis/list-connectors-api.asciidoc:93 [source, python] ---- diff --git a/docs/examples/c186ecf6f799ddff7add1abdecea5821.asciidoc b/docs/examples/c186ecf6f799ddff7add1abdecea5821.asciidoc index 6a9a3035a..9a2e3a3b4 100644 --- a/docs/examples/c186ecf6f799ddff7add1abdecea5821.asciidoc +++ b/docs/examples/c186ecf6f799ddff7add1abdecea5821.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// scripting/fields.asciidoc:214 +// scripting/fields.asciidoc:287 [source, python] ---- diff --git a/docs/examples/c21eb4bc30087188241cbba6b6b89999.asciidoc 
b/docs/examples/c21eb4bc30087188241cbba6b6b89999.asciidoc index f2ad14f33..8f36dbfdd 100644 --- a/docs/examples/c21eb4bc30087188241cbba6b6b89999.asciidoc +++ b/docs/examples/c21eb4bc30087188241cbba6b6b89999.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-service-type-api.asciidoc:77 +// connector/apis/update-connector-service-type-api.asciidoc:78 [source, python] ---- diff --git a/docs/examples/8575c966b004fb124c7afd6bb5827b50.asciidoc b/docs/examples/c26b185952ddf9842e18493aca2de147.asciidoc similarity index 86% rename from docs/examples/8575c966b004fb124c7afd6bb5827b50.asciidoc rename to docs/examples/c26b185952ddf9842e18493aca2de147.asciidoc index 3b64158d3..9b4b57ad4 100644 --- a/docs/examples/8575c966b004fb124c7afd6bb5827b50.asciidoc +++ b/docs/examples/c26b185952ddf9842e18493aca2de147.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// quickstart/getting-started.asciidoc:64 +// quickstart/getting-started.asciidoc:102 [source, python] ---- diff --git a/docs/examples/c4607ca79b2bcde39305d6f4f21cad37.asciidoc b/docs/examples/c4607ca79b2bcde39305d6f4f21cad37.asciidoc index 9e77a3417..3bec7673e 100644 --- a/docs/examples/c4607ca79b2bcde39305d6f4f21cad37.asciidoc +++ b/docs/examples/c4607ca79b2bcde39305d6f4f21cad37.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-rest.asciidoc:223 +// esql/esql-rest.asciidoc:225 [source, python] ---- diff --git a/docs/examples/c6339d09f85000a6432304b0ec63b8f6.asciidoc b/docs/examples/c6339d09f85000a6432304b0ec63b8f6.asciidoc index 50b57646c..a25ac7386 100644 --- a/docs/examples/c6339d09f85000a6432304b0ec63b8f6.asciidoc +++ b/docs/examples/c6339d09f85000a6432304b0ec63b8f6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-component-template.asciidoc:226 +// indices/put-component-template.asciidoc:229 [source, python] ---- diff --git a/docs/examples/e566e898902e432bc7ea0568400f0c50.asciidoc b/docs/examples/c793efe7280e9b6e09981c4d4f832348.asciidoc similarity index 71% rename from docs/examples/e566e898902e432bc7ea0568400f0c50.asciidoc rename to docs/examples/c793efe7280e9b6e09981c4d4f832348.asciidoc index 1d39f8a28..dcdc437f8 100644 --- a/docs/examples/e566e898902e432bc7ea0568400f0c50.asciidoc +++ b/docs/examples/c793efe7280e9b6e09981c4d4f832348.asciidoc @@ -1,14 +1,20 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/ip.asciidoc:170 +// mapping/types/ip.asciidoc:166 [source, python] ---- resp = client.indices.create( index="idx", + settings={ + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, mappings={ - "_source": { - "mode": "synthetic" - }, "properties": { "ip": { "type": "ip" diff --git a/docs/examples/c8aa8e8c0ac160b8c4efd1ac3b9f48f3.asciidoc b/docs/examples/c8aa8e8c0ac160b8c4efd1ac3b9f48f3.asciidoc new file mode 100644 index 000000000..93dba34c8 --- /dev/null +++ b/docs/examples/c8aa8e8c0ac160b8c4efd1ac3b9f48f3.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/ingest-vectors.asciidoc:35 + +[source, python] +---- +resp = client.indices.create( + index="amazon-reviews", + mappings={ + "properties": { + "review_vector": { + "type": "dense_vector", + "dims": 8, + "index": True, + "similarity": "cosine" + }, + "review_text": { + "type": "text" + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/c8fa8d7e029792d539464fede18ce258.asciidoc b/docs/examples/c8fa8d7e029792d539464fede18ce258.asciidoc new file mode 100644 
index 000000000..e75f79036 --- /dev/null +++ b/docs/examples/c8fa8d7e029792d539464fede18ce258.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/semantic-search-semantic-text.asciidoc:72 + +[source, python] +---- +resp = client.indices.create( + index="semantic-embeddings", + mappings={ + "properties": { + "content": { + "type": "semantic_text", + "inference_id": "my-elser-endpoint" + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/cc5eefcc2102aae7e87b0c87b4af10b8.asciidoc b/docs/examples/cc5eefcc2102aae7e87b0c87b4af10b8.asciidoc index d9b7e66b8..c403a96b0 100644 --- a/docs/examples/cc5eefcc2102aae7e87b0c87b4af10b8.asciidoc +++ b/docs/examples/cc5eefcc2102aae7e87b0c87b4af10b8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/multivalued-fields.asciidoc:50 +// esql/multivalued-fields.asciidoc:53 [source, python] ---- diff --git a/docs/examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc b/docs/examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc new file mode 100644 index 000000000..f42b8b06d --- /dev/null +++ b/docs/examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc @@ -0,0 +1,77 @@ +// This file is autogenerated, DO NOT EDIT +// ingest/apis/simulate-ingest.asciidoc:56 + +[source, python] +---- +resp = client.simulate.ingest( + body={ + "docs": [ + { + "_index": "my-index", + "_id": "id", + "_source": { + "foo": "bar" + } + }, + { + "_index": "my-index", + "_id": "id", + "_source": { + "foo": "rab" + } + } + ], + "pipeline_substitutions": { + "my-pipeline": { + "processors": [ + { + "set": { + "field": "field3", + "value": "value3" + } + } + ] + } + }, + "component_template_substitutions": { + "my-component-template": { + "template": { + "mappings": { + "dynamic": "true", + "properties": { + "field3": { + "type": "keyword" + } + } + }, + "settings": { + "index": { + "default_pipeline": "my-pipeline" + } + } + } + } + }, + "index_template_substitutions": { + "my-index-template": { + "index_patterns": [ + "my-index-*" + ], + "composed_of": [ + "component_template_1", + "component_template_2" + ] + } + }, + "mapping_addition": { + "dynamic": "strict", + "properties": { + "foo": { + "type": "keyword" + } + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/cd38c601ab293a6ec0e2df71d0c96b58.asciidoc b/docs/examples/cd38c601ab293a6ec0e2df71d0c96b58.asciidoc index 4f4c3fcf1..e743b527a 100644 --- a/docs/examples/cd38c601ab293a6ec0e2df71d0c96b58.asciidoc +++ b/docs/examples/cd38c601ab293a6ec0e2df71d0c96b58.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-index-template.asciidoc:344 +// indices/put-index-template.asciidoc:347 [source, python] ---- diff --git a/docs/examples/cdce7bc083dfb36e6f1d465a5c9d5049.asciidoc b/docs/examples/cdce7bc083dfb36e6f1d465a5c9d5049.asciidoc index 710a66fea..e6a36d4c5 100644 --- a/docs/examples/cdce7bc083dfb36e6f1d465a5c9d5049.asciidoc +++ b/docs/examples/cdce7bc083dfb36e6f1d465a5c9d5049.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/list-connector-sync-jobs-api.asciidoc:49 +// connector/apis/list-connector-sync-jobs-api.asciidoc:50 [source, python] ---- diff --git a/docs/examples/cde4104a29dfe942d55863cdd8718627.asciidoc b/docs/examples/cde4104a29dfe942d55863cdd8718627.asciidoc index 839bb42dd..0dc636282 100644 --- a/docs/examples/cde4104a29dfe942d55863cdd8718627.asciidoc +++ b/docs/examples/cde4104a29dfe942d55863cdd8718627.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// 
slm/apis/slm-get-status.asciidoc:42 +// tab-widgets/troubleshooting/data/start-slm.asciidoc:76 [source, python] ---- diff --git a/docs/examples/ce2c2e8f5a2e4daf051b6e10122e5aae.asciidoc b/docs/examples/ce2c2e8f5a2e4daf051b6e10122e5aae.asciidoc index aeeb761ae..e6cd4cf1e 100644 --- a/docs/examples/ce2c2e8f5a2e4daf051b6e10122e5aae.asciidoc +++ b/docs/examples/ce2c2e8f5a2e4daf051b6e10122e5aae.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/dense-vector.asciidoc:490 +// mapping/types/dense-vector.asciidoc:521 [source, python] ---- diff --git a/docs/examples/cedb56a71cc743d80263ce352bb21720.asciidoc b/docs/examples/cedb56a71cc743d80263ce352bb21720.asciidoc index 9f552c719..c101699af 100644 --- a/docs/examples/cedb56a71cc743d80263ce352bb21720.asciidoc +++ b/docs/examples/cedb56a71cc743d80263ce352bb21720.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-elser.asciidoc:91 +// inference/service-elser.asciidoc:113 [source, python] ---- diff --git a/docs/examples/cee491dd0a8d10ed0cb11a2faa0c99f0.asciidoc b/docs/examples/cee491dd0a8d10ed0cb11a2faa0c99f0.asciidoc index c500aef2f..3dfe110b6 100644 --- a/docs/examples/cee491dd0a8d10ed0cb11a2faa0c99f0.asciidoc +++ b/docs/examples/cee491dd0a8d10ed0cb11a2faa0c99f0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/infer-trained-model.asciidoc:1078 +// ml/trained-models/apis/infer-trained-model.asciidoc:1179 [source, python] ---- diff --git a/docs/examples/cf23f18761df33f08bc6f6d1875496fd.asciidoc b/docs/examples/cf23f18761df33f08bc6f6d1875496fd.asciidoc index 79ac096c2..b4b4bdf4e 100644 --- a/docs/examples/cf23f18761df33f08bc6f6d1875496fd.asciidoc +++ b/docs/examples/cf23f18761df33f08bc6f6d1875496fd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// how-to/size-your-shards.asciidoc:398 +// how-to/size-your-shards.asciidoc:399 [source, python] ---- diff --git a/docs/examples/d003f9110e5a474230abe11f36da9297.asciidoc b/docs/examples/d003f9110e5a474230abe11f36da9297.asciidoc index dc2540244..6c38ffc2b 100644 --- a/docs/examples/d003f9110e5a474230abe11f36da9297.asciidoc +++ b/docs/examples/d003f9110e5a474230abe11f36da9297.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/processors/redact.asciidoc:49 +// ingest/processors/redact.asciidoc:50 [source, python] ---- diff --git a/docs/examples/d01a590fa9ea8a0cb34ed8dda502296c.asciidoc b/docs/examples/d01a590fa9ea8a0cb34ed8dda502296c.asciidoc index 2aa1436ea..a696e0d6d 100644 --- a/docs/examples/d01a590fa9ea8a0cb34ed8dda502296c.asciidoc +++ b/docs/examples/d01a590fa9ea8a0cb34ed8dda502296c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// migration/transient-settings-migration-guide.asciidoc:38 +// migration/migrate_9_0/transient-settings-migration-guide.asciidoc:38 [source, python] ---- diff --git a/docs/examples/d03139a851888db53f8b7affd85eb495.asciidoc b/docs/examples/d03139a851888db53f8b7affd85eb495.asciidoc index 15826f686..fb06927ed 100644 --- a/docs/examples/d03139a851888db53f8b7affd85eb495.asciidoc +++ b/docs/examples/d03139a851888db53f8b7affd85eb495.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/check-in-connector-api.asciidoc:68 +// connector/apis/check-in-connector-api.asciidoc:69 [source, python] ---- diff --git a/docs/examples/d04f0c8c44e8b4fb55f2e7d9d05977e7.asciidoc b/docs/examples/d04f0c8c44e8b4fb55f2e7d9d05977e7.asciidoc index 442757e57..94cd61b48 100644 --- 
a/docs/examples/d04f0c8c44e8b4fb55f2e7d9d05977e7.asciidoc +++ b/docs/examples/d04f0c8c44e8b4fb55f2e7d9d05977e7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// quickstart/getting-started.asciidoc:103 +// quickstart/getting-started.asciidoc:155 [source, python] ---- diff --git a/docs/examples/d1ea13e1e8372cbf1480a414723ff55a.asciidoc b/docs/examples/d1ea13e1e8372cbf1480a414723ff55a.asciidoc new file mode 100644 index 000000000..3fd7d3beb --- /dev/null +++ b/docs/examples/d1ea13e1e8372cbf1480a414723ff55a.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// connector/docs/connectors-zoom.asciidoc:247 + +[source, python] +---- +resp = client.security.create_api_key( + name="connector_name-connector-api-key", + role_descriptors={ + "connector_name-connector-role": { + "cluster": [ + "monitor", + "manage_connector" + ], + "indices": [ + { + "names": [ + "index_name", + ".search-acl-filter-index_name", + ".elastic-connectors*" + ], + "privileges": [ + "all" + ], + "allow_restricted_indices": False + } + ] + } + }, +) +print(resp) +---- diff --git a/docs/examples/d260225cf97e068ead2a8a6bb5aefd90.asciidoc b/docs/examples/d260225cf97e068ead2a8a6bb5aefd90.asciidoc index 7aaf95c43..691f89eb5 100644 --- a/docs/examples/d260225cf97e068ead2a8a6bb5aefd90.asciidoc +++ b/docs/examples/d260225cf97e068ead2a8a6bb5aefd90.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// analysis/analyzers/lang-analyzer.asciidoc:1550 +// analysis/analyzers/lang-analyzer.asciidoc:1551 [source, python] ---- diff --git a/docs/examples/443f0e8fbba83777b2df624879d188d5.asciidoc b/docs/examples/d3440ec81dde5f1a01c0206cb35e539c.asciidoc similarity index 85% rename from docs/examples/443f0e8fbba83777b2df624879d188d5.asciidoc rename to docs/examples/d3440ec81dde5f1a01c0206cb35e539c.asciidoc index 19b41f3a9..63436572c 100644 --- a/docs/examples/443f0e8fbba83777b2df624879d188d5.asciidoc +++ b/docs/examples/d3440ec81dde5f1a01c0206cb35e539c.asciidoc @@ -11,7 +11,7 @@ resp = client.reindex( }, dest={ "index": "azure-openai-embeddings", - "pipeline": "azure_openai_embeddings" + "pipeline": "azure_openai_embeddings_pipeline" }, ) print(resp) diff --git a/docs/examples/d3a5b70d493e0bd77b3f2b586341c83c.asciidoc b/docs/examples/d3a5b70d493e0bd77b3f2b586341c83c.asciidoc index 4129b9464..8fbb8c4fe 100644 --- a/docs/examples/d3a5b70d493e0bd77b3f2b586341c83c.asciidoc +++ b/docs/examples/d3a5b70d493e0bd77b3f2b586341c83c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/runtime.asciidoc:1637 +// mapping/runtime.asciidoc:1635 [source, python] ---- diff --git a/docs/examples/d3e5edac5b461020017fd9d8ec7a91fa.asciidoc b/docs/examples/d3e5edac5b461020017fd9d8ec7a91fa.asciidoc index f1500d9b5..38d2830d9 100644 --- a/docs/examples/d3e5edac5b461020017fd9d8ec7a91fa.asciidoc +++ b/docs/examples/d3e5edac5b461020017fd9d8ec7a91fa.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// security/authorization/managing-roles.asciidoc:264 +// security/authorization/managing-roles.asciidoc:262 [source, python] ---- diff --git a/docs/examples/d603e76ab70131f7ec6b08758f95a0e3.asciidoc b/docs/examples/d603e76ab70131f7ec6b08758f95a0e3.asciidoc index b7e295f92..2d05e9e67 100644 --- a/docs/examples/d603e76ab70131f7ec6b08758f95a0e3.asciidoc +++ b/docs/examples/d603e76ab70131f7ec6b08758f95a0e3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/recovery.asciidoc:143 +// cat/recovery.asciidoc:142 [source, python] ---- diff --git 
a/docs/examples/d69bd36335774c8ae1286cee21310241.asciidoc b/docs/examples/d69bd36335774c8ae1286cee21310241.asciidoc index 846c0f023..686b8d63b 100644 --- a/docs/examples/d69bd36335774c8ae1286cee21310241.asciidoc +++ b/docs/examples/d69bd36335774c8ae1286cee21310241.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// security/authentication/remote-clusters-privileges-api-key.asciidoc:71 +// security/authentication/remote-clusters-privileges-api-key.asciidoc:72 [source, python] ---- diff --git a/docs/examples/d6a21afa4a94b9baa734eac430940bcf.asciidoc b/docs/examples/d6a21afa4a94b9baa734eac430940bcf.asciidoc index c7fca7b67..7857d03d4 100644 --- a/docs/examples/d6a21afa4a94b9baa734eac430940bcf.asciidoc +++ b/docs/examples/d6a21afa4a94b9baa734eac430940bcf.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/list-connectors-api.asciidoc:85 +// connector/apis/list-connectors-api.asciidoc:86 [source, python] ---- diff --git a/docs/examples/d7919fb6f4d02dde1390775eb8365b79.asciidoc b/docs/examples/d7919fb6f4d02dde1390775eb8365b79.asciidoc index 8a23680f6..3103cc120 100644 --- a/docs/examples/d7919fb6f4d02dde1390775eb8365b79.asciidoc +++ b/docs/examples/d7919fb6f4d02dde1390775eb8365b79.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/text.asciidoc:323 +// mapping/types/text.asciidoc:335 [source, python] ---- diff --git a/docs/examples/d98fb2ff2cdd154dff4a576430755d98.asciidoc b/docs/examples/d98fb2ff2cdd154dff4a576430755d98.asciidoc index a76e0c4c3..469ffe6ca 100644 --- a/docs/examples/d98fb2ff2cdd154dff4a576430755d98.asciidoc +++ b/docs/examples/d98fb2ff2cdd154dff4a576430755d98.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/runtime.asciidoc:1124 +// mapping/runtime.asciidoc:1122 [source, python] ---- diff --git a/docs/examples/dcee24dba43050e4b01b6e3a3211ce09.asciidoc b/docs/examples/dcee24dba43050e4b01b6e3a3211ce09.asciidoc index 21686169d..af347fdfc 100644 --- a/docs/examples/dcee24dba43050e4b01b6e3a3211ce09.asciidoc +++ b/docs/examples/dcee24dba43050e4b01b6e3a3211ce09.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/runtime.asciidoc:1283 +// mapping/runtime.asciidoc:1281 [source, python] ---- diff --git a/docs/examples/dcf82f3aacae49c0bb4ccbc673f13e9f.asciidoc b/docs/examples/dcf82f3aacae49c0bb4ccbc673f13e9f.asciidoc new file mode 100644 index 000000000..b26cee2dc --- /dev/null +++ b/docs/examples/dcf82f3aacae49c0bb4ccbc673f13e9f.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/knn-search.asciidoc:1215 + +[source, python] +---- +resp = client.search( + index="my-index", + size=10, + query={ + "script_score": { + "query": { + "knn": { + "query_vector": [ + 0.04283529, + 0.85670587, + -0.51402352, + 0 + ], + "field": "my_int4_vector", + "num_candidates": 20 + } + }, + "script": { + "source": "(dotProduct(params.queryVector, 'my_int4_vector') + 1.0)", + "params": { + "queryVector": [ + 0.04283529, + 0.85670587, + -0.51402352, + 0 + ] + } + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/dcfa7f479a33f459a2d222a92e651451.asciidoc b/docs/examples/dcfa7f479a33f459a2d222a92e651451.asciidoc index a52fb6c46..031b39953 100644 --- a/docs/examples/dcfa7f479a33f459a2d222a92e651451.asciidoc +++ b/docs/examples/dcfa7f479a33f459a2d222a92e651451.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/create-roles.asciidoc:107 +// 
rest-api/security/create-roles.asciidoc:120 [source, python] ---- diff --git a/docs/examples/3ff634a50e2e4556bad7ea8553576992.asciidoc b/docs/examples/dd3ee00ab2af607b32532180d60a41d4.asciidoc similarity index 85% rename from docs/examples/3ff634a50e2e4556bad7ea8553576992.asciidoc rename to docs/examples/dd3ee00ab2af607b32532180d60a41d4.asciidoc index fd08ed485..11b180706 100644 --- a/docs/examples/3ff634a50e2e4556bad7ea8553576992.asciidoc +++ b/docs/examples/dd3ee00ab2af607b32532180d60a41d4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// analysis/tokenfilters/snowball-tokenfilter.asciidoc:17 +// analysis/tokenfilters/snowball-tokenfilter.asciidoc:19 [source, python] ---- @@ -19,7 +19,7 @@ resp = client.indices.create( "filter": { "my_snow": { "type": "snowball", - "language": "Lovins" + "language": "English" } } } diff --git a/docs/examples/0e3abd15dde97a2334621190c4ad4f96.asciidoc b/docs/examples/dd7814258121d3c2e576a7f00469d7e3.asciidoc similarity index 92% rename from docs/examples/0e3abd15dde97a2334621190c4ad4f96.asciidoc rename to docs/examples/dd7814258121d3c2e576a7f00469d7e3.asciidoc index 1067fa1a5..93b2779f4 100644 --- a/docs/examples/0e3abd15dde97a2334621190c4ad4f96.asciidoc +++ b/docs/examples/dd7814258121d3c2e576a7f00469d7e3.asciidoc @@ -4,7 +4,7 @@ [source, python] ---- resp = client.ingest.put_pipeline( - id="mistral_embeddings", + id="mistral_embeddings_pipeline", processors=[ { "inference": { diff --git a/docs/examples/dd792bb53703a57f9207e36d16e26255.asciidoc b/docs/examples/dd792bb53703a57f9207e36d16e26255.asciidoc index a37ac7dc1..9b303bc15 100644 --- a/docs/examples/dd792bb53703a57f9207e36d16e26255.asciidoc +++ b/docs/examples/dd792bb53703a57f9207e36d16e26255.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/runtime.asciidoc:1164 +// mapping/runtime.asciidoc:1162 [source, python] ---- diff --git a/docs/examples/ddaadd91b7743a1c7e946ce1b593cd1b.asciidoc b/docs/examples/ddaadd91b7743a1c7e946ce1b593cd1b.asciidoc new file mode 100644 index 000000000..6944bbb1c --- /dev/null +++ b/docs/examples/ddaadd91b7743a1c7e946ce1b593cd1b.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// inference/update-inference.asciidoc:77 + +[source, python] +---- +resp = client.inference.inference( + task_type="my-inference-endpoint", + inference_id="_update", + service_settings={ + "api_key": "" + }, +) +print(resp) +---- diff --git a/docs/examples/dddb6a6ebd145f8411c5b4910d332f87.asciidoc b/docs/examples/dddb6a6ebd145f8411c5b4910d332f87.asciidoc index 534bd51fb..fbbeb3a8f 100644 --- a/docs/examples/dddb6a6ebd145f8411c5b4910d332f87.asciidoc +++ b/docs/examples/dddb6a6ebd145f8411c5b4910d332f87.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/multivalued-fields.asciidoc:186 +// esql/multivalued-fields.asciidoc:228 [source, python] ---- diff --git a/docs/examples/df04e2e9af66d5e30b1bfdbd458cab13.asciidoc b/docs/examples/df04e2e9af66d5e30b1bfdbd458cab13.asciidoc index e4d62925c..ce180edf2 100644 --- a/docs/examples/df04e2e9af66d5e30b1bfdbd458cab13.asciidoc +++ b/docs/examples/df04e2e9af66d5e30b1bfdbd458cab13.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// how-to/size-your-shards.asciidoc:238 +// how-to/size-your-shards.asciidoc:239 [source, python] ---- diff --git a/docs/examples/e017c2de6f93a8dd97f5c6e002dd5c4f.asciidoc b/docs/examples/e017c2de6f93a8dd97f5c6e002dd5c4f.asciidoc new file mode 100644 index 000000000..bdc91a1ff --- /dev/null +++ 
b/docs/examples/e017c2de6f93a8dd97f5c6e002dd5c4f.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// ml/anomaly-detection/apis/post-calendar-event.asciidoc:126 + +[source, python] +---- +resp = client.ml.post_calendar_events( + calendar_id="dst-germany", + events=[ + { + "description": "Fall 2024", + "start_time": 1729994400000, + "end_time": 1730167200000, + "skip_result": False, + "skip_model_update": False, + "force_time_shift": -3600 + }, + { + "description": "Spring 2025", + "start_time": 1743296400000, + "end_time": 1743469200000, + "skip_result": False, + "skip_model_update": False, + "force_time_shift": 3600 + } + ], +) +print(resp) +---- diff --git a/docs/examples/e04267ffc50d916800b919c6cdc9622a.asciidoc b/docs/examples/e04267ffc50d916800b919c6cdc9622a.asciidoc new file mode 100644 index 000000000..58e605469 --- /dev/null +++ b/docs/examples/e04267ffc50d916800b919c6cdc9622a.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// mapping/params/ignore-above.asciidoc:74 + +[source, python] +---- +resp = client.indices.create( + index="my-index-000001", + settings={ + "index.mapping.ignore_above": 256 + }, +) +print(resp) +---- diff --git a/docs/examples/e0fcef99656799de6b88117d56f131e2.asciidoc b/docs/examples/e0fcef99656799de6b88117d56f131e2.asciidoc index 2610b22e0..a7851c6c4 100644 --- a/docs/examples/e0fcef99656799de6b88117d56f131e2.asciidoc +++ b/docs/examples/e0fcef99656799de6b88117d56f131e2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/explain.asciidoc:99 +// troubleshooting/troubleshooting-searches.asciidoc:276 [source, python] ---- diff --git a/docs/examples/e1d6ecab4148b09f4c605474157e7dbd.asciidoc b/docs/examples/e1d6ecab4148b09f4c605474157e7dbd.asciidoc index 75fd178c1..0f1f01902 100644 --- a/docs/examples/e1d6ecab4148b09f4c605474157e7dbd.asciidoc +++ b/docs/examples/e1d6ecab4148b09f4c605474157e7dbd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/get-settings.asciidoc:11 +// troubleshooting/troubleshooting-searches.asciidoc:305 [source, python] ---- diff --git a/docs/examples/e22a1da3c622611be6855e534c0709ae.asciidoc b/docs/examples/e22a1da3c622611be6855e534c0709ae.asciidoc new file mode 100644 index 000000000..d0c5e0340 --- /dev/null +++ b/docs/examples/e22a1da3c622611be6855e534c0709ae.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// query-rules/apis/test-query-ruleset.asciidoc:111 + +[source, python] +---- +resp = client.query_rules.test( + ruleset_id="my-ruleset", + match_criteria={ + "query_string": "puggles" + }, +) +print(resp) +---- diff --git a/docs/examples/e2a22c6fd58cc0becf4c383134a08f8b.asciidoc b/docs/examples/e2a22c6fd58cc0becf4c383134a08f8b.asciidoc index 112f6dd18..26a8a13b2 100644 --- a/docs/examples/e2a22c6fd58cc0becf4c383134a08f8b.asciidoc +++ b/docs/examples/e2a22c6fd58cc0becf4c383134a08f8b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/intervals-query.asciidoc:374 +// query-dsl/intervals-query.asciidoc:445 [source, python] ---- diff --git a/docs/examples/e308899a306e61d1a590868308689955.asciidoc b/docs/examples/e308899a306e61d1a590868308689955.asciidoc new file mode 100644 index 000000000..8773d24c2 --- /dev/null +++ b/docs/examples/e308899a306e61d1a590868308689955.asciidoc @@ -0,0 +1,36 @@ +// This file is autogenerated, DO NOT EDIT +// ingest/processors/ip-location.asciidoc:136 + +[source, python] +---- +resp = client.ingest.put_pipeline( + id="ip_location", + description="Add ip 
geolocation info", + processors=[ + { + "ip_location": { + "field": "ip", + "target_field": "geo", + "database_file": "GeoLite2-Country.mmdb" + } + } + ], +) +print(resp) + +resp1 = client.index( + index="my-index-000001", + id="my_id", + pipeline="ip_location", + document={ + "ip": "89.160.20.128" + }, +) +print(resp1) + +resp2 = client.get( + index="my-index-000001", + id="my_id", +) +print(resp2) +---- diff --git a/docs/examples/e3fe842951dc873d7d00c8f6a010c53f.asciidoc b/docs/examples/e3fe842951dc873d7d00c8f6a010c53f.asciidoc new file mode 100644 index 000000000..0109eed73 --- /dev/null +++ b/docs/examples/e3fe842951dc873d7d00c8f6a010c53f.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// troubleshooting/common-issues/task-queue-backlog.asciidoc:67 + +[source, python] +---- +resp = client.tasks.list( + human=True, + detailed=True, + actions="indices:data/write/search", +) +print(resp) +---- diff --git a/docs/examples/e4b38973c74037335378d8480f1ce894.asciidoc b/docs/examples/e4b38973c74037335378d8480f1ce894.asciidoc new file mode 100644 index 000000000..54d3fd679 --- /dev/null +++ b/docs/examples/e4b38973c74037335378d8480f1ce894.asciidoc @@ -0,0 +1,44 @@ +// This file is autogenerated, DO NOT EDIT +// ingest/apis/simulate-ingest.asciidoc:429 + +[source, python] +---- +resp = client.simulate.ingest( + body={ + "docs": [ + { + "_index": "my-index", + "_id": "123", + "_source": { + "foo": "foo" + } + }, + { + "_index": "my-index", + "_id": "456", + "_source": { + "bar": "rab" + } + } + ], + "component_template_substitutions": { + "my-mappings_template": { + "template": { + "mappings": { + "dynamic": "strict", + "properties": { + "foo": { + "type": "keyword" + }, + "bar": { + "type": "keyword" + } + } + } + } + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/e4b64b8277af259a52c8d3940157b5fa.asciidoc b/docs/examples/e4b64b8277af259a52c8d3940157b5fa.asciidoc index 096deae8c..fda164182 100644 --- a/docs/examples/e4b64b8277af259a52c8d3940157b5fa.asciidoc +++ b/docs/examples/e4b64b8277af259a52c8d3940157b5fa.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// transform/painless-examples.asciidoc:397 +// transform/painless-examples.asciidoc:402 [source, python] ---- diff --git a/docs/examples/e4b6a6a921c97b4c0bbe97bd89f4cf33.asciidoc b/docs/examples/e4b6a6a921c97b4c0bbe97bd89f4cf33.asciidoc index dc93de433..4dfde8e77 100644 --- a/docs/examples/e4b6a6a921c97b4c0bbe97bd89f4cf33.asciidoc +++ b/docs/examples/e4b6a6a921c97b4c0bbe97bd89f4cf33.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/promote-data-stream-api.asciidoc:22 +// data-streams/promote-data-stream-api.asciidoc:26 [source, python] ---- diff --git a/docs/examples/e4d1f01c025fb797a1d87f372760eabf.asciidoc b/docs/examples/e4d1f01c025fb797a1d87f372760eabf.asciidoc index 106dc3a6c..754f5c188 100644 --- a/docs/examples/e4d1f01c025fb797a1d87f372760eabf.asciidoc +++ b/docs/examples/e4d1f01c025fb797a1d87f372760eabf.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/hotspotting.asciidoc:265 +// troubleshooting/common-issues/hotspotting.asciidoc:267 [source, python] ---- diff --git a/docs/examples/e551ea38a2d8f8deac110b33304200cc.asciidoc b/docs/examples/e551ea38a2d8f8deac110b33304200cc.asciidoc index a69bf2834..3d6783dc1 100644 --- a/docs/examples/e551ea38a2d8f8deac110b33304200cc.asciidoc +++ b/docs/examples/e551ea38a2d8f8deac110b33304200cc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT 
-// search/search-your-data/learning-to-rank-search-usage.asciidoc:17 +// reranking/learning-to-rank-search-usage.asciidoc:17 [source, python] ---- diff --git a/docs/examples/4655c3dea0c61935b7ecf1e57441df66.asciidoc b/docs/examples/e9625da419bff6470ffd9927c59ca159.asciidoc similarity index 79% rename from docs/examples/4655c3dea0c61935b7ecf1e57441df66.asciidoc rename to docs/examples/e9625da419bff6470ffd9927c59ca159.asciidoc index 98b608366..d23a92313 100644 --- a/docs/examples/4655c3dea0c61935b7ecf1e57441df66.asciidoc +++ b/docs/examples/e9625da419bff6470ffd9927c59ca159.asciidoc @@ -5,7 +5,7 @@ ---- resp = client.cat.thread_pool( v=True, - h="id,name,active,rejected,completed", + h="id,name,queue,active,rejected,completed", ) print(resp) ---- diff --git a/docs/examples/e9a0b450af6219772631703d602c7092.asciidoc b/docs/examples/e9a0b450af6219772631703d602c7092.asciidoc index 9c1d7c061..27cbd28d2 100644 --- a/docs/examples/e9a0b450af6219772631703d602c7092.asciidoc +++ b/docs/examples/e9a0b450af6219772631703d602c7092.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/text-expansion-query.asciidoc:222 +// query-dsl/text-expansion-query.asciidoc:229 [source, python] ---- diff --git a/docs/examples/e9fc47015922d51c2b05e502ce9c622e.asciidoc b/docs/examples/e9fc47015922d51c2b05e502ce9c622e.asciidoc index 1e857e8bf..0e8ed80cd 100644 --- a/docs/examples/e9fc47015922d51c2b05e502ce9c622e.asciidoc +++ b/docs/examples/e9fc47015922d51c2b05e502ce9c622e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-google-ai-studio.asciidoc:77 +// inference/service-google-ai-studio.asciidoc:97 [source, python] ---- diff --git a/docs/examples/eb54506fbc71a7d250e86b22d0600114.asciidoc b/docs/examples/eb54506fbc71a7d250e86b22d0600114.asciidoc index e4e933248..89c81acd0 100644 --- a/docs/examples/eb54506fbc71a7d250e86b22d0600114.asciidoc +++ b/docs/examples/eb54506fbc71a7d250e86b22d0600114.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/list-connectors-api.asciidoc:107 +// connector/apis/list-connectors-api.asciidoc:108 [source, python] ---- diff --git a/docs/examples/eb96d7dd5f3116a50f7a86b729f1a934.asciidoc b/docs/examples/eb96d7dd5f3116a50f7a86b729f1a934.asciidoc index 8f52a9370..fcb571114 100644 --- a/docs/examples/eb96d7dd5f3116a50f7a86b729f1a934.asciidoc +++ b/docs/examples/eb96d7dd5f3116a50f7a86b729f1a934.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-scheduling-api.asciidoc:119 +// connector/apis/update-connector-scheduling-api.asciidoc:120 [source, python] ---- diff --git a/docs/examples/eb9a41f7fc8bdf5559bb9db822ae3a65.asciidoc b/docs/examples/eb9a41f7fc8bdf5559bb9db822ae3a65.asciidoc index 62fa7fdbc..182d9c5ff 100644 --- a/docs/examples/eb9a41f7fc8bdf5559bb9db822ae3a65.asciidoc +++ b/docs/examples/eb9a41f7fc8bdf5559bb9db822ae3a65.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/bulk-create-roles.asciidoc:217 +// rest-api/security/bulk-create-roles.asciidoc:230 [source, python] ---- diff --git a/docs/examples/ecfd0d94dd14ef05dfa861f22544b388.asciidoc b/docs/examples/ecfd0d94dd14ef05dfa861f22544b388.asciidoc index 39218d68e..051f91b70 100644 --- a/docs/examples/ecfd0d94dd14ef05dfa861f22544b388.asciidoc +++ b/docs/examples/ecfd0d94dd14ef05dfa861f22544b388.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-error-api.asciidoc:80 +// 
connector/apis/update-connector-error-api.asciidoc:81 [source, python] ---- diff --git a/docs/examples/ee223e604bb695cad2517d28ae63ac34.asciidoc b/docs/examples/ee223e604bb695cad2517d28ae63ac34.asciidoc index 5604e2e2c..cb15a14c3 100644 --- a/docs/examples/ee223e604bb695cad2517d28ae63ac34.asciidoc +++ b/docs/examples/ee223e604bb695cad2517d28ae63ac34.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/rrf.asciidoc:49 +// search/rrf.asciidoc:47 [source, python] ---- diff --git a/docs/examples/eee6110831c08b9c1b3f56b24656e95b.asciidoc b/docs/examples/eee6110831c08b9c1b3f56b24656e95b.asciidoc index a342f7ada..515c4e510 100644 --- a/docs/examples/eee6110831c08b9c1b3f56b24656e95b.asciidoc +++ b/docs/examples/eee6110831c08b9c1b3f56b24656e95b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-hugging-face.asciidoc:81 +// inference/service-hugging-face.asciidoc:101 [source, python] ---- diff --git a/docs/examples/ef643bab44e7de6ddddde23a2eece5c7.asciidoc b/docs/examples/ef643bab44e7de6ddddde23a2eece5c7.asciidoc new file mode 100644 index 000000000..64764eb76 --- /dev/null +++ b/docs/examples/ef643bab44e7de6ddddde23a2eece5c7.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// quickstart/getting-started.asciidoc:283 + +[source, python] +---- +resp = client.index( + index="books", + document={ + "name": "The Great Gatsby", + "author": "F. Scott Fitzgerald", + "release_date": "1925-04-10", + "page_count": 180, + "language": "EN" + }, +) +print(resp) +---- diff --git a/docs/examples/ef9c29759459904fef162acd223462c4.asciidoc b/docs/examples/ef9c29759459904fef162acd223462c4.asciidoc index 920357cbd..7d7cd3d0c 100644 --- a/docs/examples/ef9c29759459904fef162acd223462c4.asciidoc +++ b/docs/examples/ef9c29759459904fef162acd223462c4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/nodes-stats.asciidoc:2585 +// cluster/nodes-stats.asciidoc:2589 [source, python] ---- diff --git a/docs/examples/349823d86980d40ac45248c19a59e339.asciidoc b/docs/examples/f03352bb1129938a89f97e4b650038dd.asciidoc similarity index 91% rename from docs/examples/349823d86980d40ac45248c19a59e339.asciidoc rename to docs/examples/f03352bb1129938a89f97e4b650038dd.asciidoc index d770f3f54..ef19c0301 100644 --- a/docs/examples/349823d86980d40ac45248c19a59e339.asciidoc +++ b/docs/examples/f03352bb1129938a89f97e4b650038dd.asciidoc @@ -4,7 +4,7 @@ [source, python] ---- resp = client.ingest.put_pipeline( - id="amazon_bedrock_embeddings", + id="amazon_bedrock_embeddings_pipeline", processors=[ { "inference": { diff --git a/docs/examples/f097c02541056f3c0fc855e7bbeef8a8.asciidoc b/docs/examples/f097c02541056f3c0fc855e7bbeef8a8.asciidoc index 373fbf824..dedc1caaa 100644 --- a/docs/examples/f097c02541056f3c0fc855e7bbeef8a8.asciidoc +++ b/docs/examples/f097c02541056f3c0fc855e7bbeef8a8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// analysis/analyzers/lang-analyzer.asciidoc:1745 +// analysis/analyzers/lang-analyzer.asciidoc:1746 [source, python] ---- diff --git a/docs/examples/f160561efab38e40c2feebf5a2542ab5.asciidoc b/docs/examples/f160561efab38e40c2feebf5a2542ab5.asciidoc index e119375b9..b60a3d3af 100644 --- a/docs/examples/f160561efab38e40c2feebf5a2542ab5.asciidoc +++ b/docs/examples/f160561efab38e40c2feebf5a2542ab5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/nodes-stats.asciidoc:2593 +// cluster/nodes-stats.asciidoc:2597 [source, python] ---- diff --git 
a/docs/examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc b/docs/examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc index 2464c2786..bde573a84 100644 --- a/docs/examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc +++ b/docs/examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/post-inference.asciidoc:137 +// inference/post-inference.asciidoc:132 [source, python] ---- diff --git a/docs/examples/f1bf0c03581b79c3324cfa3246a60e4d.asciidoc b/docs/examples/f1bf0c03581b79c3324cfa3246a60e4d.asciidoc new file mode 100644 index 000000000..2b937027b --- /dev/null +++ b/docs/examples/f1bf0c03581b79c3324cfa3246a60e4d.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// mapping/types/dense-vector.asciidoc:184 + +[source, python] +---- +resp = client.indices.create( + index="my-byte-quantized-index", + mappings={ + "properties": { + "my_vector": { + "type": "dense_vector", + "dims": 64, + "index": True, + "index_options": { + "type": "bbq_hnsw" + } + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/f298c4eb50ea97b34c57f8756eb350d3.asciidoc b/docs/examples/f298c4eb50ea97b34c57f8756eb350d3.asciidoc index 1fbfb3e2a..b62249d4e 100644 --- a/docs/examples/f298c4eb50ea97b34c57f8756eb350d3.asciidoc +++ b/docs/examples/f298c4eb50ea97b34c57f8756eb350d3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/pending_tasks.asciidoc:52 +// cat/pending_tasks.asciidoc:51 [source, python] ---- diff --git a/docs/examples/f29a28fffa7ec604a33a838f48f7ea79.asciidoc b/docs/examples/f29a28fffa7ec604a33a838f48f7ea79.asciidoc index db0efa4a5..4399e8556 100644 --- a/docs/examples/f29a28fffa7ec604a33a838f48f7ea79.asciidoc +++ b/docs/examples/f29a28fffa7ec604a33a838f48f7ea79.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/query_filter_context.asciidoc:62 +// query-dsl/query_filter_context.asciidoc:81 [source, python] ---- diff --git a/docs/examples/f2a5f77f929cc7b893b80f4bd5b1a192.asciidoc b/docs/examples/f2a5f77f929cc7b893b80f4bd5b1a192.asciidoc index 5f93f7a79..61c8fc4f7 100644 --- a/docs/examples/f2a5f77f929cc7b893b80f4bd5b1a192.asciidoc +++ b/docs/examples/f2a5f77f929cc7b893b80f4bd5b1a192.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/get-connector-api.asciidoc:64 +// connector/apis/get-connector-api.asciidoc:65 [source, python] ---- diff --git a/docs/examples/f321d4e92aa83d573ecf52bf56b0b774.asciidoc b/docs/examples/f321d4e92aa83d573ecf52bf56b0b774.asciidoc new file mode 100644 index 000000000..aa1c9cb27 --- /dev/null +++ b/docs/examples/f321d4e92aa83d573ecf52bf56b0b774.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// connector/docs/connectors-API-tutorial.asciidoc:377 + +[source, python] +---- +resp = client.perform_request( + "POST", + "/_connector/_sync_job", + headers={"Content-Type": "application/json"}, + body={ + "id": "my-connector-id", + "job_type": "full" + }, +) +print(resp) +---- diff --git a/docs/examples/f3fb52680482925c202c2e2f8af6f044.asciidoc b/docs/examples/f3fb52680482925c202c2e2f8af6f044.asciidoc index 260106272..bd1263028 100644 --- a/docs/examples/f3fb52680482925c202c2e2f8af6f044.asciidoc +++ b/docs/examples/f3fb52680482925c202c2e2f8af6f044.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// how-to/size-your-shards.asciidoc:458 +// how-to/size-your-shards.asciidoc:459 [source, python] ---- diff --git a/docs/examples/f45990264f8755b96b11c69c12c90ff4.asciidoc 
b/docs/examples/f45990264f8755b96b11c69c12c90ff4.asciidoc index df8d497cf..8050dc60c 100644 --- a/docs/examples/f45990264f8755b96b11c69c12c90ff4.asciidoc +++ b/docs/examples/f45990264f8755b96b11c69c12c90ff4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/indices-exists.asciidoc:10 +// troubleshooting/troubleshooting-searches.asciidoc:21 [source, python] ---- diff --git a/docs/examples/f57ce7de0946e9416ddb9150e95f4b74.asciidoc b/docs/examples/f57ce7de0946e9416ddb9150e95f4b74.asciidoc index 47691a06e..d2d437022 100644 --- a/docs/examples/f57ce7de0946e9416ddb9150e95f4b74.asciidoc +++ b/docs/examples/f57ce7de0946e9416ddb9150e95f4b74.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-azure-openai.asciidoc:139 +// inference/service-azure-openai.asciidoc:159 [source, python] ---- diff --git a/docs/examples/f625fdbbe78c4198d9e40b35f3f008b3.asciidoc b/docs/examples/f625fdbbe78c4198d9e40b35f3f008b3.asciidoc new file mode 100644 index 000000000..5e65d8231 --- /dev/null +++ b/docs/examples/f625fdbbe78c4198d9e40b35f3f008b3.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// connector/docs/connectors-known-issues.asciidoc:99 + +[source, python] +---- +resp = client.update( + index=".elastic-connectors", + id="connector-id", + doc={ + "custom_scheduling": {} + }, +) +print(resp) +---- diff --git a/docs/examples/f6566395f85d3afe917228643d7318d6.asciidoc b/docs/examples/f6566395f85d3afe917228643d7318d6.asciidoc index 1a38176a3..c5bc986e9 100644 --- a/docs/examples/f6566395f85d3afe917228643d7318d6.asciidoc +++ b/docs/examples/f6566395f85d3afe917228643d7318d6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// how-to/size-your-shards.asciidoc:468 +// how-to/size-your-shards.asciidoc:469 [source, python] ---- diff --git a/docs/examples/f679e414de48b8fe25e458844be05618.asciidoc b/docs/examples/f679e414de48b8fe25e458844be05618.asciidoc new file mode 100644 index 000000000..4a98eeb2d --- /dev/null +++ b/docs/examples/f679e414de48b8fe25e458844be05618.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// connector/docs/connectors-API-tutorial.asciidoc:179 + +[source, python] +---- +resp = client.connector.put( + connector_id="my-connector-id", + name="Music catalog", + index_name="music", + service_type="postgresql", +) +print(resp) +---- diff --git a/docs/examples/f6ead39c5505045543b9225deca7367d.asciidoc b/docs/examples/f6ead39c5505045543b9225deca7367d.asciidoc index e133cf10f..85a72c5e4 100644 --- a/docs/examples/f6ead39c5505045543b9225deca7367d.asciidoc +++ b/docs/examples/f6ead39c5505045543b9225deca7367d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/voting-exclusions.asciidoc:105 +// cluster/voting-exclusions.asciidoc:109 [source, python] ---- diff --git a/docs/examples/f6f647eb644a2d236637ff05f833cb73.asciidoc b/docs/examples/f6f647eb644a2d236637ff05f833cb73.asciidoc new file mode 100644 index 000000000..3301985ae --- /dev/null +++ b/docs/examples/f6f647eb644a2d236637ff05f833cb73.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// connector/docs/_connectors-create-native-api-key.asciidoc:43 + +[source, python] +---- +resp = client.connector.secret_post( + body={ + "value": "encoded_api_key" + }, +) +print(resp) +---- diff --git a/docs/examples/f785b5d17eb59f8d2a353c2dee66eb5b.asciidoc b/docs/examples/f785b5d17eb59f8d2a353c2dee66eb5b.asciidoc index 47d3faaab..d93907c92 100644 --- 
a/docs/examples/f785b5d17eb59f8d2a353c2dee66eb5b.asciidoc +++ b/docs/examples/f785b5d17eb59f8d2a353c2dee66eb5b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/get-connector-sync-job-api.asciidoc:44 +// connector/apis/get-connector-sync-job-api.asciidoc:45 [source, python] ---- diff --git a/docs/examples/f8525c2460a577edfef156c13f55b8a7.asciidoc b/docs/examples/f7b20e4bb8366f6d2e4486f3bf4211bc.asciidoc similarity index 94% rename from docs/examples/f8525c2460a577edfef156c13f55b8a7.asciidoc rename to docs/examples/f7b20e4bb8366f6d2e4486f3bf4211bc.asciidoc index af1900d64..dc5b9522d 100644 --- a/docs/examples/f8525c2460a577edfef156c13f55b8a7.asciidoc +++ b/docs/examples/f7b20e4bb8366f6d2e4486f3bf4211bc.asciidoc @@ -11,7 +11,7 @@ resp = client.search( "filter": { "range": { "price": { - "to": "500" + "lte": "500" } } } diff --git a/docs/examples/517d291044c3e4448b8804322616ab4a.asciidoc b/docs/examples/f86337e13526c968848cfe29a52d658f.asciidoc similarity index 92% rename from docs/examples/517d291044c3e4448b8804322616ab4a.asciidoc rename to docs/examples/f86337e13526c968848cfe29a52d658f.asciidoc index 4fc02a67c..613de0527 100644 --- a/docs/examples/517d291044c3e4448b8804322616ab4a.asciidoc +++ b/docs/examples/f86337e13526c968848cfe29a52d658f.asciidoc @@ -4,7 +4,7 @@ [source, python] ---- resp = client.ingest.put_pipeline( - id="elser_embeddings", + id="elser_embeddings_pipeline", processors=[ { "inference": { diff --git a/docs/examples/f8f960550104c33e00dc78bc8723ccef.asciidoc b/docs/examples/f8f960550104c33e00dc78bc8723ccef.asciidoc new file mode 100644 index 000000000..9b0699474 --- /dev/null +++ b/docs/examples/f8f960550104c33e00dc78bc8723ccef.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// quickstart/full-text-filtering-tutorial.asciidoc:42 + +[source, python] +---- +resp = client.indices.create( + index="cooking_blog", +) +print(resp) +---- diff --git a/docs/examples/1af9742c71ce0587cd49a73ec7fc1f6c.asciidoc b/docs/examples/f95a4d7ab02bf400246c8822f0245f02.asciidoc similarity index 92% rename from docs/examples/1af9742c71ce0587cd49a73ec7fc1f6c.asciidoc rename to docs/examples/f95a4d7ab02bf400246c8822f0245f02.asciidoc index 669023540..9e2f8d55f 100644 --- a/docs/examples/1af9742c71ce0587cd49a73ec7fc1f6c.asciidoc +++ b/docs/examples/f95a4d7ab02bf400246c8822f0245f02.asciidoc @@ -5,7 +5,7 @@ ---- resp = client.cat.ml_trained_models( h="c,o,l,ct,v", - v="ture", + v=True, ) print(resp) ---- diff --git a/docs/examples/1e0f203aced9344382081ab095c44dde.asciidoc b/docs/examples/f9bad6fd369764185e1cb09b89ee39cc.asciidoc similarity index 70% rename from docs/examples/1e0f203aced9344382081ab095c44dde.asciidoc rename to docs/examples/f9bad6fd369764185e1cb09b89ee39cc.asciidoc index 9c3bb543d..64d5b6209 100644 --- a/docs/examples/1e0f203aced9344382081ab095c44dde.asciidoc +++ b/docs/examples/f9bad6fd369764185e1cb09b89ee39cc.asciidoc @@ -1,14 +1,20 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/text.asciidoc:233 +// mapping/types/text.asciidoc:237 [source, python] ---- resp = client.indices.create( index="idx", + settings={ + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, mappings={ - "_source": { - "mode": "synthetic" - }, "properties": { "text": { "type": "text", diff --git a/docs/examples/633c8a9fc57268979d8735c557705809.asciidoc b/docs/examples/fb0152f6c70f647a8b6709969113486d.asciidoc similarity index 69% rename from docs/examples/633c8a9fc57268979d8735c557705809.asciidoc rename to 
docs/examples/fb0152f6c70f647a8b6709969113486d.asciidoc index d9a6eaf22..1548e7f1b 100644 --- a/docs/examples/633c8a9fc57268979d8735c557705809.asciidoc +++ b/docs/examples/fb0152f6c70f647a8b6709969113486d.asciidoc @@ -1,14 +1,20 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/keyword.asciidoc:219 +// mapping/types/keyword.asciidoc:222 [source, python] ---- resp = client.indices.create( index="idx", + settings={ + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, mappings={ - "_source": { - "mode": "synthetic" - }, "properties": { "kwd": { "type": "keyword", diff --git a/docs/examples/fb56c2ac77d4c308d7702b6b33698382.asciidoc b/docs/examples/fb56c2ac77d4c308d7702b6b33698382.asciidoc new file mode 100644 index 000000000..2d56c218f --- /dev/null +++ b/docs/examples/fb56c2ac77d4c308d7702b6b33698382.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// connector/docs/_connectors-create-native-api-key.asciidoc:54 + +[source, python] +---- +resp = client.connector.update_api_key_id( + connector_id="my_connector_id>", + api_key_id="API key_id", + api_key_secret_id="secret_id", +) +print(resp) +---- diff --git a/docs/examples/fc1907515f6a913884a9f86451e90ee8.asciidoc b/docs/examples/fc1907515f6a913884a9f86451e90ee8.asciidoc index 543709c8c..8c60861bf 100644 --- a/docs/examples/fc1907515f6a913884a9f86451e90ee8.asciidoc +++ b/docs/examples/fc1907515f6a913884a9f86451e90ee8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/semantic-search-elser.asciidoc:304 +// search/search-your-data/semantic-search-elser.asciidoc:316 [source, python] ---- diff --git a/docs/examples/fd620f09dbce62c6f0f603a366623607.asciidoc b/docs/examples/fd620f09dbce62c6f0f603a366623607.asciidoc index c8d98272a..dcebe10f0 100644 --- a/docs/examples/fd620f09dbce62c6f0f603a366623607.asciidoc +++ b/docs/examples/fd620f09dbce62c6f0f603a366623607.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-filtering-api.asciidoc:149 +// connector/apis/update-connector-filtering-api.asciidoc:150 [source, python] ---- diff --git a/docs/examples/fd7eeadab6251d9113c4380a7fbe2572.asciidoc b/docs/examples/fd7eeadab6251d9113c4380a7fbe2572.asciidoc index e7756bd54..d2ed6b932 100644 --- a/docs/examples/fd7eeadab6251d9113c4380a7fbe2572.asciidoc +++ b/docs/examples/fd7eeadab6251d9113c4380a7fbe2572.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// security/authentication/remote-clusters-privileges-api-key.asciidoc:26 +// security/authentication/remote-clusters-privileges-api-key.asciidoc:27 [source, python] ---- diff --git a/docs/examples/1fcc4a3280be399753dcfd5c489ff682.asciidoc b/docs/examples/fe6429d0d82174aa5acf95e96e237380.asciidoc similarity index 70% rename from docs/examples/1fcc4a3280be399753dcfd5c489ff682.asciidoc rename to docs/examples/fe6429d0d82174aa5acf95e96e237380.asciidoc index 2dac63638..efcf594b4 100644 --- a/docs/examples/1fcc4a3280be399753dcfd5c489ff682.asciidoc +++ b/docs/examples/fe6429d0d82174aa5acf95e96e237380.asciidoc @@ -1,14 +1,20 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/range.asciidoc:316 +// mapping/types/range.asciidoc:324 [source, python] ---- resp = client.indices.create( index="idx", + settings={ + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, mappings={ - "_source": { - "mode": "synthetic" - }, "properties": { "my_range": { "type": "ip_range" diff --git 
a/docs/examples/ff1b96d2fdcf628bd938bff9e939943c.asciidoc b/docs/examples/ff1b96d2fdcf628bd938bff9e939943c.asciidoc index 8bab9f402..6894641fb 100644 --- a/docs/examples/ff1b96d2fdcf628bd938bff9e939943c.asciidoc +++ b/docs/examples/ff1b96d2fdcf628bd938bff9e939943c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/runtime.asciidoc:967 +// mapping/runtime.asciidoc:965 [source, python] ---- diff --git a/docs/examples/ff27e5cddd1f58d8a8f84f807fd27eec.asciidoc b/docs/examples/ff27e5cddd1f58d8a8f84f807fd27eec.asciidoc index 9ce7f1ce8..4f9e86fb4 100644 --- a/docs/examples/ff27e5cddd1f58d8a8f84f807fd27eec.asciidoc +++ b/docs/examples/ff27e5cddd1f58d8a8f84f807fd27eec.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/processors/redact.asciidoc:178 +// ingest/processors/redact.asciidoc:179 [source, python] ---- diff --git a/docs/examples/ff776c0fccf93e1c7050f7cb7efbae0b.asciidoc b/docs/examples/ff776c0fccf93e1c7050f7cb7efbae0b.asciidoc index 5effa743d..be4d854b0 100644 --- a/docs/examples/ff776c0fccf93e1c7050f7cb7efbae0b.asciidoc +++ b/docs/examples/ff776c0fccf93e1c7050f7cb7efbae0b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/infer-trained-model.asciidoc:905 +// ml/trained-models/apis/infer-trained-model.asciidoc:1006 [source, python] ---- diff --git a/docs/guide/release-notes.asciidoc b/docs/guide/release-notes.asciidoc index 674346865..b09fd4569 100644 --- a/docs/guide/release-notes.asciidoc +++ b/docs/guide/release-notes.asciidoc @@ -1,6 +1,7 @@ [[release-notes]] == Release notes +* <> * <> * <> * <> @@ -41,6 +42,37 @@ * <> * <> +[discrete] +[[rn-8-16-0]] +=== 8.16.0 (2024-11-12) + +- Support Python 3.13 (https://github.com/elastic/elasticsearch-py/pull/2689[#2689]) +- Emit Python warnings for beta and tech preview APIs (https://github.com/elastic/elasticsearch-py/pull/2685[#2685]) +- Vectorstore: use a retriever query for hybrid search (https://github.com/elastic/elasticsearch-py/pull/2682[#2682]) +- Allow retries for statuses other than 429 in streaming bulk (https://github.com/elastic/elasticsearch-py/pull/2702[#2702]) +- Make `BulkIndexError` and `ScanError` serializable (https://github.com/elastic/elasticsearch-py/pull/2700[#2700]) +- Fix import when `trace` is missing from `opentelemetry` package (https://github.com/elastic/elasticsearch-py/pull/2705[#2705]) +- Update APIs: + * Fix `nodes` parameter in Task management API + * Add Test query rule API + * Add Create Cross-Cluster API key and Update Cross-Cluster API key APIs + * Add Verify snapshot repository API + * Add `data_stream_name` and `settings` to Delete auto-follow pattern API + * Add `max_samples_per_key` to Get async EQL status API + * Add `lifecycle` and remove unused `data_retention` and `downsampling parameters` from Put data stream lifecycle API + * Add `include_remotes` and remove `flat_settings` from Cluster stats API + * Add `remote_indices` to Create or update application privileges and Create or update roles APIs + +Note that the new Python warnings can be disabled as follows: + +[source,python] +---- +import warnings +from elasticsearch.exceptions import GeneralAvailabilityWarning + +warnings.filterwarnings("ignore", category=GeneralAvailabilityWarning) +---- + [discrete] [[rn-8-15-1]] === 8.15.1 (2024-09-08) @@ -51,8 +83,6 @@ - Add `q` parameter to Update by Query API - Add `allow_no_indices` and `ignore_unavailable` parameters to Resolve index API - - [discrete] [[rn-8-15-0]] === 8.15.0 (2024-08-09) diff --git 
a/elasticsearch/_version.py b/elasticsearch/_version.py index cf59dc4a8..f1a7a85f5 100644 --- a/elasticsearch/_version.py +++ b/elasticsearch/_version.py @@ -15,4 +15,4 @@ # specific language governing permissions and limitations # under the License. -__versionstr__ = "8.15.1" +__versionstr__ = "8.16.0" diff --git a/pyproject.toml b/pyproject.toml index 8640b40fa..8a55e0b67 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -42,7 +42,7 @@ keywords = [ ] dynamic = ["version"] dependencies = [ - "elastic-transport>=8.13,<9", + "elastic-transport>=8.15.1,<9", ] [project.optional-dependencies] diff --git a/utils/generate-docs-examples/package-lock.json b/utils/generate-docs-examples/package-lock.json index 3148aba96..6c4f1bc41 100644 --- a/utils/generate-docs-examples/package-lock.json +++ b/utils/generate-docs-examples/package-lock.json @@ -9,7 +9,7 @@ "version": "1.0.0", "license": "Apache-2.0", "dependencies": { - "@elastic/request-converter": "~8.15.2", + "@elastic/request-converter": "^8.15.2", "minimist": "^1.2.6", "node-fetch": "^2.6.7", "ora": "^5.4.1", @@ -17,9 +17,9 @@ } }, "node_modules/@elastic/request-converter": { - "version": "8.15.4", - "resolved": "https://registry.npmjs.org/@elastic/request-converter/-/request-converter-8.15.4.tgz", - "integrity": "sha512-iZDQpZpygV+AVOweaDzTsMJBfa2hwwduPXNNzk/yTXgC9qtjmns/AjehtLStKXs274+u3fg+BFxVt6NcMwUAAg==", + "version": "8.16.1", + "resolved": "https://registry.npmjs.org/@elastic/request-converter/-/request-converter-8.16.1.tgz", + "integrity": "sha512-lg2qCJ4kyxsP/0NpZo0+NsJfaY4JwyxGIVqD2l2Vmx9tv7ZNaZMn/TjHKBo2+jN0laJBInpxpnkPUgVWo5kw1g==", "license": "Apache-2.0", "dependencies": { "child-process-promise": "^2.2.1", From 22377c0030a4657a6f12d8849a37d1bec1a461f0 Mon Sep 17 00:00:00 2001 From: Quentin Pradet Date: Tue, 10 Dec 2024 11:06:36 +0400 Subject: [PATCH 14/65] Auto-generated code for 8.x (#2719) --- elasticsearch/_async/client/__init__.py | 30 +++- elasticsearch/_async/client/async_search.py | 26 +--- elasticsearch/_async/client/autoscaling.py | 35 +++++ elasticsearch/_async/client/cluster.py | 158 ++++++++++++++++---- elasticsearch/_async/client/enrich.py | 2 +- elasticsearch/_async/client/eql.py | 16 +- elasticsearch/_async/client/esql.py | 3 +- elasticsearch/_async/client/graph.py | 10 +- elasticsearch/_async/client/indices.py | 6 +- elasticsearch/_async/client/inference.py | 12 +- elasticsearch/_async/client/ingest.py | 33 ++-- elasticsearch/_async/client/nodes.py | 40 +++-- elasticsearch/_async/client/query_rules.py | 17 ++- elasticsearch/_async/client/security.py | 7 + elasticsearch/_async/client/sql.py | 19 +-- elasticsearch/_async/client/synonyms.py | 17 ++- elasticsearch/_sync/client/__init__.py | 30 +++- elasticsearch/_sync/client/async_search.py | 26 +--- elasticsearch/_sync/client/autoscaling.py | 35 +++++ elasticsearch/_sync/client/cluster.py | 158 ++++++++++++++++---- elasticsearch/_sync/client/enrich.py | 2 +- elasticsearch/_sync/client/eql.py | 16 +- elasticsearch/_sync/client/esql.py | 3 +- elasticsearch/_sync/client/graph.py | 10 +- elasticsearch/_sync/client/indices.py | 6 +- elasticsearch/_sync/client/inference.py | 12 +- elasticsearch/_sync/client/ingest.py | 33 ++-- elasticsearch/_sync/client/nodes.py | 40 +++-- elasticsearch/_sync/client/query_rules.py | 17 ++- elasticsearch/_sync/client/security.py | 7 + elasticsearch/_sync/client/sql.py | 19 +-- elasticsearch/_sync/client/synonyms.py | 17 ++- 32 files changed, 604 insertions(+), 258 deletions(-) diff --git a/elasticsearch/_async/client/__init__.py 
b/elasticsearch/_async/client/__init__.py index d5985ca05..e63c1dc77 100644 --- a/elasticsearch/_async/client/__init__.py +++ b/elasticsearch/_async/client/__init__.py @@ -872,7 +872,7 @@ async def count( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns number of documents matching a query. + Count search results. Get the number of documents matching a query. ``_ @@ -2274,7 +2274,26 @@ async def health_report( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the health of the cluster. + Get the cluster health. Get a report with the health status of an Elasticsearch + cluster. The report contains a list of indicators that compose Elasticsearch + functionality. Each indicator has a health status of: green, unknown, yellow + or red. The indicator will provide an explanation and metadata describing the + reason for its current health status. The cluster’s status is controlled by the + worst indicator status. In the event that an indicator’s status is non-green, + a list of impacts may be present in the indicator result which detail the functionalities + that are negatively affected by the health issue. Each impact carries with it + a severity level, an area of the system that is affected, and a simple description + of the impact on the system. Some health indicators can determine the root cause + of a health problem and prescribe a set of steps that can be performed in order + to improve the health of the system. The root cause and remediation steps are + encapsulated in a diagnosis. A diagnosis contains a cause detailing a root cause + analysis, an action containing a brief description of the steps to take to fix + the problem, the list of affected resources (if applicable), and a detailed step-by-step + troubleshooting guide to fix the diagnosed problem. NOTE: The health indicators + perform root cause analysis of non-green health statuses. This can be computationally + expensive when called frequently. When setting up automated polling of the API + for health status, set verbose to false to disable the more expensive analysis + logic. ``_ @@ -3079,6 +3098,7 @@ async def open_point_in_time( *, index: t.Union[str, t.Sequence[str]], keep_alive: t.Union[str, t.Literal[-1], t.Literal[0]], + allow_partial_search_results: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ @@ -3113,6 +3133,10 @@ async def open_point_in_time( :param index: A comma-separated list of index names to open point in time; use `_all` or empty string to perform the operation on all indices :param keep_alive: Extends the time to live of the corresponding point in time. + :param allow_partial_search_results: If `false`, creating a point in time request + when a shard is missing or unavailable will throw an exception. If `true`, + the point in time will contain all the shards that are available at the time + of the request. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. 
Supports comma-separated values, such @@ -3135,6 +3159,8 @@ async def open_point_in_time( __body: t.Dict[str, t.Any] = body if body is not None else {} if keep_alive is not None: __query["keep_alive"] = keep_alive + if allow_partial_search_results is not None: + __query["allow_partial_search_results"] = allow_partial_search_results if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: diff --git a/elasticsearch/_async/client/async_search.py b/elasticsearch/_async/client/async_search.py index 0093273ca..8e2bbecf9 100644 --- a/elasticsearch/_async/client/async_search.py +++ b/elasticsearch/_async/client/async_search.py @@ -145,6 +145,7 @@ async def status( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -156,6 +157,9 @@ async def status( ``_ :param id: A unique identifier for the async search. + :param keep_alive: Specifies how long the async search needs to be available. + Ongoing async searches and any saved search results are deleted after this + period. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -168,6 +172,8 @@ async def status( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if keep_alive is not None: + __query["keep_alive"] = keep_alive if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -258,7 +264,6 @@ async def submit( ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, indices_boost: t.Optional[t.Sequence[t.Mapping[str, float]]] = None, - keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, keep_on_completion: t.Optional[bool] = None, knn: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] @@ -269,7 +274,6 @@ async def submit( min_score: t.Optional[float] = None, pit: t.Optional[t.Mapping[str, t.Any]] = None, post_filter: t.Optional[t.Mapping[str, t.Any]] = None, - pre_filter_shard_size: t.Optional[int] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, profile: t.Optional[bool] = None, @@ -283,7 +287,6 @@ async def submit( routing: t.Optional[str] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, script_fields: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, - scroll: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, search_after: t.Optional[ t.Sequence[t.Union[None, bool, float, int, str, t.Any]] ] = None, @@ -376,9 +379,6 @@ async def submit( :param ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :param indices_boost: Boosts the _score of documents from specified indices. - :param keep_alive: Specifies how long the async search needs to be available. - Ongoing async searches and any saved search results are deleted after this - period. :param keep_on_completion: If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`. :param knn: Defines the approximate kNN search to run. @@ -394,10 +394,6 @@ async def submit( :param pit: Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an in the request path. 
:param post_filter: - :param pre_filter_shard_size: The default value cannot be changed, which enforces - the execution of a pre-filter roundtrip to retrieve statistics from each - shard so that the ones that surely don’t hold any document matching the query - get skipped. :param preference: Specify the node or shard the operation should be performed on (default: random) :param profile: @@ -406,13 +402,13 @@ async def submit( :param request_cache: Specify if request cache should be used for this request or not, defaults to true :param rescore: - :param rest_total_hits_as_int: + :param rest_total_hits_as_int: Indicates whether hits.total should be rendered + as an integer or an object in the rest search response :param routing: A comma-separated list of specific routing values :param runtime_mappings: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. :param script_fields: Retrieve a script evaluation (based on different fields) for each hit. - :param scroll: :param search_after: :param search_type: Search operation type :param seq_no_primary_term: If true, returns sequence number and primary term @@ -509,8 +505,6 @@ async def submit( __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable - if keep_alive is not None: - __query["keep_alive"] = keep_alive if keep_on_completion is not None: __query["keep_on_completion"] = keep_on_completion if lenient is not None: @@ -519,8 +513,6 @@ async def submit( __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests if min_compatible_shard_node is not None: __query["min_compatible_shard_node"] = min_compatible_shard_node - if pre_filter_shard_size is not None: - __query["pre_filter_shard_size"] = pre_filter_shard_size if preference is not None: __query["preference"] = preference if pretty is not None: @@ -533,8 +525,6 @@ async def submit( __query["rest_total_hits_as_int"] = rest_total_hits_as_int if routing is not None: __query["routing"] = routing - if scroll is not None: - __query["scroll"] = scroll if search_type is not None: __query["search_type"] = search_type if source_excludes is not None: diff --git a/elasticsearch/_async/client/autoscaling.py b/elasticsearch/_async/client/autoscaling.py index cb2eccabf..7c1b1f01c 100644 --- a/elasticsearch/_async/client/autoscaling.py +++ b/elasticsearch/_async/client/autoscaling.py @@ -33,7 +33,9 @@ async def delete_autoscaling_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Delete an autoscaling policy. NOTE: This feature is designed for indirect use @@ -43,6 +45,11 @@ async def delete_autoscaling_policy( ``_ :param name: the name of the autoscaling policy + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. 
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -55,8 +62,12 @@ async def delete_autoscaling_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", @@ -74,6 +85,7 @@ async def get_autoscaling_capacity( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -91,6 +103,10 @@ async def get_autoscaling_capacity( use this information to make autoscaling decisions. ``_ + + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_autoscaling/capacity" @@ -101,6 +117,8 @@ async def get_autoscaling_capacity( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -121,6 +139,7 @@ async def get_autoscaling_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -131,6 +150,9 @@ async def get_autoscaling_policy( ``_ :param name: the name of the autoscaling policy + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -143,6 +165,8 @@ async def get_autoscaling_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -167,7 +191,9 @@ async def put_autoscaling_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Create or update an autoscaling policy. NOTE: This feature is designed for indirect @@ -178,6 +204,11 @@ async def put_autoscaling_policy( :param name: the name of the autoscaling policy :param policy: + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. 
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -196,8 +227,12 @@ async def put_autoscaling_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __body = policy if policy is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] diff --git a/elasticsearch/_async/client/cluster.py b/elasticsearch/_async/client/cluster.py index c3f5ec8dc..03c17de2d 100644 --- a/elasticsearch/_async/client/cluster.py +++ b/elasticsearch/_async/client/cluster.py @@ -44,7 +44,13 @@ async def allocation_explain( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Provides explanations for shard allocations in the cluster. + Explain the shard allocations. Get explanations for shard allocations in the + cluster. For unassigned shards, it provides an explanation for why the shard + is unassigned. For assigned shards, it provides an explanation for why the shard + is remaining on its current node and has not moved or rebalanced to another node. + This API can be very useful when attempting to diagnose why a shard is unassigned + or why a shard continues to remain on its current node when you might expect + otherwise. ``_ @@ -165,7 +171,8 @@ async def delete_voting_config_exclusions( wait_for_removal: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clears cluster voting config exclusions. + Clear cluster voting config exclusions. Remove master-eligible nodes from the + voting configuration exclusion list. ``_ @@ -331,8 +338,8 @@ async def get_settings( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster-wide settings. By default, it returns only settings that have - been explicitly defined. + Get cluster-wide settings. By default, it returns only settings that have been + explicitly defined. ``_ @@ -414,14 +421,15 @@ async def health( ] = None, ) -> ObjectApiResponse[t.Any]: """ - The cluster health API returns a simple status on the health of the cluster. - You can also use the API to get the health status of only specified data streams - and indices. For data streams, the API retrieves the health status of the stream’s - backing indices. The cluster health status is: green, yellow or red. On the shard - level, a red status indicates that the specific shard is not allocated in the - cluster, yellow means that the primary shard is allocated but replicas are not, - and green means that all shards are allocated. The index level status is controlled - by the worst shard status. The cluster status is controlled by the worst index + Get the cluster health status. You can also use the API to get the health status + of only specified data streams and indices. For data streams, the API retrieves + the health status of the stream’s backing indices. The cluster health status + is: green, yellow or red. On the shard level, a red status indicates that the + specific shard is not allocated in the cluster. Yellow means that the primary + shard is allocated but replicas are not. Green means that all shards are allocated. + The index level status is controlled by the worst shard status. 
One of the main + benefits of the API is the ability to wait until the cluster reaches a certain + high watermark health level. The cluster status is controlled by the worst index status. ``_ @@ -568,14 +576,14 @@ async def pending_tasks( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster-level changes (such as create index, update mapping, allocate - or fail shard) that have not yet been executed. NOTE: This API returns a list - of any pending updates to the cluster state. These are distinct from the tasks - reported by the Task Management API which include periodic tasks and tasks initiated - by the user, such as node stats, search queries, or create index requests. However, - if a user-initiated task such as a create index command causes a cluster state - update, the activity of this task might be reported by both task api and pending - cluster tasks API. + Get the pending cluster tasks. Get information about cluster-level changes (such + as create index, update mapping, allocate or fail shard) that have not yet taken + effect. NOTE: This API returns a list of any pending updates to the cluster state. + These are distinct from the tasks reported by the task management API which include + periodic tasks and tasks initiated by the user, such as node stats, search queries, + or create index requests. However, if a user-initiated task such as a create + index command causes a cluster state update, the activity of this task might + be reported by both task api and pending cluster tasks API. ``_ @@ -623,7 +631,33 @@ async def post_voting_config_exclusions( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the cluster voting config exclusions by node ids or node names. + Update voting configuration exclusions. Update the cluster voting config exclusions + by node IDs or node names. By default, if there are more than three master-eligible + nodes in the cluster and you remove fewer than half of the master-eligible nodes + in the cluster at once, the voting configuration automatically shrinks. If you + want to shrink the voting configuration to contain fewer than three nodes or + to remove half or more of the master-eligible nodes in the cluster at once, use + this API to remove departing nodes from the voting configuration manually. The + API adds an entry for each specified node to the cluster’s voting configuration + exclusions list. It then waits until the cluster has reconfigured its voting + configuration to exclude the specified nodes. Clusters should have no voting + configuration exclusions in normal operation. Once the excluded nodes have stopped, + clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. + This API waits for the nodes to be fully removed from the cluster before it returns. + If your cluster has voting configuration exclusions for nodes that you no longer + intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` + to clear the voting configuration exclusions without waiting for the nodes to + leave the cluster. A response to `POST /_cluster/voting_config_exclusions` with + an HTTP status code of 200 OK guarantees that the node has been removed from + the voting configuration and will not be reinstated until the voting configuration + exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. 
+ If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response + with an HTTP status code other than 200 OK then the node may not have been removed + from the voting configuration. In that case, you may safely retry the call. NOTE: + Voting exclusions are required only when you remove at least half of the master-eligible + nodes from a cluster in a short time period. They are not required when removing + master-ineligible nodes or when removing fewer than half of the master-eligible + nodes. ``_ @@ -787,7 +821,26 @@ async def put_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the cluster settings. + Update the cluster settings. Configure and update dynamic settings on a running + cluster. You can also configure dynamic settings locally on an unstarted or shut + down node in `elasticsearch.yml`. Updates made with this API can be persistent, + which apply across cluster restarts, or transient, which reset after a cluster + restart. You can also reset transient or persistent settings by assigning them + a null value. If you configure the same setting using multiple methods, Elasticsearch + applies the settings in following order of precedence: 1) Transient setting; + 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value. + For example, you can apply a transient setting to override a persistent setting + or `elasticsearch.yml` setting. However, a change to an `elasticsearch.yml` setting + will not override a defined transient or persistent setting. TIP: In Elastic + Cloud, use the user settings feature to configure all cluster settings. This + method automatically rejects unsafe settings that could break your cluster. If + you run Elasticsearch on your own hardware, use this API to configure dynamic + cluster settings. Only use `elasticsearch.yml` for static cluster settings and + node settings. The API doesn’t require a restart and ensures a setting’s value + is the same on all nodes. WARNING: Transient cluster settings are no longer recommended. + Use persistent cluster settings instead. If a cluster becomes unstable, transient + settings can clear unexpectedly, resulting in a potentially undesired cluster + configuration. ``_ @@ -841,9 +894,9 @@ async def remote_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - The cluster remote info API allows you to retrieve all of the configured remote - cluster information. It returns connection and endpoint information keyed by - the configured remote cluster alias. + Get remote cluster information. Get all of the configured remote cluster information. + This API returns connection and endpoint information keyed by the configured + remote cluster alias. ``_ """ @@ -888,15 +941,35 @@ async def reroute( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to manually change the allocation of individual shards in the cluster. + Reroute the cluster. Manually change the allocation of individual shards in the + cluster. For example, a shard can be moved from one node to another explicitly, + an allocation can be canceled, and an unassigned shard can be explicitly allocated + to a specific node. It is important to note that after processing any reroute + commands Elasticsearch will perform rebalancing as normal (respecting the values + of settings such as `cluster.routing.rebalance.enable`) in order to remain in + a balanced state. 
For example, if the requested allocation includes moving a + shard from node1 to node2 then this may cause a shard to be moved from node2 + back to node1 to even things out. The cluster can be set to disable allocations + using the `cluster.routing.allocation.enable` setting. If allocations are disabled + then the only allocations that will be performed are explicit ones given using + the reroute command, and consequent allocations due to rebalancing. The cluster + will attempt to allocate a shard a maximum of `index.allocation.max_retries` + times in a row (defaults to `5`), before giving up and leaving the shard unallocated. + This scenario can be caused by structural problems such as having an analyzer + which refers to a stopwords file which doesn’t exist on all nodes. Once the problem + has been corrected, allocation can be manually retried by calling the reroute + API with the `?retry_failed` URI query parameter, which will attempt a single + retry round for these shards. ``_ :param commands: Defines the commands to perform. - :param dry_run: If true, then the request simulates the operation only and returns - the resulting state. + :param dry_run: If true, then the request simulates the operation. It will calculate + the result of applying the commands to the current cluster state and return + the resulting cluster state after the commands (and rebalancing) have been + applied; it will not actually perform the requested changes. :param explain: If true, then the response contains an explanation of why the - commands can or cannot be executed. + commands can or cannot run. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -975,7 +1048,26 @@ async def state( wait_for_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns a comprehensive information about the state of the cluster. + Get the cluster state. Get comprehensive information about the state of the cluster. + The cluster state is an internal data structure which keeps track of a variety + of information needed by every node, including the identity and attributes of + the other nodes in the cluster; cluster-wide settings; index metadata, including + the mapping and settings for each index; the location and status of every shard + copy in the cluster. The elected master node ensures that every node in the cluster + has a copy of the same cluster state. This API lets you retrieve a representation + of this internal state for debugging or diagnostic purposes. You may need to + consult the Elasticsearch source code to determine the precise meaning of the + response. By default the API will route requests to the elected master node since + this node is the authoritative source of cluster states. You can also retrieve + the cluster state held on the node handling the API request by adding the `?local=true` + query parameter. Elasticsearch may need to expend significant effort to compute + a response to this API in larger clusters, and the response may comprise a very + large quantity of data. If you use this API repeatedly, your cluster may become + unstable. WARNING: The response is a representation of an internal data structure. + Its format is not subject to the same compatibility guarantees as other more + stable APIs and may change from version to version. Do not query this API using + external monitoring tools. 
Instead, obtain the information you require using + other more stable cluster APIs. ``_ @@ -1059,9 +1151,9 @@ async def stats( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster statistics. It returns basic index metrics (shard numbers, store - size, memory usage) and information about the current nodes that form the cluster - (number, roles, os, jvm versions, memory usage, cpu and installed plugins). + Get cluster statistics. Get basic index metrics (shard numbers, store size, memory + usage) and information about the current nodes that form the cluster (number, + roles, os, jvm versions, memory usage, cpu and installed plugins). ``_ diff --git a/elasticsearch/_async/client/enrich.py b/elasticsearch/_async/client/enrich.py index 6cb22bf26..643ef6b90 100644 --- a/elasticsearch/_async/client/enrich.py +++ b/elasticsearch/_async/client/enrich.py @@ -77,7 +77,7 @@ async def execute_policy( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates the enrich index for an existing enrich policy. + Run an enrich policy. Create the enrich index for an existing enrich policy. ``_ diff --git a/elasticsearch/_async/client/eql.py b/elasticsearch/_async/client/eql.py index e835620fd..1a8239eec 100644 --- a/elasticsearch/_async/client/eql.py +++ b/elasticsearch/_async/client/eql.py @@ -36,8 +36,8 @@ async def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an async EQL search or a stored synchronous EQL search. The API also - deletes results for the search. + Delete an async EQL search. Delete an async EQL search or a stored synchronous + EQL search. The API also deletes results for the search. ``_ @@ -83,8 +83,8 @@ async def get( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the current status and available results for an async EQL search or a - stored synchronous EQL search. + Get async EQL search results. Get the current status and available results for + an async EQL search or a stored synchronous EQL search. ``_ @@ -134,8 +134,8 @@ async def get_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the current status for an async EQL search or a stored synchronous EQL - search without returning results. + Get the async EQL status. Get the current status for an async EQL search or a + stored synchronous EQL search without returning results. ``_ @@ -223,7 +223,9 @@ async def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns results matching a query expressed in Event Query Language (EQL) + Get EQL search results. Returns search results for an Event Query Language (EQL) + query. EQL assumes each document in a data stream or index corresponds to an + event. ``_ diff --git a/elasticsearch/_async/client/esql.py b/elasticsearch/_async/client/esql.py index 68eb37243..b8a39d611 100644 --- a/elasticsearch/_async/client/esql.py +++ b/elasticsearch/_async/client/esql.py @@ -68,7 +68,8 @@ async def query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Executes an ES|QL request + Run an ES|QL query. Get search results for an ES|QL (Elasticsearch query language) + query. 
``_ diff --git a/elasticsearch/_async/client/graph.py b/elasticsearch/_async/client/graph.py index 2ce75e7f2..df8f3fdbe 100644 --- a/elasticsearch/_async/client/graph.py +++ b/elasticsearch/_async/client/graph.py @@ -45,8 +45,14 @@ async def explore( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Extracts and summarizes information about the documents and terms in an Elasticsearch - data stream or index. + Explore graph analytics. Extract and summarize information about the documents + and terms in an Elasticsearch data stream or index. The easiest way to understand + the behavior of this API is to use the Graph UI to explore connections. An initial + request to the `_explore` API contains a seed query that identifies the documents + of interest and specifies the fields that define the vertices and connections + you want to include in the graph. Subsequent requests enable you to spider out + from one more vertices of interest. You can exclude vertices that have already + been returned. ``_ diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py index e884cd8ff..e7af76ecc 100644 --- a/elasticsearch/_async/client/indices.py +++ b/elasticsearch/_async/client/indices.py @@ -1354,7 +1354,7 @@ async def exists_index_template( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Returns information about whether a particular index template exists. + Check index templates. Check whether index templates exist. ``_ @@ -3698,8 +3698,8 @@ async def resolve_index( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resolves the specified name(s) and/or index patterns for indices, aliases, and - data streams. Multiple patterns and remote clusters are supported. + Resolve indices. Resolve the names and/or index patterns for indices, aliases, + and data streams. Multiple patterns and remote clusters are supported. ``_ diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py index a3ddb1628..701ba6835 100644 --- a/elasticsearch/_async/client/inference.py +++ b/elasticsearch/_async/client/inference.py @@ -20,19 +20,12 @@ from elastic_transport import ObjectApiResponse from ._base import NamespacedClient -from .utils import ( - SKIP_IN_PATH, - Stability, - _quote, - _rewrite_parameters, - _stability_warning, -) +from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class InferenceClient(NamespacedClient): @_rewrite_parameters() - @_stability_warning(Stability.EXPERIMENTAL) async def delete( self, *, @@ -100,7 +93,6 @@ async def delete( ) @_rewrite_parameters() - @_stability_warning(Stability.EXPERIMENTAL) async def get( self, *, @@ -159,7 +151,6 @@ async def get( @_rewrite_parameters( body_fields=("input", "query", "task_settings"), ) - @_stability_warning(Stability.EXPERIMENTAL) async def inference( self, *, @@ -246,7 +237,6 @@ async def inference( @_rewrite_parameters( body_name="inference_config", ) - @_stability_warning(Stability.EXPERIMENTAL) async def put( self, *, diff --git a/elasticsearch/_async/client/ingest.py b/elasticsearch/_async/client/ingest.py index 0d78dc03c..ecd516365 100644 --- a/elasticsearch/_async/client/ingest.py +++ b/elasticsearch/_async/client/ingest.py @@ -38,7 +38,8 @@ async def delete_geoip_database( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a geoip database configuration. + Delete GeoIP database configurations. 
Delete one or more IP geolocation database + configurations. ``_ @@ -89,7 +90,7 @@ async def delete_pipeline( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes one or more existing ingest pipeline. + Delete pipelines. Delete one or more ingest pipelines. ``_ @@ -138,7 +139,8 @@ async def geo_ip_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets download statistics for GeoIP2 databases used with the geoip processor. + Get GeoIP statistics. Get download statistics for GeoIP2 databases that are used + with the GeoIP processor. ``_ """ @@ -175,7 +177,8 @@ async def get_geoip_database( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about one or more geoip database configurations. + Get GeoIP database configurations. Get information about one or more IP geolocation + database configurations. ``_ @@ -227,8 +230,8 @@ async def get_pipeline( summary: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about one or more ingest pipelines. This API returns a local - reference of the pipeline. + Get pipelines. Get information about one or more ingest pipelines. This API returns + a local reference of the pipeline. ``_ @@ -279,10 +282,10 @@ async def processor_grok( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Extracts structured fields out of a single text field within a document. You - choose which field to extract matched fields from, as well as the grok pattern - you expect will match. A grok pattern is like a regular expression that supports - aliased expressions that can be reused. + Run a grok processor. Extract structured fields out of a single text field within + a document. You must choose which field to extract matched fields from, as well + as the grok pattern you expect will match. A grok pattern is like a regular expression + that supports aliased expressions that can be reused. ``_ """ @@ -325,7 +328,8 @@ async def put_geoip_database( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about one or more geoip database configurations. + Create or update GeoIP database configurations. Create or update IP geolocation + database configurations. ``_ @@ -411,8 +415,7 @@ async def put_pipeline( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates an ingest pipeline. Changes made using this API take effect - immediately. + Create or update a pipeline. Changes made using this API take effect immediately. ``_ @@ -504,7 +507,9 @@ async def simulate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Executes an ingest pipeline against a set of provided documents. + Simulate a pipeline. Run an ingest pipeline against a set of provided documents. + You can either specify an existing pipeline to use with the provided documents + or supply a pipeline definition in the body of the request. ``_ diff --git a/elasticsearch/_async/client/nodes.py b/elasticsearch/_async/client/nodes.py index a7b516588..5aa8aa0be 100644 --- a/elasticsearch/_async/client/nodes.py +++ b/elasticsearch/_async/client/nodes.py @@ -44,8 +44,8 @@ async def clear_repositories_metering_archive( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - You can use this API to clear the archived repositories metering information - in the cluster. + Clear the archived repositories metering. 
Clear the archived repositories metering + information in the cluster. ``_ @@ -94,11 +94,11 @@ async def get_repositories_metering_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - You can use the cluster repositories metering API to retrieve repositories metering - information in a cluster. This API exposes monotonically non-decreasing counters - and it’s expected that clients would durably store the information needed to - compute aggregations over a period of time. Additionally, the information exposed - by this API is volatile, meaning that it won’t be present after node restarts. + Get cluster repositories metering. Get repositories metering information for + a cluster. This API exposes monotonically non-decreasing counters and it is expected + that clients would durably store the information needed to compute aggregations + over a period of time. Additionally, the information exposed by this API is volatile, + meaning that it will not be present after node restarts. ``_ @@ -151,8 +151,9 @@ async def hot_threads( ] = None, ) -> TextApiResponse: """ - This API yields a breakdown of the hot threads on each selected node in the cluster. - The output is plain text with a breakdown of each node’s top hot threads. + Get the hot threads for nodes. Get a breakdown of the hot threads on each selected + node in the cluster. The output is plain text with a breakdown of the top hot + threads for each node. ``_ @@ -227,7 +228,8 @@ async def info( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster nodes information. + Get node information. By default, the API returns all attributes and core settings + for cluster nodes. ``_ @@ -296,7 +298,18 @@ async def reload_secure_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Reloads the keystore on nodes in the cluster. + Reload the keystore on nodes in the cluster. Secure settings are stored in an + on-disk keystore. Certain of these settings are reloadable. That is, you can + change them on disk and reload them without restarting any nodes in the cluster. + When you have updated reloadable secure settings in your keystore, you can use + this API to reload those settings on each node. When the Elasticsearch keystore + is password protected and not simply obfuscated, you must provide the password + for the keystore when you reload the secure settings. Reloading the settings + for the whole cluster assumes that the keystores for all nodes are protected + with the same password; this method is allowed only when inter-node communications + are encrypted. Alternatively, you can reload the secure settings on each node + by locally accessing the API and passing the node-specific Elasticsearch keystore + password. ``_ @@ -367,7 +380,8 @@ async def stats( types: t.Optional[t.Sequence[str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster nodes statistics. + Get node statistics. Get statistics for nodes in a cluster. By default, all stats + are returned. You can limit the returned information by using metrics. ``_ @@ -484,7 +498,7 @@ async def usage( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information on the usage of features. + Get feature usage information. 
``_ diff --git a/elasticsearch/_async/client/query_rules.py b/elasticsearch/_async/client/query_rules.py index cbf39e121..02f97bac7 100644 --- a/elasticsearch/_async/client/query_rules.py +++ b/elasticsearch/_async/client/query_rules.py @@ -37,7 +37,7 @@ async def delete_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a query rule within a query ruleset. + Delete a query rule. Delete a query rule within a query ruleset. ``_ @@ -85,7 +85,7 @@ async def delete_ruleset( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a query ruleset. + Delete a query ruleset. ``_ @@ -126,7 +126,7 @@ async def get_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the details about a query rule within a query ruleset + Get a query rule. Get details about a query rule within a query ruleset. ``_ @@ -174,7 +174,7 @@ async def get_ruleset( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the details about a query ruleset + Get a query ruleset. Get details about a query ruleset. ``_ @@ -217,7 +217,7 @@ async def list_rulesets( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns summarized information about existing query rulesets. + Get all query rulesets. Get summarized information about the query rulesets. ``_ @@ -270,7 +270,7 @@ async def put_rule( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a query rule within a query ruleset. + Create or update a query rule. Create or update a query rule within a query ruleset. ``_ @@ -345,7 +345,7 @@ async def put_ruleset( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a query ruleset. + Create or update a query ruleset. ``_ @@ -398,7 +398,8 @@ async def test( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a query ruleset. + Test a query ruleset. Evaluate match criteria against a query ruleset to identify + the rules that would match that criteria. ``_ diff --git a/elasticsearch/_async/client/security.py b/elasticsearch/_async/client/security.py index f7e3d7ff7..2304eb2cf 100644 --- a/elasticsearch/_async/client/security.py +++ b/elasticsearch/_async/client/security.py @@ -2052,6 +2052,7 @@ async def has_privileges( "monitor_ml", "monitor_rollup", "monitor_snapshot", + "monitor_stats", "monitor_text_structure", "monitor_transform", "monitor_watcher", @@ -2392,6 +2393,7 @@ async def put_privileges( "global_", "indices", "metadata", + "remote_cluster", "remote_indices", "run_as", "transient_metadata", @@ -2452,6 +2454,7 @@ async def put_role( "monitor_ml", "monitor_rollup", "monitor_snapshot", + "monitor_stats", "monitor_text_structure", "monitor_transform", "monitor_watcher", @@ -2481,6 +2484,7 @@ async def put_role( refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, + remote_cluster: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, remote_indices: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, run_as: t.Optional[t.Sequence[str]] = None, transient_metadata: t.Optional[t.Mapping[str, t.Any]] = None, @@ -2508,6 +2512,7 @@ async def put_role( :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. 
+ :param remote_cluster: A list of remote cluster permissions entries. :param remote_indices: A list of remote indices permissions entries. :param run_as: A list of users that the owners of this role can impersonate. *Note*: in Serverless, the run-as feature is disabled. For API compatibility, @@ -2549,6 +2554,8 @@ async def put_role( __body["indices"] = indices if metadata is not None: __body["metadata"] = metadata + if remote_cluster is not None: + __body["remote_cluster"] = remote_cluster if remote_indices is not None: __body["remote_indices"] = remote_indices if run_as is not None: diff --git a/elasticsearch/_async/client/sql.py b/elasticsearch/_async/client/sql.py index c4b2f4335..06e8f98a3 100644 --- a/elasticsearch/_async/client/sql.py +++ b/elasticsearch/_async/client/sql.py @@ -39,7 +39,7 @@ async def clear_cursor( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clears the SQL cursor + Clear an SQL search cursor. ``_ @@ -84,8 +84,8 @@ async def delete_async( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an async SQL search or a stored synchronous SQL search. If the search - is still running, the API cancels it. + Delete an async SQL search. Delete an async SQL search or a stored synchronous + SQL search. If the search is still running, the API cancels it. ``_ @@ -131,8 +131,8 @@ async def get_async( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the current status and available results for an async SQL search or stored - synchronous SQL search + Get async SQL search results. Get the current status and available results for + an async SQL search or stored synchronous SQL search. ``_ @@ -189,8 +189,8 @@ async def get_async_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the current status of an async SQL search or a stored synchronous SQL - search + Get the async SQL search status. Get the current status of an async SQL search + or a stored synchronous SQL search. ``_ @@ -273,7 +273,7 @@ async def query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Executes a SQL request + Get SQL search results. Run an SQL request. ``_ @@ -383,7 +383,8 @@ async def translate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Translates SQL into Elasticsearch queries + Translate SQL into Elasticsearch queries. Translate an SQL search into a search + API request containing Query DSL. ``_ diff --git a/elasticsearch/_async/client/synonyms.py b/elasticsearch/_async/client/synonyms.py index fac176a30..c86b2c584 100644 --- a/elasticsearch/_async/client/synonyms.py +++ b/elasticsearch/_async/client/synonyms.py @@ -36,7 +36,7 @@ async def delete_synonym( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a synonym set + Delete a synonym set. ``_ @@ -77,7 +77,7 @@ async def delete_synonym_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a synonym rule in a synonym set + Delete a synonym rule. Delete a synonym rule from a synonym set. ``_ @@ -127,7 +127,7 @@ async def get_synonym( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a synonym set + Get a synonym set. ``_ @@ -174,7 +174,7 @@ async def get_synonym_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a synonym rule from a synonym set + Get a synonym rule. Get a synonym rule from a synonym set. 
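The new `remote_cluster` role parameter might be exercised as in this sketch; the role name, privilege, and cluster pattern are illustrative assumptions rather than values mandated by the change:

    # Grant a hypothetical role cross-cluster monitoring permissions.
    client.security.put_role(
        name="remote-stats-role",
        cluster=["monitor"],
        remote_cluster=[
            {"privileges": ["monitor_stats"], "clusters": ["my_remote_*"]}
        ],
    )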
``_ @@ -223,7 +223,7 @@ async def get_synonyms_sets( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a summary of all defined synonym sets + Get all synonym sets. Get a summary of all defined synonym sets. ``_ @@ -272,7 +272,9 @@ async def put_synonym( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a synonym set. + Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 + synonym rules per set. If you need to manage more synonym rules, you can create + multiple synonym sets. ``_ @@ -325,7 +327,8 @@ async def put_synonym_rule( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a synonym rule in a synonym set + Create or update a synonym rule. Create or update a synonym rule in a synonym + set. ``_ diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py index adf877523..12d6f3fc5 100644 --- a/elasticsearch/_sync/client/__init__.py +++ b/elasticsearch/_sync/client/__init__.py @@ -870,7 +870,7 @@ def count( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns number of documents matching a query. + Count search results. Get the number of documents matching a query. ``_ @@ -2272,7 +2272,26 @@ def health_report( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the health of the cluster. + Get the cluster health. Get a report with the health status of an Elasticsearch + cluster. The report contains a list of indicators that compose Elasticsearch + functionality. Each indicator has a health status of: green, unknown, yellow + or red. The indicator will provide an explanation and metadata describing the + reason for its current health status. The cluster’s status is controlled by the + worst indicator status. In the event that an indicator’s status is non-green, + a list of impacts may be present in the indicator result which detail the functionalities + that are negatively affected by the health issue. Each impact carries with it + a severity level, an area of the system that is affected, and a simple description + of the impact on the system. Some health indicators can determine the root cause + of a health problem and prescribe a set of steps that can be performed in order + to improve the health of the system. The root cause and remediation steps are + encapsulated in a diagnosis. A diagnosis contains a cause detailing a root cause + analysis, an action containing a brief description of the steps to take to fix + the problem, the list of affected resources (if applicable), and a detailed step-by-step + troubleshooting guide to fix the diagnosed problem. NOTE: The health indicators + perform root cause analysis of non-green health statuses. This can be computationally + expensive when called frequently. When setting up automated polling of the API + for health status, set verbose to false to disable the more expensive analysis + logic. 
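A small sketch of creating a synonym set within the documented 10,000-rule limit; the set id and rule contents are invented for illustration:

    client.synonyms.put_synonym(
        id="my-synonyms",
        synonyms_set=[
            {"id": "rule-1", "synonyms": "hello, hi, howdy"},
        ],
    )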
``_ @@ -3077,6 +3096,7 @@ def open_point_in_time( *, index: t.Union[str, t.Sequence[str]], keep_alive: t.Union[str, t.Literal[-1], t.Literal[0]], + allow_partial_search_results: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ @@ -3111,6 +3131,10 @@ def open_point_in_time( :param index: A comma-separated list of index names to open point in time; use `_all` or empty string to perform the operation on all indices :param keep_alive: Extends the time to live of the corresponding point in time. + :param allow_partial_search_results: If `false`, creating a point in time request + when a shard is missing or unavailable will throw an exception. If `true`, + the point in time will contain all the shards that are available at the time + of the request. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such @@ -3133,6 +3157,8 @@ def open_point_in_time( __body: t.Dict[str, t.Any] = body if body is not None else {} if keep_alive is not None: __query["keep_alive"] = keep_alive + if allow_partial_search_results is not None: + __query["allow_partial_search_results"] = allow_partial_search_results if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: diff --git a/elasticsearch/_sync/client/async_search.py b/elasticsearch/_sync/client/async_search.py index 147553dc3..3a8791e3c 100644 --- a/elasticsearch/_sync/client/async_search.py +++ b/elasticsearch/_sync/client/async_search.py @@ -145,6 +145,7 @@ def status( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -156,6 +157,9 @@ def status( ``_ :param id: A unique identifier for the async search. + :param keep_alive: Specifies how long the async search needs to be available. + Ongoing async searches and any saved search results are deleted after this + period. 
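The two additions above, `allow_partial_search_results` for point-in-time requests and `keep_alive` for the async search status API, might be used as sketched below; the index names and durations are assumptions:

    # Open a PIT that tolerates missing or unavailable shards.
    pit = client.open_point_in_time(
        index="my-index",
        keep_alive="1m",
        allow_partial_search_results=True,
    )

    # Submit an async search with stored results, then extend how long its
    # status and results remain available.
    submitted = client.async_search.submit(
        index="my-index", query={"match_all": {}}, keep_on_completion=True
    )
    status = client.async_search.status(id=submitted["id"], keep_alive="3d")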
""" if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -168,6 +172,8 @@ def status( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if keep_alive is not None: + __query["keep_alive"] = keep_alive if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -258,7 +264,6 @@ def submit( ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, indices_boost: t.Optional[t.Sequence[t.Mapping[str, float]]] = None, - keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, keep_on_completion: t.Optional[bool] = None, knn: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] @@ -269,7 +274,6 @@ def submit( min_score: t.Optional[float] = None, pit: t.Optional[t.Mapping[str, t.Any]] = None, post_filter: t.Optional[t.Mapping[str, t.Any]] = None, - pre_filter_shard_size: t.Optional[int] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, profile: t.Optional[bool] = None, @@ -283,7 +287,6 @@ def submit( routing: t.Optional[str] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, script_fields: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, - scroll: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, search_after: t.Optional[ t.Sequence[t.Union[None, bool, float, int, str, t.Any]] ] = None, @@ -376,9 +379,6 @@ def submit( :param ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :param indices_boost: Boosts the _score of documents from specified indices. - :param keep_alive: Specifies how long the async search needs to be available. - Ongoing async searches and any saved search results are deleted after this - period. :param keep_on_completion: If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`. :param knn: Defines the approximate kNN search to run. @@ -394,10 +394,6 @@ def submit( :param pit: Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an in the request path. :param post_filter: - :param pre_filter_shard_size: The default value cannot be changed, which enforces - the execution of a pre-filter roundtrip to retrieve statistics from each - shard so that the ones that surely don’t hold any document matching the query - get skipped. :param preference: Specify the node or shard the operation should be performed on (default: random) :param profile: @@ -406,13 +402,13 @@ def submit( :param request_cache: Specify if request cache should be used for this request or not, defaults to true :param rescore: - :param rest_total_hits_as_int: + :param rest_total_hits_as_int: Indicates whether hits.total should be rendered + as an integer or an object in the rest search response :param routing: A comma-separated list of specific routing values :param runtime_mappings: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. :param script_fields: Retrieve a script evaluation (based on different fields) for each hit. 
- :param scroll: :param search_after: :param search_type: Search operation type :param seq_no_primary_term: If true, returns sequence number and primary term @@ -509,8 +505,6 @@ def submit( __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable - if keep_alive is not None: - __query["keep_alive"] = keep_alive if keep_on_completion is not None: __query["keep_on_completion"] = keep_on_completion if lenient is not None: @@ -519,8 +513,6 @@ def submit( __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests if min_compatible_shard_node is not None: __query["min_compatible_shard_node"] = min_compatible_shard_node - if pre_filter_shard_size is not None: - __query["pre_filter_shard_size"] = pre_filter_shard_size if preference is not None: __query["preference"] = preference if pretty is not None: @@ -533,8 +525,6 @@ def submit( __query["rest_total_hits_as_int"] = rest_total_hits_as_int if routing is not None: __query["routing"] = routing - if scroll is not None: - __query["scroll"] = scroll if search_type is not None: __query["search_type"] = search_type if source_excludes is not None: diff --git a/elasticsearch/_sync/client/autoscaling.py b/elasticsearch/_sync/client/autoscaling.py index 6dc45d2a5..c73f74986 100644 --- a/elasticsearch/_sync/client/autoscaling.py +++ b/elasticsearch/_sync/client/autoscaling.py @@ -33,7 +33,9 @@ def delete_autoscaling_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Delete an autoscaling policy. NOTE: This feature is designed for indirect use @@ -43,6 +45,11 @@ def delete_autoscaling_policy( ``_ :param name: the name of the autoscaling policy + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -55,8 +62,12 @@ def delete_autoscaling_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", @@ -74,6 +85,7 @@ def get_autoscaling_capacity( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -91,6 +103,10 @@ def get_autoscaling_capacity( use this information to make autoscaling decisions. ``_ + + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. 
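The new timeout parameters on the autoscaling APIs might be passed as in this sketch; the policy name and durations are placeholders:

    # Remove a hypothetical policy, bounding both the master connection and the response time.
    client.autoscaling.delete_autoscaling_policy(
        name="my-policy", master_timeout="30s", timeout="30s"
    )

    # Query the current autoscaling capacity with a bounded master timeout.
    capacity = client.autoscaling.get_autoscaling_capacity(master_timeout="30s")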
""" __path_parts: t.Dict[str, str] = {} __path = "/_autoscaling/capacity" @@ -101,6 +117,8 @@ def get_autoscaling_capacity( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -121,6 +139,7 @@ def get_autoscaling_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -131,6 +150,9 @@ def get_autoscaling_policy( ``_ :param name: the name of the autoscaling policy + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -143,6 +165,8 @@ def get_autoscaling_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -167,7 +191,9 @@ def put_autoscaling_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Create or update an autoscaling policy. NOTE: This feature is designed for indirect @@ -178,6 +204,11 @@ def put_autoscaling_policy( :param name: the name of the autoscaling policy :param policy: + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -196,8 +227,12 @@ def put_autoscaling_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __body = policy if policy is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] diff --git a/elasticsearch/_sync/client/cluster.py b/elasticsearch/_sync/client/cluster.py index d7f60e889..6c1afa6c7 100644 --- a/elasticsearch/_sync/client/cluster.py +++ b/elasticsearch/_sync/client/cluster.py @@ -44,7 +44,13 @@ def allocation_explain( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Provides explanations for shard allocations in the cluster. + Explain the shard allocations. Get explanations for shard allocations in the + cluster. For unassigned shards, it provides an explanation for why the shard + is unassigned. 
For assigned shards, it provides an explanation for why the shard + is remaining on its current node and has not moved or rebalanced to another node. + This API can be very useful when attempting to diagnose why a shard is unassigned + or why a shard continues to remain on its current node when you might expect + otherwise. ``_ @@ -165,7 +171,8 @@ def delete_voting_config_exclusions( wait_for_removal: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clears cluster voting config exclusions. + Clear cluster voting config exclusions. Remove master-eligible nodes from the + voting configuration exclusion list. ``_ @@ -331,8 +338,8 @@ def get_settings( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster-wide settings. By default, it returns only settings that have - been explicitly defined. + Get cluster-wide settings. By default, it returns only settings that have been + explicitly defined. ``_ @@ -414,14 +421,15 @@ def health( ] = None, ) -> ObjectApiResponse[t.Any]: """ - The cluster health API returns a simple status on the health of the cluster. - You can also use the API to get the health status of only specified data streams - and indices. For data streams, the API retrieves the health status of the stream’s - backing indices. The cluster health status is: green, yellow or red. On the shard - level, a red status indicates that the specific shard is not allocated in the - cluster, yellow means that the primary shard is allocated but replicas are not, - and green means that all shards are allocated. The index level status is controlled - by the worst shard status. The cluster status is controlled by the worst index + Get the cluster health status. You can also use the API to get the health status + of only specified data streams and indices. For data streams, the API retrieves + the health status of the stream’s backing indices. The cluster health status + is: green, yellow or red. On the shard level, a red status indicates that the + specific shard is not allocated in the cluster. Yellow means that the primary + shard is allocated but replicas are not. Green means that all shards are allocated. + The index level status is controlled by the worst shard status. One of the main + benefits of the API is the ability to wait until the cluster reaches a certain + high watermark health level. The cluster status is controlled by the worst index status. ``_ @@ -568,14 +576,14 @@ def pending_tasks( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster-level changes (such as create index, update mapping, allocate - or fail shard) that have not yet been executed. NOTE: This API returns a list - of any pending updates to the cluster state. These are distinct from the tasks - reported by the Task Management API which include periodic tasks and tasks initiated - by the user, such as node stats, search queries, or create index requests. However, - if a user-initiated task such as a create index command causes a cluster state - update, the activity of this task might be reported by both task api and pending - cluster tasks API. + Get the pending cluster tasks. Get information about cluster-level changes (such + as create index, update mapping, allocate or fail shard) that have not yet taken + effect. NOTE: This API returns a list of any pending updates to the cluster state. 
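One of the waiting behaviours described above can be sketched as follows, assuming a reachable test cluster:

    # Block for up to 30 seconds until the cluster reaches at least yellow health.
    health = client.cluster.health(wait_for_status="yellow", timeout="30s")
    print(health["status"])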
+ These are distinct from the tasks reported by the task management API which include + periodic tasks and tasks initiated by the user, such as node stats, search queries, + or create index requests. However, if a user-initiated task such as a create + index command causes a cluster state update, the activity of this task might + be reported by both task api and pending cluster tasks API. ``_ @@ -623,7 +631,33 @@ def post_voting_config_exclusions( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the cluster voting config exclusions by node ids or node names. + Update voting configuration exclusions. Update the cluster voting config exclusions + by node IDs or node names. By default, if there are more than three master-eligible + nodes in the cluster and you remove fewer than half of the master-eligible nodes + in the cluster at once, the voting configuration automatically shrinks. If you + want to shrink the voting configuration to contain fewer than three nodes or + to remove half or more of the master-eligible nodes in the cluster at once, use + this API to remove departing nodes from the voting configuration manually. The + API adds an entry for each specified node to the cluster’s voting configuration + exclusions list. It then waits until the cluster has reconfigured its voting + configuration to exclude the specified nodes. Clusters should have no voting + configuration exclusions in normal operation. Once the excluded nodes have stopped, + clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. + This API waits for the nodes to be fully removed from the cluster before it returns. + If your cluster has voting configuration exclusions for nodes that you no longer + intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` + to clear the voting configuration exclusions without waiting for the nodes to + leave the cluster. A response to `POST /_cluster/voting_config_exclusions` with + an HTTP status code of 200 OK guarantees that the node has been removed from + the voting configuration and will not be reinstated until the voting configuration + exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. + If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response + with an HTTP status code other than 200 OK then the node may not have been removed + from the voting configuration. In that case, you may safely retry the call. NOTE: + Voting exclusions are required only when you remove at least half of the master-eligible + nodes from a cluster in a short time period. They are not required when removing + master-ineligible nodes or when removing fewer than half of the master-eligible + nodes. ``_ @@ -787,7 +821,26 @@ def put_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the cluster settings. + Update the cluster settings. Configure and update dynamic settings on a running + cluster. You can also configure dynamic settings locally on an unstarted or shut + down node in `elasticsearch.yml`. Updates made with this API can be persistent, + which apply across cluster restarts, or transient, which reset after a cluster + restart. You can also reset transient or persistent settings by assigning them + a null value. 
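The add-then-clear lifecycle of voting configuration exclusions might look like this sketch; the node names are placeholders:

    # Exclude two master-eligible nodes that are about to be decommissioned.
    client.cluster.post_voting_config_exclusions(node_names="node-1,node-2")

    # ...shut the excluded nodes down...

    # Clear the exclusion list once the nodes have left the cluster.
    client.cluster.delete_voting_config_exclusions()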
If you configure the same setting using multiple methods, Elasticsearch + applies the settings in following order of precedence: 1) Transient setting; + 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value. + For example, you can apply a transient setting to override a persistent setting + or `elasticsearch.yml` setting. However, a change to an `elasticsearch.yml` setting + will not override a defined transient or persistent setting. TIP: In Elastic + Cloud, use the user settings feature to configure all cluster settings. This + method automatically rejects unsafe settings that could break your cluster. If + you run Elasticsearch on your own hardware, use this API to configure dynamic + cluster settings. Only use `elasticsearch.yml` for static cluster settings and + node settings. The API doesn’t require a restart and ensures a setting’s value + is the same on all nodes. WARNING: Transient cluster settings are no longer recommended. + Use persistent cluster settings instead. If a cluster becomes unstable, transient + settings can clear unexpectedly, resulting in a potentially undesired cluster + configuration. ``_ @@ -841,9 +894,9 @@ def remote_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - The cluster remote info API allows you to retrieve all of the configured remote - cluster information. It returns connection and endpoint information keyed by - the configured remote cluster alias. + Get remote cluster information. Get all of the configured remote cluster information. + This API returns connection and endpoint information keyed by the configured + remote cluster alias. ``_ """ @@ -888,15 +941,35 @@ def reroute( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Allows to manually change the allocation of individual shards in the cluster. + Reroute the cluster. Manually change the allocation of individual shards in the + cluster. For example, a shard can be moved from one node to another explicitly, + an allocation can be canceled, and an unassigned shard can be explicitly allocated + to a specific node. It is important to note that after processing any reroute + commands Elasticsearch will perform rebalancing as normal (respecting the values + of settings such as `cluster.routing.rebalance.enable`) in order to remain in + a balanced state. For example, if the requested allocation includes moving a + shard from node1 to node2 then this may cause a shard to be moved from node2 + back to node1 to even things out. The cluster can be set to disable allocations + using the `cluster.routing.allocation.enable` setting. If allocations are disabled + then the only allocations that will be performed are explicit ones given using + the reroute command, and consequent allocations due to rebalancing. The cluster + will attempt to allocate a shard a maximum of `index.allocation.max_retries` + times in a row (defaults to `5`), before giving up and leaving the shard unallocated. + This scenario can be caused by structural problems such as having an analyzer + which refers to a stopwords file which doesn’t exist on all nodes. Once the problem + has been corrected, allocation can be manually retried by calling the reroute + API with the `?retry_failed` URI query parameter, which will attempt a single + retry round for these shards. ``_ :param commands: Defines the commands to perform. - :param dry_run: If true, then the request simulates the operation only and returns - the resulting state. 
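A minimal sketch of a persistent dynamic-setting update, using an assumed setting value:

    # Persistent settings survive full cluster restarts.
    client.cluster.put_settings(
        persistent={"indices.recovery.max_bytes_per_sec": "50mb"}
    )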
+ :param dry_run: If true, then the request simulates the operation. It will calculate + the result of applying the commands to the current cluster state and return + the resulting cluster state after the commands (and rebalancing) have been + applied; it will not actually perform the requested changes. :param explain: If true, then the response contains an explanation of why the - commands can or cannot be executed. + commands can or cannot run. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -975,7 +1048,26 @@ def state( wait_for_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns a comprehensive information about the state of the cluster. + Get the cluster state. Get comprehensive information about the state of the cluster. + The cluster state is an internal data structure which keeps track of a variety + of information needed by every node, including the identity and attributes of + the other nodes in the cluster; cluster-wide settings; index metadata, including + the mapping and settings for each index; the location and status of every shard + copy in the cluster. The elected master node ensures that every node in the cluster + has a copy of the same cluster state. This API lets you retrieve a representation + of this internal state for debugging or diagnostic purposes. You may need to + consult the Elasticsearch source code to determine the precise meaning of the + response. By default the API will route requests to the elected master node since + this node is the authoritative source of cluster states. You can also retrieve + the cluster state held on the node handling the API request by adding the `?local=true` + query parameter. Elasticsearch may need to expend significant effort to compute + a response to this API in larger clusters, and the response may comprise a very + large quantity of data. If you use this API repeatedly, your cluster may become + unstable. WARNING: The response is a representation of an internal data structure. + Its format is not subject to the same compatibility guarantees as other more + stable APIs and may change from version to version. Do not query this API using + external monitoring tools. Instead, obtain the information you require using + other more stable cluster APIs. ``_ @@ -1059,9 +1151,9 @@ def stats( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster statistics. It returns basic index metrics (shard numbers, store - size, memory usage) and information about the current nodes that form the cluster - (number, roles, os, jvm versions, memory usage, cpu and installed plugins). + Get cluster statistics. Get basic index metrics (shard numbers, store size, memory + usage) and information about the current nodes that form the cluster (number, + roles, os, jvm versions, memory usage, cpu and installed plugins). ``_ diff --git a/elasticsearch/_sync/client/enrich.py b/elasticsearch/_sync/client/enrich.py index 099cbf2dd..b33eff34d 100644 --- a/elasticsearch/_sync/client/enrich.py +++ b/elasticsearch/_sync/client/enrich.py @@ -77,7 +77,7 @@ def execute_policy( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates the enrich index for an existing enrich policy. + Run an enrich policy. Create the enrich index for an existing enrich policy. 
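The dry-run and retry-failed behaviours described above could be exercised roughly as follows; the index and node names are invented:

    # Simulate a manual shard move and inspect the resulting state without applying it.
    preview = client.cluster.reroute(
        dry_run=True,
        explain=True,
        commands=[
            {
                "move": {
                    "index": "my-index",
                    "shard": 0,
                    "from_node": "node-1",
                    "to_node": "node-2",
                }
            }
        ],
    )

    # Ask for one more allocation attempt for shards that hit index.allocation.max_retries.
    client.cluster.reroute(retry_failed=True)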
``_ diff --git a/elasticsearch/_sync/client/eql.py b/elasticsearch/_sync/client/eql.py index 39af43fb0..2610b3261 100644 --- a/elasticsearch/_sync/client/eql.py +++ b/elasticsearch/_sync/client/eql.py @@ -36,8 +36,8 @@ def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an async EQL search or a stored synchronous EQL search. The API also - deletes results for the search. + Delete an async EQL search. Delete an async EQL search or a stored synchronous + EQL search. The API also deletes results for the search. ``_ @@ -83,8 +83,8 @@ def get( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the current status and available results for an async EQL search or a - stored synchronous EQL search. + Get async EQL search results. Get the current status and available results for + an async EQL search or a stored synchronous EQL search. ``_ @@ -134,8 +134,8 @@ def get_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the current status for an async EQL search or a stored synchronous EQL - search without returning results. + Get the async EQL status. Get the current status for an async EQL search or a + stored synchronous EQL search without returning results. ``_ @@ -223,7 +223,9 @@ def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns results matching a query expressed in Event Query Language (EQL) + Get EQL search results. Returns search results for an Event Query Language (EQL) + query. EQL assumes each document in a data stream or index corresponds to an + event. ``_ diff --git a/elasticsearch/_sync/client/esql.py b/elasticsearch/_sync/client/esql.py index fc9fd2a7e..8863d8e84 100644 --- a/elasticsearch/_sync/client/esql.py +++ b/elasticsearch/_sync/client/esql.py @@ -68,7 +68,8 @@ def query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Executes an ES|QL request + Run an ES|QL query. Get search results for an ES|QL (Elasticsearch query language) + query. ``_ diff --git a/elasticsearch/_sync/client/graph.py b/elasticsearch/_sync/client/graph.py index 5a29add0b..f62bbb15a 100644 --- a/elasticsearch/_sync/client/graph.py +++ b/elasticsearch/_sync/client/graph.py @@ -45,8 +45,14 @@ def explore( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Extracts and summarizes information about the documents and terms in an Elasticsearch - data stream or index. + Explore graph analytics. Extract and summarize information about the documents + and terms in an Elasticsearch data stream or index. The easiest way to understand + the behavior of this API is to use the Graph UI to explore connections. An initial + request to the `_explore` API contains a seed query that identifies the documents + of interest and specifies the fields that define the vertices and connections + you want to include in the graph. Subsequent requests enable you to spider out + from one more vertices of interest. You can exclude vertices that have already + been returned. ``_ diff --git a/elasticsearch/_sync/client/indices.py b/elasticsearch/_sync/client/indices.py index 92133311a..b27909af1 100644 --- a/elasticsearch/_sync/client/indices.py +++ b/elasticsearch/_sync/client/indices.py @@ -1354,7 +1354,7 @@ def exists_index_template( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Returns information about whether a particular index template exists. + Check index templates. Check whether index templates exist. 
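A short sketch of running an ES|QL query through the client; the index name and query text are assumptions:

    resp = client.esql.query(query="FROM my-index | LIMIT 5")
    print(resp["columns"], resp["values"])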
``_ @@ -3698,8 +3698,8 @@ def resolve_index( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resolves the specified name(s) and/or index patterns for indices, aliases, and - data streams. Multiple patterns and remote clusters are supported. + Resolve indices. Resolve the names and/or index patterns for indices, aliases, + and data streams. Multiple patterns and remote clusters are supported. ``_ diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py index 9f58dfbfc..08f9da4aa 100644 --- a/elasticsearch/_sync/client/inference.py +++ b/elasticsearch/_sync/client/inference.py @@ -20,19 +20,12 @@ from elastic_transport import ObjectApiResponse from ._base import NamespacedClient -from .utils import ( - SKIP_IN_PATH, - Stability, - _quote, - _rewrite_parameters, - _stability_warning, -) +from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class InferenceClient(NamespacedClient): @_rewrite_parameters() - @_stability_warning(Stability.EXPERIMENTAL) def delete( self, *, @@ -100,7 +93,6 @@ def delete( ) @_rewrite_parameters() - @_stability_warning(Stability.EXPERIMENTAL) def get( self, *, @@ -159,7 +151,6 @@ def get( @_rewrite_parameters( body_fields=("input", "query", "task_settings"), ) - @_stability_warning(Stability.EXPERIMENTAL) def inference( self, *, @@ -246,7 +237,6 @@ def inference( @_rewrite_parameters( body_name="inference_config", ) - @_stability_warning(Stability.EXPERIMENTAL) def put( self, *, diff --git a/elasticsearch/_sync/client/ingest.py b/elasticsearch/_sync/client/ingest.py index e244e91a3..db211c1c3 100644 --- a/elasticsearch/_sync/client/ingest.py +++ b/elasticsearch/_sync/client/ingest.py @@ -38,7 +38,8 @@ def delete_geoip_database( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a geoip database configuration. + Delete GeoIP database configurations. Delete one or more IP geolocation database + configurations. ``_ @@ -89,7 +90,7 @@ def delete_pipeline( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes one or more existing ingest pipeline. + Delete pipelines. Delete one or more ingest pipelines. ``_ @@ -138,7 +139,8 @@ def geo_ip_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets download statistics for GeoIP2 databases used with the geoip processor. + Get GeoIP statistics. Get download statistics for GeoIP2 databases that are used + with the GeoIP processor. ``_ """ @@ -175,7 +177,8 @@ def get_geoip_database( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about one or more geoip database configurations. + Get GeoIP database configurations. Get information about one or more IP geolocation + database configurations. ``_ @@ -227,8 +230,8 @@ def get_pipeline( summary: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about one or more ingest pipelines. This API returns a local - reference of the pipeline. + Get pipelines. Get information about one or more ingest pipelines. This API returns + a local reference of the pipeline. ``_ @@ -279,10 +282,10 @@ def processor_grok( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Extracts structured fields out of a single text field within a document. You - choose which field to extract matched fields from, as well as the grok pattern - you expect will match. 
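Resolving names across indices, aliases, and data streams might look like this sketch; the wildcard pattern is illustrative:

    resp = client.indices.resolve_index(name="my-index-*", expand_wildcards="open")
    print(resp["indices"], resp["aliases"], resp["data_streams"])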
A grok pattern is like a regular expression that supports - aliased expressions that can be reused. + Run a grok processor. Extract structured fields out of a single text field within + a document. You must choose which field to extract matched fields from, as well + as the grok pattern you expect will match. A grok pattern is like a regular expression + that supports aliased expressions that can be reused. ``_ """ @@ -325,7 +328,8 @@ def put_geoip_database( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about one or more geoip database configurations. + Create or update GeoIP database configurations. Create or update IP geolocation + database configurations. ``_ @@ -411,8 +415,7 @@ def put_pipeline( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates an ingest pipeline. Changes made using this API take effect - immediately. + Create or update a pipeline. Changes made using this API take effect immediately. ``_ @@ -504,7 +507,9 @@ def simulate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Executes an ingest pipeline against a set of provided documents. + Simulate a pipeline. Run an ingest pipeline against a set of provided documents. + You can either specify an existing pipeline to use with the provided documents + or supply a pipeline definition in the body of the request. ``_ diff --git a/elasticsearch/_sync/client/nodes.py b/elasticsearch/_sync/client/nodes.py index 5c8e36979..13e5254ef 100644 --- a/elasticsearch/_sync/client/nodes.py +++ b/elasticsearch/_sync/client/nodes.py @@ -44,8 +44,8 @@ def clear_repositories_metering_archive( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - You can use this API to clear the archived repositories metering information - in the cluster. + Clear the archived repositories metering. Clear the archived repositories metering + information in the cluster. ``_ @@ -94,11 +94,11 @@ def get_repositories_metering_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - You can use the cluster repositories metering API to retrieve repositories metering - information in a cluster. This API exposes monotonically non-decreasing counters - and it’s expected that clients would durably store the information needed to - compute aggregations over a period of time. Additionally, the information exposed - by this API is volatile, meaning that it won’t be present after node restarts. + Get cluster repositories metering. Get repositories metering information for + a cluster. This API exposes monotonically non-decreasing counters and it is expected + that clients would durably store the information needed to compute aggregations + over a period of time. Additionally, the information exposed by this API is volatile, + meaning that it will not be present after node restarts. ``_ @@ -151,8 +151,9 @@ def hot_threads( ] = None, ) -> TextApiResponse: """ - This API yields a breakdown of the hot threads on each selected node in the cluster. - The output is plain text with a breakdown of each node’s top hot threads. + Get the hot threads for nodes. Get a breakdown of the hot threads on each selected + node in the cluster. The output is plain text with a breakdown of the top hot + threads for each node. ``_ @@ -227,7 +228,8 @@ def info( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster nodes information. + Get node information. 
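A minimal sketch of simulating an inline pipeline definition against a sample document, as described above; the processor and document are placeholders:

    resp = client.ingest.simulate(
        pipeline={"processors": [{"lowercase": {"field": "message"}}]},
        docs=[{"_source": {"message": "HELLO WORLD"}}],
    )
    print(resp)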
By default, the API returns all attributes and core settings + for cluster nodes. ``_ @@ -296,7 +298,18 @@ def reload_secure_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Reloads the keystore on nodes in the cluster. + Reload the keystore on nodes in the cluster. Secure settings are stored in an + on-disk keystore. Certain of these settings are reloadable. That is, you can + change them on disk and reload them without restarting any nodes in the cluster. + When you have updated reloadable secure settings in your keystore, you can use + this API to reload those settings on each node. When the Elasticsearch keystore + is password protected and not simply obfuscated, you must provide the password + for the keystore when you reload the secure settings. Reloading the settings + for the whole cluster assumes that the keystores for all nodes are protected + with the same password; this method is allowed only when inter-node communications + are encrypted. Alternatively, you can reload the secure settings on each node + by locally accessing the API and passing the node-specific Elasticsearch keystore + password. ``_ @@ -367,7 +380,8 @@ def stats( types: t.Optional[t.Sequence[str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns cluster nodes statistics. + Get node statistics. Get statistics for nodes in a cluster. By default, all stats + are returned. You can limit the returned information by using metrics. ``_ @@ -484,7 +498,7 @@ def usage( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information on the usage of features. + Get feature usage information. ``_ diff --git a/elasticsearch/_sync/client/query_rules.py b/elasticsearch/_sync/client/query_rules.py index d5aaa2f76..57e2d74ee 100644 --- a/elasticsearch/_sync/client/query_rules.py +++ b/elasticsearch/_sync/client/query_rules.py @@ -37,7 +37,7 @@ def delete_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a query rule within a query ruleset. + Delete a query rule. Delete a query rule within a query ruleset. ``_ @@ -85,7 +85,7 @@ def delete_ruleset( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a query ruleset. + Delete a query ruleset. ``_ @@ -126,7 +126,7 @@ def get_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the details about a query rule within a query ruleset + Get a query rule. Get details about a query rule within a query ruleset. ``_ @@ -174,7 +174,7 @@ def get_ruleset( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the details about a query ruleset + Get a query ruleset. Get details about a query ruleset. ``_ @@ -217,7 +217,7 @@ def list_rulesets( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns summarized information about existing query rulesets. + Get all query rulesets. Get summarized information about the query rulesets. ``_ @@ -270,7 +270,7 @@ def put_rule( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a query rule within a query ruleset. + Create or update a query rule. Create or update a query rule within a query ruleset. ``_ @@ -345,7 +345,7 @@ def put_ruleset( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a query ruleset. + Create or update a query ruleset. 
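Limiting node statistics to selected metric groups, as mentioned above, might be done like this; the metric list is an example:

    # Only JVM and operating-system statistics for all nodes.
    jvm_and_os = client.nodes.stats(metric="jvm,os")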
``_ @@ -398,7 +398,8 @@ def test( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a query ruleset. + Test a query ruleset. Evaluate match criteria against a query ruleset to identify + the rules that would match that criteria. ``_ diff --git a/elasticsearch/_sync/client/security.py b/elasticsearch/_sync/client/security.py index 35d35a8db..c139f2868 100644 --- a/elasticsearch/_sync/client/security.py +++ b/elasticsearch/_sync/client/security.py @@ -2052,6 +2052,7 @@ def has_privileges( "monitor_ml", "monitor_rollup", "monitor_snapshot", + "monitor_stats", "monitor_text_structure", "monitor_transform", "monitor_watcher", @@ -2392,6 +2393,7 @@ def put_privileges( "global_", "indices", "metadata", + "remote_cluster", "remote_indices", "run_as", "transient_metadata", @@ -2452,6 +2454,7 @@ def put_role( "monitor_ml", "monitor_rollup", "monitor_snapshot", + "monitor_stats", "monitor_text_structure", "monitor_transform", "monitor_watcher", @@ -2481,6 +2484,7 @@ def put_role( refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, + remote_cluster: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, remote_indices: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, run_as: t.Optional[t.Sequence[str]] = None, transient_metadata: t.Optional[t.Mapping[str, t.Any]] = None, @@ -2508,6 +2512,7 @@ def put_role( :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. + :param remote_cluster: A list of remote cluster permissions entries. :param remote_indices: A list of remote indices permissions entries. :param run_as: A list of users that the owners of this role can impersonate. *Note*: in Serverless, the run-as feature is disabled. For API compatibility, @@ -2549,6 +2554,8 @@ def put_role( __body["indices"] = indices if metadata is not None: __body["metadata"] = metadata + if remote_cluster is not None: + __body["remote_cluster"] = remote_cluster if remote_indices is not None: __body["remote_indices"] = remote_indices if run_as is not None: diff --git a/elasticsearch/_sync/client/sql.py b/elasticsearch/_sync/client/sql.py index b7da9229c..dc5f238e8 100644 --- a/elasticsearch/_sync/client/sql.py +++ b/elasticsearch/_sync/client/sql.py @@ -39,7 +39,7 @@ def clear_cursor( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clears the SQL cursor + Clear an SQL search cursor. ``_ @@ -84,8 +84,8 @@ def delete_async( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an async SQL search or a stored synchronous SQL search. If the search - is still running, the API cancels it. + Delete an async SQL search. Delete an async SQL search or a stored synchronous + SQL search. If the search is still running, the API cancels it. ``_ @@ -131,8 +131,8 @@ def get_async( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the current status and available results for an async SQL search or stored - synchronous SQL search + Get async SQL search results. Get the current status and available results for + an async SQL search or stored synchronous SQL search. ``_ @@ -189,8 +189,8 @@ def get_async_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the current status of an async SQL search or a stored synchronous SQL - search + Get the async SQL search status. 
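Checking the newly added `monitor_stats` cluster privilege could be sketched as follows, assuming the calling user exists on the cluster:

    resp = client.security.has_privileges(cluster=["monitor", "monitor_stats"])
    print(resp["has_all_requested"])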
Get the current status of an async SQL search + or a stored synchronous SQL search. ``_ @@ -273,7 +273,7 @@ def query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Executes a SQL request + Get SQL search results. Run an SQL request. ``_ @@ -383,7 +383,8 @@ def translate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Translates SQL into Elasticsearch queries + Translate SQL into Elasticsearch queries. Translate an SQL search into a search + API request containing Query DSL. ``_ diff --git a/elasticsearch/_sync/client/synonyms.py b/elasticsearch/_sync/client/synonyms.py index b82ec67a0..ccc4a6d89 100644 --- a/elasticsearch/_sync/client/synonyms.py +++ b/elasticsearch/_sync/client/synonyms.py @@ -36,7 +36,7 @@ def delete_synonym( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a synonym set + Delete a synonym set. ``_ @@ -77,7 +77,7 @@ def delete_synonym_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a synonym rule in a synonym set + Delete a synonym rule. Delete a synonym rule from a synonym set. ``_ @@ -127,7 +127,7 @@ def get_synonym( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a synonym set + Get a synonym set. ``_ @@ -174,7 +174,7 @@ def get_synonym_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a synonym rule from a synonym set + Get a synonym rule. Get a synonym rule from a synonym set. ``_ @@ -223,7 +223,7 @@ def get_synonyms_sets( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a summary of all defined synonym sets + Get all synonym sets. Get a summary of all defined synonym sets. ``_ @@ -272,7 +272,9 @@ def put_synonym( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a synonym set. + Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 + synonym rules per set. If you need to manage more synonym rules, you can create + multiple synonym sets. ``_ @@ -325,7 +327,8 @@ def put_synonym_rule( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a synonym rule in a synonym set + Create or update a synonym rule. Create or update a synonym rule in a synonym + set. 
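Translating SQL into a Query DSL request, as described above, might look like this sketch; the table and column names are assumptions:

    resp = client.sql.translate(
        query="SELECT message FROM my_index ORDER BY release_date DESC",
        fetch_size=10,
    )
    print(resp)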
``_ From 49352a6f43549c14208fbce529582699020cc670 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 13 Dec 2024 17:48:08 +0400 Subject: [PATCH 15/65] [Backport 8.x] Allow simsimd again on Python 3.13 (#2723) * Allow simsimd again on Python 3.13 * Remove the importorskip calls (cherry picked from commit e1603f4f25888ca7fee03ee977abafb00d512601) Co-authored-by: Quentin Pradet --- pyproject.toml | 2 +- .../test_server/test_vectorstore/test_vectorstore.py | 4 ---- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 8a55e0b67..b5f03e1d0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -70,7 +70,7 @@ dev = [ "nox", "orjson", "numpy", - "simsimd ; python_version<'3.13'", + "simsimd", "pyarrow", "pandas", "mapbox-vector-tile", diff --git a/test_elasticsearch/test_server/test_vectorstore/test_vectorstore.py b/test_elasticsearch/test_server/test_vectorstore/test_vectorstore.py index 7b675a754..3e17442eb 100644 --- a/test_elasticsearch/test_server/test_vectorstore/test_vectorstore.py +++ b/test_elasticsearch/test_server/test_vectorstore/test_vectorstore.py @@ -899,8 +899,6 @@ def test_max_marginal_relevance_search_errors( self, sync_client: Elasticsearch, index: str ) -> None: """Test max marginal relevance search error conditions.""" - pytest.importorskip("simsimd") - texts = ["foo", "bar", "baz"] vector_field = "vector_field" embedding_service = ConsistentFakeEmbeddings() @@ -942,8 +940,6 @@ def test_max_marginal_relevance_search( self, sync_client: Elasticsearch, index: str ) -> None: """Test max marginal relevance search.""" - pytest.importorskip("simsimd") - texts = ["foo", "bar", "baz"] vector_field = "vector_field" text_field = "text_field" From f319f5d990a63ad08aa23baf219bdc98e6aad29d Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Tue, 7 Jan 2025 11:26:08 +0000 Subject: [PATCH 16/65] Auto-generated API code (#2728) --- elasticsearch/_async/client/__init__.py | 10 + elasticsearch/_async/client/cat.py | 259 +++--------- elasticsearch/_async/client/ccr.py | 173 +++++--- elasticsearch/_async/client/connector.py | 237 +++++++++++ elasticsearch/_async/client/eql.py | 21 + elasticsearch/_async/client/features.py | 28 +- elasticsearch/_async/client/ilm.py | 75 +++- elasticsearch/_async/client/indices.py | 300 +++++++++++--- elasticsearch/_async/client/inference.py | 16 +- elasticsearch/_async/client/ingest.py | 177 ++++++++- elasticsearch/_async/client/license.py | 46 ++- elasticsearch/_async/client/logstash.py | 14 +- elasticsearch/_async/client/migration.py | 19 +- elasticsearch/_async/client/ml.py | 56 ++- elasticsearch/_async/client/monitoring.py | 3 +- elasticsearch/_async/client/rollup.py | 63 ++- .../_async/client/search_application.py | 131 +++++- .../_async/client/searchable_snapshots.py | 12 +- elasticsearch/_async/client/security.py | 224 +++++++++++ elasticsearch/_async/client/shutdown.py | 31 +- elasticsearch/_async/client/slm.py | 46 ++- elasticsearch/_async/client/snapshot.py | 91 ++++- elasticsearch/_async/client/tasks.py | 22 +- elasticsearch/_async/client/text_structure.py | 375 +++++++++++++++++- elasticsearch/_async/client/transform.py | 11 +- elasticsearch/_async/client/watcher.py | 53 ++- elasticsearch/_async/client/xpack.py | 10 +- elasticsearch/_sync/client/__init__.py | 10 + elasticsearch/_sync/client/cat.py | 259 +++--------- elasticsearch/_sync/client/ccr.py | 173 +++++--- elasticsearch/_sync/client/connector.py | 237 +++++++++++ 
elasticsearch/_sync/client/eql.py | 21 + elasticsearch/_sync/client/features.py | 28 +- elasticsearch/_sync/client/ilm.py | 75 +++- elasticsearch/_sync/client/indices.py | 300 +++++++++++--- elasticsearch/_sync/client/inference.py | 16 +- elasticsearch/_sync/client/ingest.py | 177 ++++++++- elasticsearch/_sync/client/license.py | 46 ++- elasticsearch/_sync/client/logstash.py | 14 +- elasticsearch/_sync/client/migration.py | 19 +- elasticsearch/_sync/client/ml.py | 56 ++- elasticsearch/_sync/client/monitoring.py | 3 +- elasticsearch/_sync/client/rollup.py | 63 ++- .../_sync/client/search_application.py | 131 +++++- .../_sync/client/searchable_snapshots.py | 12 +- elasticsearch/_sync/client/security.py | 224 +++++++++++ elasticsearch/_sync/client/shutdown.py | 31 +- elasticsearch/_sync/client/slm.py | 46 ++- elasticsearch/_sync/client/snapshot.py | 91 ++++- elasticsearch/_sync/client/tasks.py | 22 +- elasticsearch/_sync/client/text_structure.py | 375 +++++++++++++++++- elasticsearch/_sync/client/transform.py | 11 +- elasticsearch/_sync/client/watcher.py | 53 ++- elasticsearch/_sync/client/xpack.py | 10 +- 54 files changed, 4146 insertions(+), 860 deletions(-) diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py index e63c1dc77..802ec316f 100644 --- a/elasticsearch/_async/client/__init__.py +++ b/elasticsearch/_async/client/__init__.py @@ -626,12 +626,14 @@ async def bulk( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + list_executed_pipelines: t.Optional[bool] = None, pipeline: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, require_alias: t.Optional[bool] = None, + require_data_stream: t.Optional[bool] = None, routing: t.Optional[str] = None, source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None, source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None, @@ -651,6 +653,8 @@ async def bulk( :param operations: :param index: Name of the data stream, index, or index alias to perform bulk actions on. + :param list_executed_pipelines: If `true`, the response will include the ingest + pipelines that were executed for each index or create. :param pipeline: ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final @@ -661,6 +665,8 @@ async def bulk( make this operation visible to search, if `false` do nothing with refreshes. Valid values: `true`, `false`, `wait_for`. :param require_alias: If `true`, the request’s actions must target an index alias. + :param require_data_stream: If `true`, the request's actions must target a data + stream (existing or to-be-created). :param routing: Custom value used to route operations to a specific shard. :param source: `true` or `false` to return the `_source` field or not, or a list of fields to return. 
@@ -694,6 +700,8 @@ async def bulk( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if list_executed_pipelines is not None: + __query["list_executed_pipelines"] = list_executed_pipelines if pipeline is not None: __query["pipeline"] = pipeline if pretty is not None: @@ -702,6 +710,8 @@ async def bulk( __query["refresh"] = refresh if require_alias is not None: __query["require_alias"] = require_alias + if require_data_stream is not None: + __query["require_data_stream"] = require_data_stream if routing is not None: __query["routing"] = routing if source is not None: diff --git a/elasticsearch/_async/client/cat.py b/elasticsearch/_async/client/cat.py index 257470b84..c99745002 100644 --- a/elasticsearch/_async/client/cat.py +++ b/elasticsearch/_async/client/cat.py @@ -308,8 +308,6 @@ async def count( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, @@ -332,11 +330,6 @@ async def count( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -362,10 +355,6 @@ async def count( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -396,8 +385,6 @@ async def fielddata( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, @@ -418,11 +405,6 @@ async def fielddata( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. 
@@ -450,10 +432,6 @@ async def fielddata( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -480,8 +458,6 @@ async def health( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, time: t.Optional[ @@ -510,11 +486,6 @@ async def health( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -537,10 +508,6 @@ async def health( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -562,66 +529,15 @@ async def health( ) @_rewrite_parameters() - async def help( - self, - *, - error_trace: t.Optional[bool] = None, - filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, - format: t.Optional[str] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, - help: t.Optional[bool] = None, - human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - pretty: t.Optional[bool] = None, - s: t.Optional[t.Union[str, t.Sequence[str]]] = None, - v: t.Optional[bool] = None, - ) -> TextApiResponse: + async def help(self) -> TextApiResponse: """ Get CAT help. Returns help for the CAT APIs. ``_ - - :param format: Specifies the format to return the columnar data in, can be set - to `text`, `json`, `cbor`, `yaml`, or `smile`. - :param h: List of columns to appear in the response. Supports simple wildcards. - :param help: When set to `true` will output available columns. This option can't - be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. - :param v: When set to `true` will enable verbose output. 
""" __path_parts: t.Dict[str, str] = {} __path = "/_cat" __query: t.Dict[str, t.Any] = {} - if error_trace is not None: - __query["error_trace"] = error_trace - if filter_path is not None: - __query["filter_path"] = filter_path - if format is not None: - __query["format"] = format - if h is not None: - __query["h"] = h - if help is not None: - __query["help"] = help - if human is not None: - __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout - if pretty is not None: - __query["pretty"] = pretty - if s is not None: - __query["s"] = s - if v is not None: - __query["v"] = v __headers = {"accept": "text/plain"} return await self.perform_request( # type: ignore[return-value] "GET", @@ -656,7 +572,6 @@ async def indices( help: t.Optional[bool] = None, human: t.Optional[bool] = None, include_unloaded_segments: t.Optional[bool] = None, - local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, pri: t.Optional[bool] = None, @@ -694,10 +609,6 @@ async def indices( be combined with any other query string option. :param include_unloaded_segments: If true, the response includes information from segments that are not loaded into memory. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param pri: If true, the response only includes information from primary shards. :param s: List of columns that determine how the table should be sorted. Sorting @@ -734,8 +645,6 @@ async def indices( __query["human"] = human if include_unloaded_segments is not None: __query["include_unloaded_segments"] = include_unloaded_segments - if local is not None: - __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: @@ -894,8 +803,6 @@ async def ml_data_frame_analytics( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -945,7 +852,9 @@ async def ml_data_frame_analytics( ], ] ] = None, - time: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -966,11 +875,6 @@ async def ml_data_frame_analytics( :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: Comma-separated list of column names or column aliases used to sort the response. 
:param time: Unit used to display time values. @@ -1000,10 +904,6 @@ async def ml_data_frame_analytics( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -1073,8 +973,6 @@ async def ml_datafeeds( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -1145,11 +1043,6 @@ async def ml_datafeeds( :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: Comma-separated list of column names or column aliases used to sort the response. :param time: The unit used to display time values. @@ -1177,10 +1070,6 @@ async def ml_datafeeds( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -1349,8 +1238,6 @@ async def ml_jobs( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -1518,11 +1405,6 @@ async def ml_jobs( :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: Comma-separated list of column names or column aliases used to sort the response. :param time: The unit used to display time values. 
@@ -1552,10 +1434,6 @@ async def ml_jobs( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -1635,8 +1513,6 @@ async def ml_trained_models( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -1683,6 +1559,9 @@ async def ml_trained_models( ] ] = None, size: t.Optional[int] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -1708,14 +1587,10 @@ async def ml_trained_models( :param h: A comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: A comma-separated list of column names or aliases used to sort the response. :param size: The maximum number of transforms to display. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] @@ -1744,16 +1619,14 @@ async def ml_trained_models( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if size is not None: __query["size"] = size + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -1855,10 +1728,12 @@ async def nodes( help: t.Optional[bool] = None, human: t.Optional[bool] = None, include_unloaded_segments: t.Optional[bool] = None, - local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -1879,14 +1754,11 @@ async def nodes( be combined with any other query string option. :param include_unloaded_segments: If true, the response includes information from segments that are not loaded into memory. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. 
Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} @@ -1910,14 +1782,14 @@ async def nodes( __query["human"] = human if include_unloaded_segments is not None: __query["include_unloaded_segments"] = include_unloaded_segments - if local is not None: - __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -1944,6 +1816,9 @@ async def pending_tasks( master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -1967,6 +1842,7 @@ async def pending_tasks( :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} @@ -1992,6 +1868,8 @@ async def pending_tasks( __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -2014,6 +1892,7 @@ async def plugins( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, + include_bootstrap: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, @@ -2033,6 +1912,7 @@ async def plugins( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. + :param include_bootstrap: Include bootstrap plugins in the response :param local: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. 
In both cases the coordinating @@ -2058,6 +1938,8 @@ async def plugins( __query["help"] = help if human is not None: __query["human"] = human + if include_bootstrap is not None: + __query["include_bootstrap"] = include_bootstrap if local is not None: __query["local"] = local if master_timeout is not None: @@ -2094,10 +1976,11 @@ async def recovery( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -2124,14 +2007,10 @@ async def recovery( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] @@ -2160,14 +2039,12 @@ async def recovery( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -2358,10 +2235,12 @@ async def shards( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -2381,14 +2260,11 @@ async def shards( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. 
Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] @@ -2413,14 +2289,14 @@ async def shards( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -2445,10 +2321,12 @@ async def snapshots( help: t.Optional[bool] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, - local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -2470,14 +2348,11 @@ async def snapshots( be combined with any other query string option. :param ignore_unavailable: If `true`, the response does not include information from unavailable snapshots. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] @@ -2502,14 +2377,14 @@ async def snapshots( __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable - if local is not None: - __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -2535,13 +2410,16 @@ async def tasks( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - node_id: t.Optional[t.Sequence[str]] = None, + nodes: t.Optional[t.Sequence[str]] = None, parent_task_id: t.Optional[str] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, v: t.Optional[bool] = None, + wait_for_completion: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ Returns information about tasks currently executing in the cluster. 
IMPORTANT: @@ -2559,18 +2437,18 @@ async def tasks( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. - :param node_id: Unique node identifiers, which are used to limit the response. + :param nodes: Unique node identifiers, which are used to limit the response. :param parent_task_id: The parent task identifier, which is used to limit the response. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. :param v: When set to `true` will enable verbose output. + :param wait_for_completion: If `true`, the request blocks until the task has + completed. """ __path_parts: t.Dict[str, str] = {} __path = "/_cat/tasks" @@ -2591,20 +2469,22 @@ async def tasks( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout - if node_id is not None: - __query["node_id"] = node_id + if nodes is not None: + __query["nodes"] = nodes if parent_task_id is not None: __query["parent_task_id"] = parent_task_id if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time + if timeout is not None: + __query["timeout"] = timeout if v is not None: __query["v"] = v + if wait_for_completion is not None: + __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "text/plain,application/json"} return await self.perform_request( # type: ignore[return-value] "GET", @@ -2883,8 +2763,6 @@ async def transforms( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -2998,11 +2876,6 @@ async def transforms( :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: Comma-separated list of column names or column aliases used to sort the response. :param size: The maximum number of transforms to obtain. 
@@ -3033,10 +2906,6 @@ async def transforms( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: diff --git a/elasticsearch/_async/client/ccr.py b/elasticsearch/_async/client/ccr.py index 0cd6c7c4c..5b2f5e533 100644 --- a/elasticsearch/_async/client/ccr.py +++ b/elasticsearch/_async/client/ccr.py @@ -36,7 +36,8 @@ async def delete_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes auto-follow patterns. + Delete auto-follow patterns. Delete a collection of cross-cluster replication + auto-follow patterns. ``_ @@ -68,6 +69,8 @@ async def delete_auto_follow_pattern( @_rewrite_parameters( body_fields=( "leader_index", + "remote_cluster", + "data_stream_name", "max_outstanding_read_requests", "max_outstanding_write_requests", "max_read_request_operation_count", @@ -78,59 +81,89 @@ async def delete_auto_follow_pattern( "max_write_request_operation_count", "max_write_request_size", "read_poll_timeout", - "remote_cluster", + "settings", ), ) async def follow( self, *, index: str, + leader_index: t.Optional[str] = None, + remote_cluster: t.Optional[str] = None, + data_stream_name: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, - leader_index: t.Optional[str] = None, max_outstanding_read_requests: t.Optional[int] = None, max_outstanding_write_requests: t.Optional[int] = None, max_read_request_operation_count: t.Optional[int] = None, - max_read_request_size: t.Optional[str] = None, + max_read_request_size: t.Optional[t.Union[int, str]] = None, max_retry_delay: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, max_write_buffer_count: t.Optional[int] = None, - max_write_buffer_size: t.Optional[str] = None, + max_write_buffer_size: t.Optional[t.Union[int, str]] = None, max_write_request_operation_count: t.Optional[int] = None, - max_write_request_size: t.Optional[str] = None, + max_write_request_size: t.Optional[t.Union[int, str]] = None, pretty: t.Optional[bool] = None, read_poll_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - remote_cluster: t.Optional[str] = None, + settings: t.Optional[t.Mapping[str, t.Any]] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new follower index configured to follow the referenced leader index. + Create a follower. Create a cross-cluster replication follower index that follows + a specific leader index. When the API returns, the follower index exists and + cross-cluster replication starts replicating operations from the leader index + to the follower index. ``_ - :param index: The name of the follower index - :param leader_index: - :param max_outstanding_read_requests: - :param max_outstanding_write_requests: - :param max_read_request_operation_count: - :param max_read_request_size: - :param max_retry_delay: - :param max_write_buffer_count: - :param max_write_buffer_size: - :param max_write_request_operation_count: - :param max_write_request_size: - :param read_poll_timeout: - :param remote_cluster: - :param wait_for_active_shards: Sets the number of shard copies that must be active - before returning. 
Defaults to 0. Set to `all` for all shard copies, otherwise - set to any non-negative value less than or equal to the total number of copies - for the shard (number of replicas + 1) + :param index: The name of the follower index. + :param leader_index: The name of the index in the leader cluster to follow. + :param remote_cluster: The remote cluster containing the leader index. + :param data_stream_name: If the leader index is part of a data stream, the name + to which the local data stream for the followed index should be renamed. + :param max_outstanding_read_requests: The maximum number of outstanding reads + requests from the remote cluster. + :param max_outstanding_write_requests: The maximum number of outstanding write + requests on the follower. + :param max_read_request_operation_count: The maximum number of operations to + pull per read from the remote cluster. + :param max_read_request_size: The maximum size in bytes of per read of a batch + of operations pulled from the remote cluster. + :param max_retry_delay: The maximum time to wait before retrying an operation + that failed exceptionally. An exponential backoff strategy is employed when + retrying. + :param max_write_buffer_count: The maximum number of operations that can be queued + for writing. When this limit is reached, reads from the remote cluster will + be deferred until the number of queued operations goes below the limit. + :param max_write_buffer_size: The maximum total bytes of operations that can + be queued for writing. When this limit is reached, reads from the remote + cluster will be deferred until the total bytes of queued operations goes + below the limit. + :param max_write_request_operation_count: The maximum number of operations per + bulk write request executed on the follower. + :param max_write_request_size: The maximum total bytes of operations per bulk + write request executed on the follower. + :param read_poll_timeout: The maximum time to wait for new operations on the + remote cluster when the follower index is synchronized with the leader index. + When the timeout has elapsed, the poll for operations will return to the + follower so that it can update some statistics. Then the follower will immediately + attempt to read from the leader again. + :param settings: Settings to override from the leader index. + :param wait_for_active_shards: Specifies the number of shards to wait on being + active before responding. This defaults to waiting on none of the shards + to be active. A shard must be restored from the leader index before being + active. Restoring a follower shard requires transferring all the remote Lucene + segment files to the follower index. 
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") + if leader_index is None and body is None: + raise ValueError("Empty value passed for parameter 'leader_index'") + if remote_cluster is None and body is None: + raise ValueError("Empty value passed for parameter 'remote_cluster'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_ccr/follow' __query: t.Dict[str, t.Any] = {} @@ -148,6 +181,10 @@ async def follow( if not __body: if leader_index is not None: __body["leader_index"] = leader_index + if remote_cluster is not None: + __body["remote_cluster"] = remote_cluster + if data_stream_name is not None: + __body["data_stream_name"] = data_stream_name if max_outstanding_read_requests is not None: __body["max_outstanding_read_requests"] = max_outstanding_read_requests if max_outstanding_write_requests is not None: @@ -174,8 +211,8 @@ async def follow( __body["max_write_request_size"] = max_write_request_size if read_poll_timeout is not None: __body["read_poll_timeout"] = read_poll_timeout - if remote_cluster is not None: - __body["remote_cluster"] = remote_cluster + if settings is not None: + __body["settings"] = settings __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", @@ -198,8 +235,10 @@ async def follow_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about all follower indices, including parameters and status - for each follower index + Get follower information. Get information about all cross-cluster replication + follower indices. For example, the results include follower index names, leader + index names, replication options, and whether the follower indices are active + or paused. ``_ @@ -240,8 +279,9 @@ async def follow_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves follower stats. return shard-level stats about the following tasks - associated with each shard for the specified indices. + Get follower stats. Get cross-cluster replication follower stats. The API returns + shard-level stats about the "following tasks" associated with each shard for + the specified indices. ``_ @@ -294,7 +334,23 @@ async def forget_follower( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes the follower retention leases from the leader. + Forget a follower. Remove the cross-cluster replication follower retention leases + from the leader. A following index takes out retention leases on its leader index. + These leases are used to increase the likelihood that the shards of the leader + index retain the history of operations that the shards of the following index + need to run replication. When a follower index is converted to a regular index + by the unfollow API (either by directly calling the API or by index lifecycle + management tasks), these leases are removed. However, removal of the leases can + fail, for example when the remote cluster containing the leader index is unavailable. + While the leases will eventually expire on their own, their extended existence + can cause the leader index to hold more history than necessary and prevent index + lifecycle management from performing some operations on the leader index. This + API exists to enable manually removing the leases when the unfollow API is unable + to do so. NOTE: This API does not stop replication by a following index. 
If you + use this API with a follower index that is still actively following, the following + index will add back retention leases on the leader. The only purpose of this + API is to handle the case of failure to remove the following retention leases + after the unfollow API is invoked. ``_ @@ -350,8 +406,7 @@ async def get_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets configured auto-follow patterns. Returns the specified auto-follow pattern - collection. + Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. ``_ @@ -395,7 +450,14 @@ async def pause_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Pauses an auto-follow pattern + Pause an auto-follow pattern. Pause a cross-cluster replication auto-follow pattern. + When the API returns, the auto-follow pattern is inactive. New indices that are + created on the remote cluster and match the auto-follow patterns are ignored. + You can resume auto-following with the resume auto-follow pattern API. When it + resumes, the auto-follow pattern is active again and automatically configures + follower indices for newly created indices on the remote cluster that match its + patterns. Remote indices that were created while the pattern was paused will + also be followed, unless they have been deleted or closed in the interim. ``_ @@ -436,8 +498,10 @@ async def pause_follow( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Pauses a follower index. The follower index will not fetch any additional operations - from the leader index. + Pause a follower. Pause a cross-cluster replication follower index. The follower + index will not fetch any additional operations from the leader index. You can + resume following with the resume follower API. You can pause and resume a follower + index to change the configuration of the following task. ``_ @@ -512,9 +576,14 @@ async def put_auto_follow_pattern( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new named collection of auto-follow patterns against a specified remote - cluster. Newly created indices on the remote cluster matching any of the specified - patterns will be automatically configured as follower indices. + Create or update auto-follow patterns. Create a collection of cross-cluster replication + auto-follow patterns for a remote cluster. Newly created indices on the remote + cluster that match any of the patterns are automatically configured as follower + indices. Indices on the remote cluster that were created before the auto-follow + pattern was created will not be auto-followed even if they match the pattern. + This API can also be used to update auto-follow patterns. NOTE: Follower indices + that were configured automatically before updating an auto-follow pattern will + remain unchanged even if they do not match against the new patterns. ``_ @@ -638,7 +707,11 @@ async def resume_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resumes an auto-follow pattern that has been paused + Resume an auto-follow pattern. Resume a cross-cluster replication auto-follow + pattern that was paused. The auto-follow pattern will resume configuring following + indices for newly created indices that match its patterns on the remote cluster. + Remote indices created while the pattern was paused will also be followed unless + they have been deleted or closed in the interim. 
``_ @@ -703,7 +776,11 @@ async def resume_follow( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Resumes a follower index that has been paused + Resume a follower. Resume a cross-cluster replication follower index that was + paused. The follower index could have been paused with the pause follower API. + Alternatively it could be paused due to replication that cannot be retried due + to failures during following tasks. When this API returns, the follower index + will resume fetching operations from the leader index. ``_ @@ -785,7 +862,8 @@ async def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets all stats related to cross-cluster replication. + Get cross-cluster replication stats. This API returns stats about auto-following + and the same shard-level stats as the get follower stats API. ``_ """ @@ -821,8 +899,13 @@ async def unfollow( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops the following task associated with a follower index and removes index metadata - and settings associated with cross-cluster replication. + Unfollow an index. Convert a cross-cluster replication follower index to a regular + index. The API stops the following task associated with a follower index and + removes index metadata and settings associated with cross-cluster replication. + The follower index must be paused and closed before you call the unfollow API. + NOTE: Currently cross-cluster replication does not support converting an existing + regular index to a follower index. Converting a follower index to a regular index + is an irreversible operation. ``_ diff --git a/elasticsearch/_async/client/connector.py b/elasticsearch/_async/client/connector.py index 34d566333..8a3f6b4b1 100644 --- a/elasticsearch/_async/client/connector.py +++ b/elasticsearch/_async/client/connector.py @@ -589,6 +589,125 @@ async def sync_job_cancel( path_parts=__path_parts, ) + @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) + async def sync_job_check_in( + self, + *, + connector_sync_job_id: str, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Check in a connector sync job. Check in a connector sync job and set the `last_seen` + field to the current time before updating it in the internal index. To sync data + using self-managed connectors, you need to deploy the Elastic connector service + on your own infrastructure. This service runs automatically on Elastic Cloud + for Elastic managed connectors. + + ``_ + + :param connector_sync_job_id: The unique identifier of the connector sync job + to be checked in. 
+ """ + if connector_sync_job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") + __path_parts: t.Dict[str, str] = { + "connector_sync_job_id": _quote(connector_sync_job_id) + } + __path = ( + f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_check_in' + ) + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + endpoint_id="connector.sync_job_check_in", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("worker_hostname", "sync_cursor"), + ) + @_stability_warning(Stability.EXPERIMENTAL) + async def sync_job_claim( + self, + *, + connector_sync_job_id: str, + worker_hostname: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + sync_cursor: t.Optional[t.Any] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Claim a connector sync job. This action updates the job status to `in_progress` + and sets the `last_seen` and `started_at` timestamps to the current time. Additionally, + it can set the `sync_cursor` property for the sync job. This API is not intended + for direct connector management by users. It supports the implementation of services + that utilize the connector protocol to communicate with Elasticsearch. To sync + data using self-managed connectors, you need to deploy the Elastic connector + service on your own infrastructure. This service runs automatically on Elastic + Cloud for Elastic managed connectors. + + ``_ + + :param connector_sync_job_id: The unique identifier of the connector sync job. + :param worker_hostname: The host name of the current system that will run the + job. + :param sync_cursor: The cursor object from the last incremental sync job. This + should reference the `sync_cursor` field in the connector state for which + the job runs. 
+ """ + if connector_sync_job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") + if worker_hostname is None and body is None: + raise ValueError("Empty value passed for parameter 'worker_hostname'") + __path_parts: t.Dict[str, str] = { + "connector_sync_job_id": _quote(connector_sync_job_id) + } + __path = f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_claim' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if worker_hostname is not None: + __body["worker_hostname"] = worker_hostname + if sync_cursor is not None: + __body["sync_cursor"] = sync_cursor + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="connector.sync_job_claim", + path_parts=__path_parts, + ) + @_rewrite_parameters() @_stability_warning(Stability.BETA) async def sync_job_delete( @@ -634,6 +753,64 @@ async def sync_job_delete( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("error",), + ) + @_stability_warning(Stability.EXPERIMENTAL) + async def sync_job_error( + self, + *, + connector_sync_job_id: str, + error: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Set a connector sync job error. Set the `error` field for a connector sync job + and set its `status` to `error`. To sync data using self-managed connectors, + you need to deploy the Elastic connector service on your own infrastructure. + This service runs automatically on Elastic Cloud for Elastic managed connectors. + + ``_ + + :param connector_sync_job_id: The unique identifier for the connector sync job. + :param error: The error for the connector sync job error field. 
+ """ + if connector_sync_job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") + if error is None and body is None: + raise ValueError("Empty value passed for parameter 'error'") + __path_parts: t.Dict[str, str] = { + "connector_sync_job_id": _quote(connector_sync_job_id) + } + __path = f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_error' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if error is not None: + __body["error"] = error + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="connector.sync_job_error", + path_parts=__path_parts, + ) + @_rewrite_parameters() @_stability_warning(Stability.BETA) async def sync_job_get( @@ -1032,6 +1209,66 @@ async def update_error( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("features",), + ) + @_stability_warning(Stability.EXPERIMENTAL) + async def update_features( + self, + *, + connector_id: str, + features: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Update the connector features. Update the connector features in the connector + document. This API can be used to control the following aspects of a connector: + * document-level security * incremental syncs * advanced sync rules * basic sync + rules Normally, the running connector service automatically manages these features. + However, you can use this API to override the default behavior. To sync data + using self-managed connectors, you need to deploy the Elastic connector service + on your own infrastructure. This service runs automatically on Elastic Cloud + for Elastic managed connectors. + + ``_ + + :param connector_id: The unique identifier of the connector to be updated. 
+ :param features: + """ + if connector_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'connector_id'") + if features is None and body is None: + raise ValueError("Empty value passed for parameter 'features'") + __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} + __path = f'/_connector/{__path_parts["connector_id"]}/_features' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if features is not None: + __body["features"] = features + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="connector.update_features", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=("advanced_snippet", "filtering", "rules"), ) diff --git a/elasticsearch/_async/client/eql.py b/elasticsearch/_async/client/eql.py index 1a8239eec..47af75be0 100644 --- a/elasticsearch/_async/client/eql.py +++ b/elasticsearch/_async/client/eql.py @@ -167,6 +167,8 @@ async def get_status( @_rewrite_parameters( body_fields=( "query", + "allow_partial_search_results", + "allow_partial_sequence_results", "case_sensitive", "event_category_field", "fetch_size", @@ -174,6 +176,7 @@ async def get_status( "filter", "keep_alive", "keep_on_completion", + "max_samples_per_key", "result_position", "runtime_mappings", "size", @@ -188,6 +191,8 @@ async def search( index: t.Union[str, t.Sequence[str]], query: t.Optional[str] = None, allow_no_indices: t.Optional[bool] = None, + allow_partial_search_results: t.Optional[bool] = None, + allow_partial_sequence_results: t.Optional[bool] = None, case_sensitive: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, event_category_field: t.Optional[str] = None, @@ -211,6 +216,7 @@ async def search( ignore_unavailable: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, keep_on_completion: t.Optional[bool] = None, + max_samples_per_key: t.Optional[int] = None, pretty: t.Optional[bool] = None, result_position: t.Optional[t.Union[str, t.Literal["head", "tail"]]] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, @@ -232,6 +238,8 @@ async def search( :param index: The name of the index to scope the operation :param query: EQL query you wish to run. :param allow_no_indices: + :param allow_partial_search_results: + :param allow_partial_sequence_results: :param case_sensitive: :param event_category_field: Field containing the event classification, such as process, file, or network. @@ -246,6 +254,11 @@ async def search( in the response. :param keep_alive: :param keep_on_completion: + :param max_samples_per_key: By default, the response of a sample query contains + up to `10` samples, with one sample per unique set of join keys. Use the + `size` parameter to get a smaller or larger set of samples. To retrieve more + than one sample per set of join keys, use the `max_samples_per_key` parameter. + Pipes are not supported for sample queries. :param result_position: :param runtime_mappings: :param size: For basic queries, the maximum number of matching events to return. 
@@ -280,6 +293,12 @@ async def search( if not __body: if query is not None: __body["query"] = query + if allow_partial_search_results is not None: + __body["allow_partial_search_results"] = allow_partial_search_results + if allow_partial_sequence_results is not None: + __body["allow_partial_sequence_results"] = ( + allow_partial_sequence_results + ) if case_sensitive is not None: __body["case_sensitive"] = case_sensitive if event_category_field is not None: @@ -294,6 +313,8 @@ async def search( __body["keep_alive"] = keep_alive if keep_on_completion is not None: __body["keep_on_completion"] = keep_on_completion + if max_samples_per_key is not None: + __body["max_samples_per_key"] = max_samples_per_key if result_position is not None: __body["result_position"] = result_position if runtime_mappings is not None: diff --git a/elasticsearch/_async/client/features.py b/elasticsearch/_async/client/features.py index 32fecf55a..ed85bbb4b 100644 --- a/elasticsearch/_async/client/features.py +++ b/elasticsearch/_async/client/features.py @@ -35,8 +35,17 @@ async def get_features( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets a list of features which can be included in snapshots using the feature_states - field when creating a snapshot + Get the features. Get a list of features that can be included in snapshots using + the `feature_states` field when creating a snapshot. You can use this API to + determine which feature states to include when taking a snapshot. By default, + all feature states are included in a snapshot if that snapshot includes the global + state, or none if it does not. A feature state includes one or more system indices + necessary for a given feature to function. In order to ensure data integrity, + all system indices that comprise a feature state are snapshotted and restored + together. The features listed by this API are a combination of built-in features + and features defined by plugins. In order for a feature state to be listed in + this API and recognized as a valid feature state by the create snapshot API, + the plugin that defines that feature must be installed on the master node. ``_ """ @@ -72,7 +81,20 @@ async def reset_features( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resets the internal state of features, usually by deleting system indices + Reset the features. Clear all of the state information stored in system indices + by Elasticsearch features, including the security and machine learning indices. + WARNING: Intended for development and testing use only. Do not reset features + on a production cluster. Return a cluster to the same state as a new installation + by resetting the feature state for all Elasticsearch features. This deletes all + state information stored in system indices. The response code is HTTP 200 if + the state is successfully reset for all features. It is HTTP 500 if the reset + operation failed for any feature. Note that select features might provide a way + to reset particular system indices. Using this API resets all features, both + those that are built-in and implemented as plugins. To list the features that + will be affected, use the get features API. IMPORTANT: The features installed + on the node you submit this request to are the features that will be reset. Run + on the master node if you have any doubts about which plugins are installed on + individual nodes. 
``_ """ diff --git a/elasticsearch/_async/client/ilm.py b/elasticsearch/_async/client/ilm.py index 66df40190..009706e41 100644 --- a/elasticsearch/_async/client/ilm.py +++ b/elasticsearch/_async/client/ilm.py @@ -38,9 +38,9 @@ async def delete_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes the specified lifecycle policy definition. You cannot delete policies - that are currently in use. If the policy is being used to manage any indices, - the request fails and returns an error. + Delete a lifecycle policy. You cannot delete policies that are currently in use. + If the policy is being used to manage any indices, the request fails and returns + an error. ``_ @@ -93,9 +93,11 @@ async def explain_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the index’s current lifecycle state, such as the - currently executing phase, action, and step. Shows when the index entered each - one, the definition of the running phase, and information about any failures. + Explain the lifecycle state. Get the current lifecycle status for one or more + indices. For data streams, the API retrieves the current lifecycle status for + the stream's backing indices. The response indicates when the index entered each + lifecycle state, provides the definition of the running phase, and information + about any failures. ``_ @@ -157,7 +159,7 @@ async def get_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a lifecycle policy. + Get lifecycle policies. ``_ @@ -208,7 +210,7 @@ async def get_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the current index lifecycle management (ILM) status. + Get the ILM status. Get the current index lifecycle management status. ``_ """ @@ -249,10 +251,18 @@ async def migrate_to_data_tiers( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Switches the indices, ILM policies, and legacy, composable and component templates - from using custom node attributes and attribute-based allocation filters to using - data tiers, and optionally deletes one legacy index template.+ Using node roles - enables ILM to automatically move the indices between data tiers. + Migrate to data tiers routing. Switch the indices, ILM policies, and legacy, + composable, and component templates from using custom node attributes and attribute-based + allocation filters to using data tiers. Optionally, delete one legacy index template. + Using node roles enables ILM to automatically move the indices between data tiers. + Migrating away from custom node attributes routing can be manually performed. + This API provides an automated way of performing three out of the four manual + steps listed in the migration guide: 1. Stop setting the custom hot attribute + on new indices. 1. Remove custom allocation settings from existing ILM policies. + 1. Replace custom allocation settings from existing indices with the corresponding + tier preference. ILM must be stopped before performing the migration. Use the + stop ILM and get ILM status APIs to wait until the reported operation mode is + `STOPPED`. ``_ @@ -312,7 +322,21 @@ async def move_to_step( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Manually moves an index into the specified step and executes that step. 
+ Move to a lifecycle step. Manually move an index into a specific step in the + lifecycle policy and run that step. WARNING: This operation can result in the + loss of data. Manually moving an index into a specific step runs that step even + if it has already been performed. This is a potentially destructive action and + this should be considered an expert level API. You must specify both the current + step and the step to be executed in the body of the request. The request will + fail if the current step does not match the step currently running for the index + This is to prevent the index from being moved from an unexpected step into the + next step. When specifying the target (`next_step`) to which the index will be + moved, either the name or both the action and name fields are optional. If only + the phase is specified, the index will move to the first step of the first action + in the target phase. If the phase and action are specified, the index will move + to the first step of the specified action in the specified phase. Only actions + specified in the ILM policy are considered valid. An index cannot move to a step + that is not part of its policy. ``_ @@ -375,8 +399,9 @@ async def put_lifecycle( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a lifecycle policy. If the specified policy exists, the policy is replaced - and the policy version is incremented. + Create or update a lifecycle policy. If the specified policy exists, it is replaced + and the policy version is incremented. NOTE: Only the latest version of the policy + is stored, you cannot revert to previous versions. ``_ @@ -435,7 +460,8 @@ async def remove_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes the assigned lifecycle policy and stops managing the specified index + Remove policies from an index. Remove the assigned lifecycle policies from an + index or a data stream's backing indices. It also stops managing the indices. ``_ @@ -475,7 +501,10 @@ async def retry( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retries executing the policy for an index that is in the ERROR step. + Retry a policy. Retry running the lifecycle policy for an index that is in the + ERROR step. The API sets the policy back to the step where the error occurred + and runs the step. Use the explain lifecycle state API to determine whether an + index is in the ERROR step. ``_ @@ -517,7 +546,9 @@ async def start( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Start the index lifecycle management (ILM) plugin. + Start the ILM plugin. Start the index lifecycle management plugin if it is currently + stopped. ILM is started automatically when the cluster is formed. Restarting + ILM is necessary only when it has been stopped using the stop ILM API. ``_ @@ -561,8 +592,12 @@ async def stop( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Halts all lifecycle management operations and stops the index lifecycle management - (ILM) plugin + Stop the ILM plugin. Halt all lifecycle management operations and stop the index + lifecycle management plugin. This is useful when you are performing maintenance + on the cluster and need to prevent ILM from performing any actions on your indices. 
+ The API returns as soon as the stop request has been acknowledged, but the plugin + might continue to run until in-progress operations complete and the plugin can + be safely stopped. Use the get ILM status API to check whether ILM is running. ``_ diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py index e7af76ecc..11768a72a 100644 --- a/elasticsearch/_async/client/indices.py +++ b/elasticsearch/_async/client/indices.py @@ -245,8 +245,8 @@ async def clear_cache( request: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clears the caches of one or more indices. For data streams, the API clears the - caches of the stream’s backing indices. + Clear the cache. Clear the cache of one or more indices. For data streams, the + API clears the caches of the stream's backing indices. ``_ @@ -331,7 +331,26 @@ async def clone( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clones an existing index. + Clone an index. Clone an existing index into a new index. Each original primary + shard is cloned into a new primary shard in the new index. IMPORTANT: Elasticsearch + does not apply index templates to the resulting index. The API also does not + copy index metadata from the original index. Index metadata includes aliases, + index lifecycle management phase definitions, and cross-cluster replication (CCR) + follower information. For example, if you clone a CCR follower index, the resulting + clone will not be a follower index. The clone API copies most index settings + from the source index to the resulting index, with the exception of `index.number_of_replicas` + and `index.auto_expand_replicas`. To set the number of replicas in the resulting + index, configure these settings in the clone request. Cloning works as follows: + * First, it creates a new target index with the same definition as the source + index. * Then it hard-links segments from the source index into the target index. + If the file system does not support hard-linking, all segments are copied into + the new index, which is a much more time consuming process. * Finally, it recovers + the target index as though it were a closed index which had just been re-opened. + IMPORTANT: Indices can only be cloned if they meet the following requirements: + * The target index must not exist. * The source index must have the same number + of primary shards as the target index. * The node handling the clone process + must have sufficient free disk space to accommodate a second copy of the existing + index. ``_ @@ -419,7 +438,24 @@ async def close( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Closes an index. + Close an index. A closed index is blocked for read or write operations and does + not allow all operations that opened indices allow. It is not possible to index + documents or to search for documents in a closed index. Closed indices do not + have to maintain internal data structures for indexing or searching documents, + which results in a smaller overhead on the cluster. When opening or closing an + index, the master node is responsible for restarting the index shards to reflect + the new state of the index. The shards will then go through the normal recovery + process. The data of opened and closed indices is automatically replicated by + the cluster to ensure that enough shard copies are safely kept around at all + times. You can open and close multiple indices. An error is thrown if the request + explicitly refers to a missing index. 
This behaviour can be turned off using + the `ignore_unavailable=true` parameter. By default, you must explicitly name + the indices you are opening or closing. To open or close indices with `_all`, + `*`, or other wildcard expressions, change the` action.destructive_requires_name` + setting to `false`. This setting can also be changed with the cluster update + settings API. Closed indices consume a significant amount of disk-space which + can cause problems in managed environments. Closing indices can be turned off + with the cluster settings API by setting `cluster.indices.close.enable` to `false`. ``_ @@ -1061,7 +1097,10 @@ async def disk_usage( run_expensive_tasks: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Analyzes the disk usage of each field of an index or data stream. + Analyze the index disk usage. Analyze the disk usage of each field of an index + or data stream. This API might not support indices created in previous Elasticsearch + versions. The result of a small index can be inaccurate as some parts of an index + might not be analyzed by the API. ``_ @@ -1135,9 +1174,14 @@ async def downsample( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Aggregates a time series (TSDS) index and stores pre-computed statistical summaries - (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped - by a configured time interval. + Downsample an index. Aggregate a time series (TSDS) index and store pre-computed + statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each + metric field grouped by a configured time interval. For example, a TSDS index + that contains metrics sampled every 10 seconds can be downsampled to an hourly + index. All documents within an hour interval are summarized and stored as a single + document in the downsample index. NOTE: Only indices in a time series data stream + are supported. Neither field nor document level security can be defined on the + source index. The source index must be read only (`index.blocks.write: true`). ``_ @@ -1456,8 +1500,8 @@ async def explain_data_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the status for a data stream lifecycle. Retrieves information about an index - or data stream’s current data stream lifecycle status, such as time since index + Get the status for a data stream lifecycle. Get information about an index or + data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. @@ -1523,7 +1567,10 @@ async def field_usage_stats( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns field usage information for each shard and field of an index. + Get field usage stats. Get field usage information for each shard and field of + an index. Field usage statistics are automatically captured when queries are + running on a cluster. A shard-level search request that accesses a given field, + even if multiple times during that request, is counted as a single use. ``_ @@ -1611,7 +1658,22 @@ async def flush( wait_if_ongoing: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Flushes one or more data streams or indices. + Flush data streams or indices. Flushing a data stream or index is the process + of making sure that any data that is currently only stored in the transaction + log is also permanently stored in the Lucene index. 
When restarting, Elasticsearch + replays any unflushed operations from the transaction log into the Lucene index + to bring it back into the state that it was in before the restart. Elasticsearch + automatically triggers flushes as needed, using heuristics that trade off the + size of the unflushed transaction log against the cost of performing each flush. + After each operation has been flushed it is permanently stored in the Lucene + index. This may mean that there is no need to maintain an additional copy of + it in the transaction log. The transaction log is made up of multiple files, + called generations, and Elasticsearch will delete any generation files when they + are no longer needed, freeing up disk space. It is also possible to trigger a + flush on one or more indices using the flush API, although it is rare for users + to need to call this API directly. If you call the flush API after indexing some + documents then a successful response indicates that Elasticsearch has flushed + all the documents that were indexed before the flush API was called. ``_ @@ -1694,7 +1756,21 @@ async def forcemerge( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Performs the force merge operation on one or more indices. + Force a merge. Perform the force merge operation on the shards of one or more + indices. For data streams, the API forces a merge on the shards of the stream's + backing indices. Merging reduces the number of segments in each shard by merging + some of them together and also frees up the space used by deleted documents. + Merging normally happens automatically, but sometimes it is useful to trigger + a merge manually. WARNING: We recommend force merging only a read-only index + (meaning the index is no longer receiving writes). When documents are updated + or deleted, the old version is not immediately removed but instead soft-deleted + and marked with a "tombstone". These soft-deleted documents are automatically + cleaned up during regular segment merges. But force merge can cause very large + (greater than 5 GB) segments to be produced, which are not eligible for regular + merges. So the number of soft-deleted documents can then grow rapidly, resulting + in higher disk usage and worse search performance. If you regularly force merge + an index receiving writes, this can also make snapshots more expensive, since + the new documents can't be backed up incrementally. ``_ @@ -2679,8 +2755,18 @@ async def promote_data_stream( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Promotes a data stream from a replicated data stream managed by CCR to a regular - data stream + Promote a data stream. Promote a data stream from a replicated data stream managed + by cross-cluster replication (CCR) to a regular data stream. With CCR auto following, + a data stream from a remote cluster can be replicated to the local cluster. These + data streams can't be rolled over in the local cluster. These replicated data + streams roll over only if the upstream data stream rolls over. In the event that + the remote cluster is no longer available, the data stream in the local cluster + can be promoted to a regular data stream, which allows these data streams to + be rolled over in the local cluster. NOTE: When promoting a data stream, ensure + the local cluster has a data stream enabled index template that matches the data + stream. If this is missing, the data stream will not be able to roll over until + a matching index template is created. 
This will affect the lifecycle management + of the data stream and interfere with the data stream size and retention. ``_ @@ -2819,14 +2905,14 @@ async def put_alias( ) @_rewrite_parameters( - body_fields=("data_retention", "downsampling"), + body_name="lifecycle", ) async def put_data_lifecycle( self, *, name: t.Union[str, t.Sequence[str]], - data_retention: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - downsampling: t.Optional[t.Mapping[str, t.Any]] = None, + lifecycle: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ @@ -2841,7 +2927,6 @@ async def put_data_lifecycle( master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ Update data stream lifecycles. Update the data stream lifecycle of the specified @@ -2851,13 +2936,7 @@ async def put_data_lifecycle( :param name: Comma-separated list of data streams used to limit the request. Supports wildcards (`*`). To target all data streams use `*` or `_all`. - :param data_retention: If defined, every document added to this data stream will - be stored at least for this time frame. Any time after this duration the - document could be deleted. When empty, every document in this data stream - will be stored indefinitely. - :param downsampling: If defined, every backing index will execute the configured - downsampling configuration after the backing index is not the data stream - write index anymore. + :param lifecycle: :param expand_wildcards: Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `hidden`, `open`, `closed`, `none`. @@ -2869,10 +2948,15 @@ async def put_data_lifecycle( """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") + if lifecycle is None and body is None: + raise ValueError( + "Empty value passed for parameters 'lifecycle' and 'body', one of them should be set." + ) + elif lifecycle is not None and body is not None: + raise ValueError("Cannot set both 'lifecycle' and 'body'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}/_lifecycle' __query: t.Dict[str, t.Any] = {} - __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: @@ -2887,16 +2971,8 @@ async def put_data_lifecycle( __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout - if not __body: - if data_retention is not None: - __body["data_retention"] = data_retention - if downsampling is not None: - __body["downsampling"] = downsampling - if not __body: - __body = None # type: ignore[assignment] - __headers = {"accept": "application/json"} - if __body is not None: - __headers["content-type"] = "application/json" + __body = lifecycle if lifecycle is not None else body + __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, @@ -3343,7 +3419,16 @@ async def put_template( ) -> ObjectApiResponse[t.Any]: """ Create or update an index template. 
Index templates define settings, mappings, - and aliases that can be applied automatically to new indices. + and aliases that can be applied automatically to new indices. Elasticsearch applies + templates to new indices based on an index pattern that matches the index name. + IMPORTANT: This documentation is about legacy index templates, which are deprecated + and will be replaced by the composable templates introduced in Elasticsearch + 7.8. Composable templates always take precedence over legacy templates. If no + composable template matches a new index, matching legacy templates are applied + according to their order. Index templates are only applied during index creation. + Changes to index templates do not affect existing indices. Settings and mappings + specified in create index API requests override any settings or mappings specified + in an index template. ``_ @@ -3423,9 +3508,25 @@ async def recovery( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about ongoing and completed shard recoveries for one or more - indices. For data streams, the API returns information for the stream’s backing - indices. + Get index recovery information. Get information about ongoing and completed shard + recoveries for one or more indices. For data streams, the API returns information + for the stream's backing indices. Shard recovery is the process of initializing + a shard copy, such as restoring a primary shard from a snapshot or creating a + replica shard from a primary shard. When a shard recovery completes, the recovered + shard is available for search and indexing. Recovery automatically occurs during + the following processes: * When creating an index for the first time. * When + a node rejoins the cluster and starts up any missing primary shard copies using + the data that it holds in its data path. * Creation of new replica shard copies + from the primary. * Relocation of a shard copy to a different node in the same + cluster. * A snapshot restore operation. * A clone, shrink, or split operation. + You can determine the cause of a shard recovery using the recovery or cat recovery + APIs. The index recovery API reports information about completed recoveries only + for shard copies that currently exist in the cluster. It only reports the last + recovery for each shard copy and does not report historical information about + earlier recoveries, nor does it report information about the recoveries of shard + copies that no longer exist. This means that if a shard copy completes a recovery + and then Elasticsearch relocates it onto a different node then the information + about the original recovery will not be shown in the recovery API. ``_ @@ -3559,7 +3660,21 @@ async def reload_search_analyzers( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Reloads an index's search analyzers and their resources. + Reload search analyzers. Reload an index's search analyzers and their resources. + For data streams, the API reloads search analyzers and resources for the stream's + backing indices. IMPORTANT: After reloading the search analyzers you should clear + the request cache to make sure it doesn't contain responses derived from the + previous versions of the analyzer. You can use the reload search analyzers API + to pick up changes to synonym files used in the `synonym_graph` or `synonym` + token filter of a search analyzer. To be eligible, the token filter must have + an `updateable` flag of `true` and only be used in search analyzers. 
NOTE: This + API does not perform a reload for each shard of an index. Instead, it performs + a reload for each node containing index shards. As a result, the total shard + count returned by the API can differ from the number of index shards. Because + reloading affects every node with an index shard, it is important to update the + synonym file on every data node in the cluster--including nodes that don't contain + a shard replica--before using this API. This ensures the synonym file is updated + everywhere in the cluster in case shards are relocated in the future. ``_ @@ -3623,9 +3738,20 @@ async def resolve_cluster( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resolves the specified index expressions to return information about each cluster, - including the local cluster, if included. Multiple patterns and remote clusters - are supported. + Resolve the cluster. Resolve the specified index expressions to return information + about each cluster, including the local cluster, if included. Multiple patterns + and remote clusters are supported. This endpoint is useful before doing a cross-cluster + search in order to determine which remote clusters should be included in a search. + You use the same index expression with this endpoint as you would for cross-cluster + search. Index and cluster exclusions are also supported with this endpoint. For + each cluster in the index expression, information is returned about: * Whether + the querying ("local") cluster is currently connected to each remote cluster + in the index expression scope. * Whether each remote cluster is configured with + `skip_unavailable` as `true` or `false`. * Whether there are any indices, aliases, + or data streams on that cluster that match the index expression. * Whether the + search is likely to have errors returned when you do the cross-cluster search + (including any authorization errors if you do not have permission to query the + index). * Cluster version information, including the Elasticsearch server version. ``_ @@ -3877,8 +4003,9 @@ async def segments( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns low-level information about the Lucene segments in index shards. For - data streams, the API returns information about the stream’s backing indices. + Get index segments. Get low-level information about the Lucene segments in index + shards. For data streams, the API returns information about the stream's backing + indices. ``_ @@ -3957,8 +4084,14 @@ async def shard_stores( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves store information about replica shards in one or more indices. For - data streams, the API retrieves store information for the stream’s backing indices. + Get index shard stores. Get store information about replica shards in one or + more indices. For data streams, the API retrieves store information for the stream's + backing indices. The index shard stores API returns the following information: + * The node on which each replica shard exists. * The allocation ID for each replica + shard. * A unique ID for each replica shard. * Any errors encountered while opening + the shard index or from an earlier failure. By default, the API returns store + information only for primary shards that are unassigned or have one or more unassigned + replica shards. 
``_ @@ -4029,7 +4162,39 @@ async def shrink( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Shrinks an existing index into a new index with fewer primary shards. + Shrink an index. Shrink an index into a new index with fewer primary shards. + Before you can shrink an index: * The index must be read-only. * A copy of every + shard in the index must reside on the same node. * The index must have a green + health status. To make shard allocation easier, we recommend you also remove + the index's replica shards. You can later re-add replica shards as part of the + shrink operation. The requested number of primary shards in the target index + must be a factor of the number of shards in the source index. For example an + index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an + index with 15 primary shards can be shrunk into 5, 3 or 1. If the number of shards + in the index is a prime number it can only be shrunk into a single primary shard + Before shrinking, a (primary or replica) copy of every shard in the index must + be present on the same node. The current write index on a data stream cannot + be shrunk. In order to shrink the current write index, the data stream must first + be rolled over so that a new write index is created and then the previous write + index can be shrunk. A shrink operation: * Creates a new target index with the + same definition as the source index, but with a smaller number of primary shards. + * Hard-links segments from the source index into the target index. If the file + system does not support hard-linking, then all segments are copied into the new + index, which is a much more time consuming process. Also if using multiple data + paths, shards on different data paths require a full copy of segment files if + they are not on the same disk since hardlinks do not work across disks. * Recovers + the target index as though it were a closed index which had just been re-opened. + Recovers shards to the `.routing.allocation.initial_recovery._id` index setting. + IMPORTANT: Indices can only be shrunk if they satisfy the following requirements: + * The target index must not exist. * The source index must have more primary + shards than the target index. * The number of primary shards in the target index + must be a factor of the number of primary shards in the source index. The source + index must have more primary shards than the target index. * The index must not + contain more than 2,147,483,519 documents in total across all shards that will + be shrunk into a single shard on the target index as this is the maximum number + of docs that can fit into a single shard. * The node handling the shrink process + must have sufficient free disk space to accommodate a second copy of the existing + index. ``_ @@ -4314,7 +4479,27 @@ async def split( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Splits an existing index into a new index with more primary shards. + Split an index. Split an index into a new index with more primary shards. * Before + you can split an index: * The index must be read-only. * The cluster health status + must be green. The number of times the index can be split (and the number of + shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` + setting. The number of routing shards specifies the hashing space that is used + internally to distribute documents across shards with consistent hashing. 
For + instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x + 3) could be split by a factor of 2 or 3. A split operation: * Creates a new target + index with the same definition as the source index, but with a larger number + of primary shards. * Hard-links segments from the source index into the target + index. If the file system doesn't support hard-linking, all segments are copied + into the new index, which is a much more time consuming process. * Hashes all + documents again, after low level files are created, to delete documents that + belong to a different shard. * Recovers the target index as though it were a + closed index which had just been re-opened. IMPORTANT: Indices can only be split + if they satisfy the following requirements: * The target index must not exist. + * The source index must have fewer primary shards than the target index. * The + number of primary shards in the target index must be a multiple of the number + of primary shards in the source index. * The node handling the split process + must have sufficient free disk space to accommodate a second copy of the existing + index. ``_ @@ -4406,8 +4591,14 @@ async def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns statistics for one or more indices. For data streams, the API retrieves - statistics for the stream’s backing indices. + Get index statistics. For data streams, the API retrieves statistics for the + stream's backing indices. By default, the returned statistics are index-level + with `primaries` and `total` aggregations. `primaries` are the values for only + the primary shards. `total` are the accumulated values for both primary and replica + shards. To get shard-level statistics, set the `level` parameter to `shards`. + NOTE: When moving to another node, the shard-level statistics for a shard are + cleared. Although the shard is no longer part of the node, that node retains + any node-level statistics to which the shard contributed. ``_ @@ -4510,7 +4701,8 @@ async def unfreeze( wait_for_active_shards: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Unfreezes an index. + Unfreeze an index. When a frozen index is unfrozen, the index goes through the + normal recovery process and becomes writeable again. ``_ diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py index 701ba6835..60addf118 100644 --- a/elasticsearch/_async/client/inference.py +++ b/elasticsearch/_async/client/inference.py @@ -255,7 +255,21 @@ async def put( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Create an inference endpoint + Create an inference endpoint. When you create an inference endpoint, the associated + machine learning model is automatically deployed if it is not already running. + After creating the endpoint, wait for the model deployment to complete before + using it. To verify the deployment status, use the get trained model statistics + API. Look for `"state": "fully_allocated"` in the response and ensure that the + `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating + multiple endpoints for the same model unless required, as each endpoint consumes + significant resources. IMPORTANT: The inference APIs enable you to use certain + services, such as built-in machine learning models (ELSER, E5), models uploaded + through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google + Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. 
For built-in models and models + uploaded through Eland, the inference APIs offer an alternative way to use and + manage trained models. However, if you do not plan to use the inference APIs + to use these models or if you want to use non-NLP models, use the machine learning + trained model APIs. ``_ diff --git a/elasticsearch/_async/client/ingest.py b/elasticsearch/_async/client/ingest.py index ecd516365..c2585b037 100644 --- a/elasticsearch/_async/client/ingest.py +++ b/elasticsearch/_async/client/ingest.py @@ -77,6 +77,59 @@ async def delete_geoip_database( path_parts=__path_parts, ) + @_rewrite_parameters() + async def delete_ip_location_database( + self, + *, + id: t.Union[str, t.Sequence[str]], + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Delete IP geolocation database configurations. + + ``_ + + :param id: A comma-separated list of IP location database configurations. + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. A value of `-1` indicates that the request should never + time out. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. A value + of `-1` indicates that the request should never time out. + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_ingest/ip_location/database/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "DELETE", + __path, + params=__query, + headers=__headers, + endpoint_id="ingest.delete_ip_location_database", + path_parts=__path_parts, + ) + @_rewrite_parameters() async def delete_pipeline( self, @@ -217,6 +270,58 @@ async def get_geoip_database( path_parts=__path_parts, ) + @_rewrite_parameters() + async def get_ip_location_database( + self, + *, + id: t.Optional[t.Union[str, t.Sequence[str]]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Get IP geolocation database configurations. + + ``_ + + :param id: Comma-separated list of database configuration IDs to retrieve. Wildcard + (`*`) expressions are supported. To get all database configurations, omit + this parameter or use `*`. + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. 
A value of `-1` indicates that the request should never + time out. + """ + __path_parts: t.Dict[str, str] + if id not in SKIP_IN_PATH: + __path_parts = {"id": _quote(id)} + __path = f'/_ingest/ip_location/database/{__path_parts["id"]}' + else: + __path_parts = {} + __path = "/_ingest/ip_location/database" + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="ingest.get_ip_location_database", + path_parts=__path_parts, + ) + @_rewrite_parameters() async def get_pipeline( self, @@ -328,8 +433,8 @@ async def put_geoip_database( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update GeoIP database configurations. Create or update IP geolocation - database configurations. + Create or update a GeoIP database configuration. Refer to the create or update + IP geolocation database configuration API. ``_ @@ -384,6 +489,74 @@ async def put_geoip_database( path_parts=__path_parts, ) + @_rewrite_parameters( + body_name="configuration", + ) + async def put_ip_location_database( + self, + *, + id: str, + configuration: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Create or update an IP geolocation database configuration. + + ``_ + + :param id: The database configuration identifier. + :param configuration: + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. A value of `-1` indicates that the request should never + time out. + :param timeout: The period to wait for a response from all relevant nodes in + the cluster after updating the cluster metadata. If no response is received + before the timeout expires, the cluster metadata update still applies but + the response indicates that it was not completely acknowledged. A value of + `-1` indicates that the request should never time out. + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + if configuration is None and body is None: + raise ValueError( + "Empty value passed for parameters 'configuration' and 'body', one of them should be set." 
+ ) + elif configuration is not None and body is not None: + raise ValueError("Cannot set both 'configuration' and 'body'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_ingest/ip_location/database/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + __body = configuration if configuration is not None else body + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="ingest.put_ip_location_database", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=( "deprecated", diff --git a/elasticsearch/_async/client/license.py b/elasticsearch/_async/client/license.py index fd9f0957c..72f606865 100644 --- a/elasticsearch/_async/client/license.py +++ b/elasticsearch/_async/client/license.py @@ -35,7 +35,9 @@ async def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes licensing information for the cluster + Delete the license. When the license expires, your subscription level reverts + to Basic. If the operator privileges feature is enabled, only operator users + can use this API. ``_ """ @@ -72,9 +74,11 @@ async def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get license information. Returns information about your Elastic license, including - its type, its status, when it was issued, and when it expires. For more information - about the different types of licenses, refer to [Elastic Stack subscriptions](https://www.elastic.co/subscriptions). + Get license information. Get information about your Elastic license including + its type, its status, when it was issued, and when it expires. NOTE: If the master + node is generating a new cluster state, the get license API may return a `404 + Not Found` response. If you receive an unexpected 404 response after cluster + startup, wait a short period and retry the request. ``_ @@ -120,7 +124,7 @@ async def get_basic_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the status of the basic license. + Get the basic license status. ``_ """ @@ -155,7 +159,7 @@ async def get_trial_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the status of the trial license. + Get the trial status. ``_ """ @@ -196,7 +200,14 @@ async def post( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the license for the cluster. + Update the license. You can update your license at runtime without shutting down + your nodes. License updates take effect immediately. If the license you are installing + does not support all of the features that were available with your previous license, + however, you are notified in the response. You must then re-submit the API request + with the acknowledge parameter set to true. 
NOTE: If Elasticsearch security features + are enabled and you are installing a gold or higher license, you must enable + TLS on the transport networking layer before you install the license. If the + operator privileges feature is enabled, only operator users can use this API. ``_ @@ -250,12 +261,13 @@ async def post_start_basic( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - The start basic API enables you to initiate an indefinite basic license, which - gives access to all the basic features. If the basic license does not support - all of the features that are available with your current license, however, you - are notified in the response. You must then re-submit the API request with the - acknowledge parameter set to true. To check the status of your basic license, - use the following API: [Get basic status](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html). + Start a basic license. Start an indefinite basic license, which gives access + to all the basic features. NOTE: In order to start a basic license, you must + not currently have a basic license. If the basic license does not support all + of the features that are available with your current license, however, you are + notified in the response. You must then re-submit the API request with the `acknowledge` + parameter set to `true`. To check the status of your basic license, use the get + basic license API. ``_ @@ -297,8 +309,12 @@ async def post_start_trial( type_query_string: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - The start trial API enables you to start a 30-day trial, which gives access to - all subscription features. + Start a trial. Start a 30-day trial, which gives access to all subscription features. + NOTE: You are allowed to start a trial only if your cluster has not already activated + a trial for the current major product version. For example, if you have already + activated a trial for v8.0, you cannot start a new trial until v9.0. You can, + however, request an extended trial at https://www.elastic.co/trialextension. + To check the status of your trial, use the get trial status API. ``_ diff --git a/elasticsearch/_async/client/logstash.py b/elasticsearch/_async/client/logstash.py index c25a79bdd..c4651a39a 100644 --- a/elasticsearch/_async/client/logstash.py +++ b/elasticsearch/_async/client/logstash.py @@ -36,11 +36,12 @@ async def delete_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a pipeline used for Logstash Central Management. + Delete a Logstash pipeline. Delete a pipeline that is used for Logstash Central + Management. ``_ - :param id: Identifier for the pipeline. + :param id: An identifier for the pipeline. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -76,11 +77,11 @@ async def get_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves pipelines used for Logstash Central Management. + Get Logstash pipelines. Get pipelines that are used for Logstash Central Management. ``_ - :param id: Comma-separated list of pipeline identifiers. + :param id: A comma-separated list of pipeline identifiers. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: @@ -123,11 +124,12 @@ async def put_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a pipeline used for Logstash Central Management. + Create or update a Logstash pipeline. 
Create a pipeline that is used for Logstash + Central Management. If the specified pipeline exists, it is replaced. ``_ - :param id: Identifier for the pipeline. + :param id: An identifier for the pipeline. :param pipeline: """ if id in SKIP_IN_PATH: diff --git a/elasticsearch/_async/client/migration.py b/elasticsearch/_async/client/migration.py index f73dff7c3..e864e2ecf 100644 --- a/elasticsearch/_async/client/migration.py +++ b/elasticsearch/_async/client/migration.py @@ -36,9 +36,10 @@ async def deprecations( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about different cluster, node, and index level settings - that use deprecated features that will be removed or changed in the next major - version. + Get deprecation information. Get information about different cluster, node, and + index level settings that use deprecated features that will be removed or changed + in the next major version. TIP: This APIs is designed for indirect use by the + Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. ``_ @@ -81,7 +82,11 @@ async def get_feature_upgrade_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Find out whether system features need to be upgraded or not + Get feature migration information. Version upgrades sometimes require changes + to how features store configuration information and data in system indices. Check + which features need to be migrated and the status of any migrations that are + in progress. TIP: This API is designed for indirect use by the Upgrade Assistant. + We strongly recommend you use the Upgrade Assistant. ``_ """ @@ -116,7 +121,11 @@ async def post_feature_upgrade( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Begin upgrades for system features + Start the feature migration. Version upgrades sometimes require changes to how + features store configuration information and data in system indices. This API + starts the automatic migration process. Some functionality might be temporarily + unavailable during the migration process. TIP: The API is designed for indirect + use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. ``_ """ diff --git a/elasticsearch/_async/client/ml.py b/elasticsearch/_async/client/ml.py index da3a23b1c..c2edb5858 100644 --- a/elasticsearch/_async/client/ml.py +++ b/elasticsearch/_async/client/ml.py @@ -2488,6 +2488,7 @@ async def get_trained_models( ], ] ] = None, + include_model_definition: t.Optional[bool] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, tags: t.Optional[t.Union[str, t.Sequence[str]]] = None, @@ -2514,6 +2515,8 @@ async def get_trained_models( :param from_: Skips the specified number of models. :param include: A comma delimited string of optional fields to include in the response body. + :param include_model_definition: parameter is deprecated! Use [include=definition] + instead :param size: Specifies the maximum number of models to obtain. :param tags: A comma delimited string of tags. A trained model can have many tags, or none. 
When supplied, only trained models that contain all the supplied @@ -2543,6 +2546,8 @@ async def get_trained_models( __query["human"] = human if include is not None: __query["include"] = include + if include_model_definition is not None: + __query["include_model_definition"] = include_model_definition if pretty is not None: __query["pretty"] = pretty if size is not None: @@ -2697,7 +2702,7 @@ async def info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Return ML defaults and limits. Returns defaults and limits used by machine learning. + Get machine learning information. Get defaults and limits used by machine learning. This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be used to find out @@ -3169,9 +3174,11 @@ async def put_calendar_job( "description", "headers", "max_num_threads", + "meta", "model_memory_limit", "version", ), + parameter_aliases={"_meta": "meta"}, ignore_deprecated_options={"headers"}, ) async def put_data_frame_analytics( @@ -3189,6 +3196,7 @@ async def put_data_frame_analytics( headers: t.Optional[t.Mapping[str, t.Union[str, t.Sequence[str]]]] = None, human: t.Optional[bool] = None, max_num_threads: t.Optional[int] = None, + meta: t.Optional[t.Mapping[str, t.Any]] = None, model_memory_limit: t.Optional[str] = None, pretty: t.Optional[bool] = None, version: t.Optional[str] = None, @@ -3249,6 +3257,7 @@ async def put_data_frame_analytics( Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. + :param meta: :param model_memory_limit: The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs @@ -3293,6 +3302,8 @@ async def put_data_frame_analytics( __body["headers"] = headers if max_num_threads is not None: __body["max_num_threads"] = max_num_threads + if meta is not None: + __body["_meta"] = meta if model_memory_limit is not None: __body["model_memory_limit"] = model_memory_limit if version is not None: @@ -3311,6 +3322,7 @@ async def put_data_frame_analytics( @_rewrite_parameters( body_fields=( "aggregations", + "aggs", "chunking_config", "delayed_data_check_config", "frequency", @@ -3333,6 +3345,7 @@ async def put_datafeed( *, datafeed_id: str, aggregations: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, + aggs: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, allow_no_indices: t.Optional[bool] = None, chunking_config: t.Optional[t.Mapping[str, t.Any]] = None, delayed_data_check_config: t.Optional[t.Mapping[str, t.Any]] = None, @@ -3386,6 +3399,8 @@ async def put_datafeed( :param aggregations: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. + :param aggs: If set, the datafeed performs aggregation searches. Support for + aggregations is limited and should be used only with low cardinality data. :param allow_no_indices: If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. 
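A minimal sketch of the new `meta` keyword on `ml.put_data_frame_analytics`, which the parameter alias above serializes into the request body as `_meta`; the job id, index names, and analysis config are illustrative assumptions:

```python
# Sketch only: the job id, indices, and analysis config are illustrative.
from elasticsearch import AsyncElasticsearch


async def create_analytics_job(client: AsyncElasticsearch) -> None:
    await client.ml.put_data_frame_analytics(
        id="my-outlier-job",
        source={"index": "my-source-index"},
        dest={"index": "my-dest-index"},
        analysis={"outlier_detection": {}},
        # New in this patch: `meta` is sent in the body as `_meta`.
        meta={"team": "observability"},
    )
```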
@@ -3473,6 +3488,8 @@ async def put_datafeed( if not __body: if aggregations is not None: __body["aggregations"] = aggregations + if aggs is not None: + __body["aggs"] = aggs if chunking_config is not None: __body["chunking_config"] = chunking_config if delayed_data_check_config is not None: @@ -3595,6 +3612,7 @@ async def put_job( analysis_config: t.Optional[t.Mapping[str, t.Any]] = None, data_description: t.Optional[t.Mapping[str, t.Any]] = None, allow_lazy_open: t.Optional[bool] = None, + allow_no_indices: t.Optional[bool] = None, analysis_limits: t.Optional[t.Mapping[str, t.Any]] = None, background_persist_interval: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] @@ -3604,9 +3622,19 @@ async def put_job( datafeed_config: t.Optional[t.Mapping[str, t.Any]] = None, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, + expand_wildcards: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] + ], + t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], + ] + ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, groups: t.Optional[t.Sequence[str]] = None, human: t.Optional[bool] = None, + ignore_throttled: t.Optional[bool] = None, + ignore_unavailable: t.Optional[bool] = None, model_plot_config: t.Optional[t.Mapping[str, t.Any]] = None, model_snapshot_retention_days: t.Optional[int] = None, pretty: t.Optional[bool] = None, @@ -3641,6 +3669,9 @@ async def put_job( to true, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available. + :param allow_no_indices: If `true`, wildcard indices expressions that resolve + into no concrete indices are ignored. This includes the `_all` string or + when no indices are specified. :param analysis_limits: Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for @@ -3664,7 +3695,20 @@ async def put_job( using those same roles. If you provide secondary authorization headers, those credentials are used instead. :param description: A description of the job. + :param expand_wildcards: Type of index that wildcard patterns can match. If the + request can target data streams, this argument determines whether wildcard + expressions match hidden data streams. Supports comma-separated values. Valid + values are: * `all`: Match any data stream or index, including hidden ones. + * `closed`: Match closed, non-hidden indices. Also matches any non-hidden + data stream. Data streams cannot be closed. * `hidden`: Match hidden data + streams and hidden indices. Must be combined with `open`, `closed`, or both. + * `none`: Wildcard patterns are not accepted. * `open`: Match open, non-hidden + indices. Also matches any non-hidden data stream. :param groups: A list of job groups. A job can belong to no groups or many. + :param ignore_throttled: If `true`, concrete, expanded or aliased indices are + ignored when frozen. + :param ignore_unavailable: If `true`, unavailable indices (missing or closed) + are ignored. :param model_plot_config: This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. 
If you enable model plot it can add considerable overhead to the performance @@ -3704,12 +3748,20 @@ async def put_job( __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} + if allow_no_indices is not None: + __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace + if expand_wildcards is not None: + __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if ignore_throttled is not None: + __query["ignore_throttled"] = ignore_throttled + if ignore_unavailable is not None: + __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if not __body: @@ -5469,7 +5521,7 @@ async def validate_detector( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Validates an anomaly detection detector. + Validate an anomaly detection job. ``_ diff --git a/elasticsearch/_async/client/monitoring.py b/elasticsearch/_async/client/monitoring.py index e5eeacc3a..8c2d962fd 100644 --- a/elasticsearch/_async/client/monitoring.py +++ b/elasticsearch/_async/client/monitoring.py @@ -42,7 +42,8 @@ async def bulk( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Used by the monitoring features to send monitoring data. + Send monitoring data. This API is used by the monitoring features to send monitoring + data. ``_ diff --git a/elasticsearch/_async/client/rollup.py b/elasticsearch/_async/client/rollup.py index 154090dd2..11b4c5cda 100644 --- a/elasticsearch/_async/client/rollup.py +++ b/elasticsearch/_async/client/rollup.py @@ -43,7 +43,20 @@ async def delete_job( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an existing rollup job. + Delete a rollup job. A job must be stopped before it can be deleted. If you attempt + to delete a started job, an error occurs. Similarly, if you attempt to delete + a nonexistent job, an exception occurs. IMPORTANT: When you delete a job, you + remove only the process that is actively monitoring and rolling up data. The + API does not delete any previously rolled up data. This is by design; a user + may wish to roll up a static data set. Because the data set is static, after + it has been fully rolled up there is no need to keep the indexing rollup job + around (as there will be no new data). Thus the job can be deleted, leaving behind + the rolled up data for analysis. If you wish to also remove the rollup data and + the rollup index contains the data for only a single job, you can delete the + whole rollup index. If the rollup index stores data from several jobs, you must + issue a delete-by-query that targets the rollup job's identifier in the rollup + index. For example: ``` POST my_rollup_index/_delete_by_query { "query": { "term": + { "_rollup.id": "the_rollup_job_id" } } } ``` ``_ @@ -84,7 +97,11 @@ async def get_jobs( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the configuration, stats, and status of rollup jobs. + Get rollup job information. Get the configuration, stats, and status of rollup + jobs. NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs. + If a job was created, ran for a while, then was deleted, the API does not return + any details about it. For details about a historical rollup job, the rollup capabilities + API may be more useful. 
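The expanded `delete_job` documentation above notes that deleting a rollup job leaves the rolled-up data in place. A rough Python-client equivalent of the delete-by-query shown in that docstring follows; the index and job ID come from the docstring's own example.

```python
from elasticsearch import AsyncElasticsearch

async def delete_rollup_job_and_data() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster

    # Delete the job itself (it must already be stopped); this removes only the
    # process that monitors and rolls up data, not the rolled-up documents.
    await client.rollup.delete_job(id="the_rollup_job_id")

    # Then remove its documents from a shared rollup index, per the docstring.
    await client.delete_by_query(
        index="my_rollup_index",
        query={"term": {"_rollup.id": "the_rollup_job_id"}},
    )

    await client.close()
```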
``_ @@ -129,8 +146,15 @@ async def get_rollup_caps( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the capabilities of any rollup jobs that have been configured for a specific - index or index pattern. + Get the rollup job capabilities. Get the capabilities of any rollup jobs that + have been configured for a specific index or index pattern. This API is useful + because a rollup job is often configured to rollup only a subset of fields from + the source index. Furthermore, only certain aggregations can be configured for + various fields, leading to a limited subset of functionality depending on that + configuration. This API enables you to inspect an index and determine: 1. Does + this index have associated rollup data somewhere in the cluster? 2. If yes to + the first question, what fields were rolled up, what aggregations can be performed, + and where does the data live? ``_ @@ -175,8 +199,12 @@ async def get_rollup_index_caps( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the rollup capabilities of all jobs inside of a rollup index (for example, - the index where rollup data is stored). + Get the rollup index capabilities. Get the rollup capabilities of all jobs inside + of a rollup index. A single rollup index may store the data for multiple rollup + jobs and may have a variety of capabilities depending on those jobs. This API + enables you to determine: * What jobs are stored in an index (or indices specified + via a pattern)? * What target indices were rolled up, what fields were used in + those rollups, and what aggregations can be performed on each job? ``_ @@ -239,7 +267,16 @@ async def put_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a rollup job. + Create a rollup job. WARNING: From 8.15.0, calling this API in a cluster with + no rollup usage will fail with a message about the deprecation and planned removal + of rollup features. A cluster needs to contain either a rollup job or a rollup + index in order for this API to be allowed to run. The rollup job configuration + contains all the details about how the job should run, when it indexes documents, + and what future queries will be able to run against the rollup index. There are + three main sections to the job configuration: the logistical details about the + job (for example, the cron schedule), the fields that are used for grouping, + and what metrics to collect for each group. Jobs are created in a `STOPPED` state. + You can start them with the start rollup jobs API. ``_ @@ -356,7 +393,11 @@ async def rollup_search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables searching rolled-up data using the standard Query DSL. + Search rolled-up data. The rollup search endpoint is needed because, internally, + rolled-up documents utilize a different document structure than the original + data. It rewrites standard Query DSL into a format that matches the rollup documents + then takes the response and rewrites it back to what a client would expect given + the original query. ``_ @@ -420,7 +461,8 @@ async def start_job( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts an existing, stopped rollup job. + Start rollup jobs. If you try to start a job that does not exist, an exception + occurs. If you try to start a job that is already started, nothing happens. 
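A loose sketch of the lifecycle described above: rollup jobs are created in a `STOPPED` state and must be started explicitly, and rolled-up data is queried through `rollup_search` rather than the normal search API. The job and index names and the aggregation are illustrative, and the exact `rollup_search` keyword arguments are an assumption based on the underlying request body.

```python
from elasticsearch import AsyncElasticsearch

async def rollup_lifecycle() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster

    # A newly created rollup job sits in the STOPPED state until started.
    await client.rollup.start_job(id="sensor_rollup")      # illustrative job ID

    # Rolled-up documents use their own structure, so query them via rollup_search.
    await client.rollup.rollup_search(
        index="sensor_rollup_index",                        # illustrative rollup index
        size=0,
        aggs={"max_temperature": {"max": {"field": "temperature"}}},
    )

    await client.close()
```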
``_ @@ -463,7 +505,8 @@ async def stop_job( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops an existing, started rollup job. + Stop rollup jobs. If you try to stop a job that does not exist, an exception + occurs. If you try to stop a job that is already stopped, nothing happens. ``_ diff --git a/elasticsearch/_async/client/search_application.py b/elasticsearch/_async/client/search_application.py index b8462f575..7ef00087f 100644 --- a/elasticsearch/_async/client/search_application.py +++ b/elasticsearch/_async/client/search_application.py @@ -216,7 +216,7 @@ async def list( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the existing search applications. + Get search applications. Get information about search applications. ``_ @@ -251,6 +251,71 @@ async def list( path_parts=__path_parts, ) + @_rewrite_parameters( + body_name="payload", + ) + @_stability_warning(Stability.EXPERIMENTAL) + async def post_behavioral_analytics_event( + self, + *, + collection_name: str, + event_type: t.Union[str, t.Literal["page_view", "search", "search_click"]], + payload: t.Optional[t.Any] = None, + body: t.Optional[t.Any] = None, + debug: t.Optional[bool] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Create a behavioral analytics collection event. + + ``_ + + :param collection_name: The name of the behavioral analytics collection. + :param event_type: The analytics event type. + :param payload: + :param debug: Whether the response type has to include more details + """ + if collection_name in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'collection_name'") + if event_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'event_type'") + if payload is None and body is None: + raise ValueError( + "Empty value passed for parameters 'payload' and 'body', one of them should be set." 
+ ) + elif payload is not None and body is not None: + raise ValueError("Cannot set both 'payload' and 'body'") + __path_parts: t.Dict[str, str] = { + "collection_name": _quote(collection_name), + "event_type": _quote(event_type), + } + __path = f'/_application/analytics/{__path_parts["collection_name"]}/event/{__path_parts["event_type"]}' + __query: t.Dict[str, t.Any] = {} + if debug is not None: + __query["debug"] = debug + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __body = payload if payload is not None else body + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="search_application.post_behavioral_analytics_event", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_name="search_application", ) @@ -351,6 +416,70 @@ async def put_behavioral_analytics( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("params",), + ignore_deprecated_options={"params"}, + ) + @_stability_warning(Stability.EXPERIMENTAL) + async def render_query( + self, + *, + name: str, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + params: t.Optional[t.Mapping[str, t.Any]] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Render a search application query. Generate an Elasticsearch query using the + specified query parameters and the search template associated with the search + application or a default template if none is specified. If a parameter used in + the search template is not specified in `params`, the parameter's default value + will be used. The API returns the specific Elasticsearch query that would be + generated and run by calling the search application search API. You must have + `read` privileges on the backing alias of the search application. + + ``_ + + :param name: The name of the search application to render teh query for. 
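A rough end-to-end sketch of the two experimental search-application additions above. The collection name, application name, template parameter, and event payload are all illustrative; the payload shape depends on the event type and is not spelled out in this patch.

```python
from elasticsearch import AsyncElasticsearch

async def search_application_examples() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster

    # Record a behavioral analytics event for an existing collection.
    await client.search_application.post_behavioral_analytics_event(
        collection_name="my_analytics_collection",    # illustrative collection
        event_type="search_click",
        payload={"session": {"id": "sess-1"}, "user": {"id": "user-1"}},  # schematic payload
        debug=True,
    )

    # Preview the Elasticsearch query the search application would run.
    rendered = await client.search_application.render_query(
        name="my_search_application",                 # illustrative application name
        params={"query_string": "pizza"},             # template parameters are app-specific
    )
    print(rendered)

    await client.close()
```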
+ :param params: + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'name'") + __path_parts: t.Dict[str, str] = {"name": _quote(name)} + __path = ( + f'/_application/search_application/{__path_parts["name"]}/_render_query' + ) + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if params is not None: + __body["params"] = params + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="search_application.render_query", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=("params",), ignore_deprecated_options={"params"}, diff --git a/elasticsearch/_async/client/searchable_snapshots.py b/elasticsearch/_async/client/searchable_snapshots.py index 092e29ede..dbef68c7b 100644 --- a/elasticsearch/_async/client/searchable_snapshots.py +++ b/elasticsearch/_async/client/searchable_snapshots.py @@ -44,7 +44,8 @@ async def cache_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieve node-level cache statistics about searchable snapshots. + Get cache statistics. Get statistics about the shared cache for partially mounted + indices. ``_ @@ -103,7 +104,8 @@ async def clear_cache( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear the cache of searchable snapshots. + Clear the cache. Clear indices and data streams from the shared cache for partially + mounted indices. ``_ @@ -175,7 +177,9 @@ async def mount( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Mount a snapshot as a searchable index. + Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use + this API for snapshots managed by index lifecycle management (ILM). Manually + mounting ILM-managed snapshots can interfere with ILM processes. ``_ @@ -255,7 +259,7 @@ async def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieve shard-level statistics about searchable snapshots. + Get searchable snapshot statistics. ``_ diff --git a/elasticsearch/_async/client/security.py b/elasticsearch/_async/client/security.py index 2304eb2cf..816fba31e 100644 --- a/elasticsearch/_async/client/security.py +++ b/elasticsearch/_async/client/security.py @@ -2326,6 +2326,230 @@ async def invalidate_token( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("nonce", "redirect_uri", "state", "realm"), + ) + async def oidc_authenticate( + self, + *, + nonce: t.Optional[str] = None, + redirect_uri: t.Optional[str] = None, + state: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + realm: t.Optional[str] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Authenticate OpenID Connect. 
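For the `mount` docstring above, a minimal sketch of mounting an index from a snapshot as a partially mounted (shared-cache) searchable snapshot. The repository, snapshot, and index names are illustrative, and the warning about ILM-managed snapshots still applies.

```python
from elasticsearch import AsyncElasticsearch

async def mount_searchable_snapshot() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster

    await client.searchable_snapshots.mount(
        repository="my_repository",        # illustrative repository name
        snapshot="my_snapshot",            # illustrative snapshot name
        index="my_index",                  # index inside the snapshot to mount
        renamed_index="my_index_mounted",  # optional new name for the mounted index
        storage="shared_cache",            # partially mounted; the cache APIs above apply
        wait_for_completion=True,
    )

    await client.close()
```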
Exchange an OpenID Connect authentication response + message for an Elasticsearch internal access token and refresh token that can + be subsequently used for authentication. Elasticsearch exposes all the necessary + OpenID Connect related functionality with the OpenID Connect APIs. These APIs + are used internally by Kibana in order to provide OpenID Connect based authentication, + but can also be used by other, custom web applications or other clients. + + ``_ + + :param nonce: Associate a client session with an ID token and mitigate replay + attacks. This value needs to be the same as the one that was provided to + the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch + and included in the response to that call. + :param redirect_uri: The URL to which the OpenID Connect Provider redirected + the User Agent in response to an authentication request after a successful + authentication. This URL must be provided as-is (URL encoded), taken from + the body of the response or as the value of a location header in the response + from the OpenID Connect Provider. + :param state: Maintain state between the authentication request and the response. + This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` + API or the one that was generated by Elasticsearch and included in the response + to that call. + :param realm: The name of the OpenID Connect realm. This property is useful in + cases where multiple realms are defined. + """ + if nonce is None and body is None: + raise ValueError("Empty value passed for parameter 'nonce'") + if redirect_uri is None and body is None: + raise ValueError("Empty value passed for parameter 'redirect_uri'") + if state is None and body is None: + raise ValueError("Empty value passed for parameter 'state'") + __path_parts: t.Dict[str, str] = {} + __path = "/_security/oidc/authenticate" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if nonce is not None: + __body["nonce"] = nonce + if redirect_uri is not None: + __body["redirect_uri"] = redirect_uri + if state is not None: + __body["state"] = state + if realm is not None: + __body["realm"] = realm + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="security.oidc_authenticate", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("access_token", "refresh_token"), + ) + async def oidc_logout( + self, + *, + access_token: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + refresh_token: t.Optional[str] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Logout of OpenID Connect. Invalidate an access token and a refresh token that + were generated as a response to the `/_security/oidc/authenticate` API. 
If the + OpenID Connect authentication realm in Elasticsearch is accordingly configured, + the response to this call will contain a URI pointing to the end session endpoint + of the OpenID Connect Provider in order to perform single logout. Elasticsearch + exposes all the necessary OpenID Connect related functionality with the OpenID + Connect APIs. These APIs are used internally by Kibana in order to provide OpenID + Connect based authentication, but can also be used by other, custom web applications + or other clients. + + ``_ + + :param access_token: The access token to be invalidated. + :param refresh_token: The refresh token to be invalidated. + """ + if access_token is None and body is None: + raise ValueError("Empty value passed for parameter 'access_token'") + __path_parts: t.Dict[str, str] = {} + __path = "/_security/oidc/logout" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if access_token is not None: + __body["access_token"] = access_token + if refresh_token is not None: + __body["refresh_token"] = refresh_token + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="security.oidc_logout", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("iss", "login_hint", "nonce", "realm", "state"), + ) + async def oidc_prepare_authentication( + self, + *, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + iss: t.Optional[str] = None, + login_hint: t.Optional[str] = None, + nonce: t.Optional[str] = None, + pretty: t.Optional[bool] = None, + realm: t.Optional[str] = None, + state: t.Optional[str] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Prepare OpenID connect authentication. Create an oAuth 2.0 authentication request + as a URL string based on the configuration of the OpenID Connect authentication + realm in Elasticsearch. The response of this API is a URL pointing to the Authorization + Endpoint of the configured OpenID Connect Provider, which can be used to redirect + the browser of the user in order to continue the authentication process. Elasticsearch + exposes all the necessary OpenID Connect related functionality with the OpenID + Connect APIs. These APIs are used internally by Kibana in order to provide OpenID + Connect based authentication, but can also be used by other, custom web applications + or other clients. + + ``_ + + :param iss: In the case of a third party initiated single sign on, this is the + issuer identifier for the OP that the RP is to send the authentication request + to. It cannot be specified when *realm* is specified. One of *realm* or *iss* + is required. + :param login_hint: In the case of a third party initiated single sign on, it + is a string value that is included in the authentication request as the *login_hint* + parameter. This parameter is not valid when *realm* is specified. + :param nonce: The value used to associate a client session with an ID token and + to mitigate replay attacks. 
If the caller of the API does not provide a value, + Elasticsearch will generate one with sufficient entropy and return it in + the response. + :param realm: The name of the OpenID Connect realm in Elasticsearch the configuration + of which should be used in order to generate the authentication request. + It cannot be specified when *iss* is specified. One of *realm* or *iss* is + required. + :param state: The value used to maintain state between the authentication request + and the response, typically used as a Cross-Site Request Forgery mitigation. + If the caller of the API does not provide a value, Elasticsearch will generate + one with sufficient entropy and return it in the response. + """ + __path_parts: t.Dict[str, str] = {} + __path = "/_security/oidc/prepare" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if iss is not None: + __body["iss"] = iss + if login_hint is not None: + __body["login_hint"] = login_hint + if nonce is not None: + __body["nonce"] = nonce + if realm is not None: + __body["realm"] = realm + if state is not None: + __body["state"] = state + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="security.oidc_prepare_authentication", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_name="privileges", ) diff --git a/elasticsearch/_async/client/shutdown.py b/elasticsearch/_async/client/shutdown.py index 0301435c9..e4117bff8 100644 --- a/elasticsearch/_async/client/shutdown.py +++ b/elasticsearch/_async/client/shutdown.py @@ -42,8 +42,13 @@ async def delete_node( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes a node from the shutdown list. Designed for indirect use by ECE/ESS and - ECK. Direct use is not supported. + Cancel node shutdown preparations. Remove a node from the shutdown list so it + can resume normal operations. You must explicitly clear the shutdown request + when a node rejoins the cluster or when a node has permanently left the cluster. + Shutdown requests are never removed automatically by Elasticsearch. NOTE: This + feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, + and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator + privileges feature is enabled, you must be an operator to use this API. ``_ @@ -98,8 +103,13 @@ async def get_node( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieve status of a node or nodes that are currently marked as shutting down. - Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. + Get the shutdown status. Get information about nodes that are ready to be shut + down, have shut down preparations still in progress, or have stalled. The API + returns status information for each part of the shut down process. NOTE: This + feature is designed for indirect use by Elasticsearch Service, Elastic Cloud + Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If + the operator privileges feature is enabled, you must be an operator to use this + API. 
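Putting the three OpenID Connect helpers above together, a very rough sketch of the flow a custom (non-Kibana) client would drive. The realm name is illustrative, and the response key names (`redirect`, `state`, `nonce`, `access_token`, `refresh_token`) are assumptions about the API responses rather than something shown in this patch.

```python
from elasticsearch import AsyncElasticsearch

async def oidc_flow() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster

    # 1. Build the authentication request URL for the configured OIDC realm.
    prepared = await client.security.oidc_prepare_authentication(realm="oidc1")
    # ... redirect the user's browser to prepared["redirect"] and wait for the
    # OpenID Connect Provider to send them back with an authentication response ...

    # 2. Exchange the OP's response for Elasticsearch access and refresh tokens.
    tokens = await client.security.oidc_authenticate(
        redirect_uri="https://my-app.example/callback?code=...&state=...",  # as returned by the OP
        state=prepared["state"],
        nonce=prepared["nonce"],
        realm="oidc1",
    )

    # 3. Later, invalidate both tokens to log the user out.
    await client.security.oidc_logout(
        access_token=tokens["access_token"],
        refresh_token=tokens["refresh_token"],
    )

    await client.close()
```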
``_ @@ -166,8 +176,17 @@ async def put_node( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. Direct - use is not supported. + Prepare a node to be shut down. NOTE: This feature is designed for indirect use + by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. + Direct use is not supported. If the operator privileges feature is enabled, you + must be an operator to use this API. The API migrates ongoing tasks and index + shards to other nodes as needed to prepare a node to be restarted or shut down + and removed from the cluster. This ensures that Elasticsearch can be stopped + safely with minimal disruption to the cluster. You must specify the type of shutdown: + `restart`, `remove`, or `replace`. If a node is already being prepared for shutdown, + you can use this API to change the shutdown type. IMPORTANT: This API does NOT + terminate the Elasticsearch process. Monitor the node shutdown status to determine + when it is safe to stop Elasticsearch. ``_ diff --git a/elasticsearch/_async/client/slm.py b/elasticsearch/_async/client/slm.py index bbf64654b..180ff26e5 100644 --- a/elasticsearch/_async/client/slm.py +++ b/elasticsearch/_async/client/slm.py @@ -36,7 +36,9 @@ async def delete_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an existing snapshot lifecycle policy. + Delete a policy. Delete a snapshot lifecycle policy definition. This operation + prevents any future snapshots from being taken but does not cancel in-progress + snapshots or remove previously-taken snapshots. ``_ @@ -76,8 +78,10 @@ async def execute_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Immediately creates a snapshot according to the lifecycle policy, without waiting - for the scheduled time. + Run a policy. Immediately create a snapshot according to the snapshot lifecycle + policy without waiting for the scheduled time. The snapshot policy is normally + applied according to its schedule, but you might want to manually run a policy + before performing an upgrade or other maintenance. ``_ @@ -116,7 +120,9 @@ async def execute_retention( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes any snapshots that are expired according to the policy's retention rules. + Run a retention policy. Manually apply the retention policy to force immediate + removal of snapshots that are expired according to the snapshot lifecycle policy + retention rules. The retention policy is normally applied according to its schedule. ``_ """ @@ -152,8 +158,8 @@ async def get_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves one or more snapshot lifecycle policy definitions and information about - the latest snapshot attempts. + Get policy information. Get snapshot lifecycle policy definitions and information + about the latest snapshot attempts. ``_ @@ -195,8 +201,8 @@ async def get_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns global and policy-level statistics about actions taken by snapshot lifecycle - management. + Get snapshot lifecycle management statistics. Get global and policy-level statistics + about actions taken by snapshot lifecycle management. 
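A small sketch of the node-shutdown cycle the docstrings above describe: register a shutdown request, monitor it, then clear it once the node has restarted or left. The node ID is illustrative, and this API is intended for Elastic Cloud, ECE, and ECK managed clusters.

```python
from elasticsearch import AsyncElasticsearch

async def node_shutdown_cycle() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster
    node_id = "USpTGYaBSIKbgSUJR2Z9lg"        # illustrative node ID

    # Prepare the node for a restart; shards and tasks are migrated as needed.
    await client.shutdown.put_node(
        node_id=node_id,
        type="restart",
        reason="Routine maintenance",
    )

    # Poll the status until the node is ready to be stopped.
    status = await client.shutdown.get_node(node_id=node_id)
    print(status)

    # Once the node has rejoined (or left permanently), clear the request.
    await client.shutdown.delete_node(node_id=node_id)

    await client.close()
```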
``_ """ @@ -231,7 +237,7 @@ async def get_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the status of snapshot lifecycle management (SLM). + Get the snapshot lifecycle management status. ``_ """ @@ -277,12 +283,14 @@ async def put_lifecycle( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a snapshot lifecycle policy. + Create or update a policy. Create or update a snapshot lifecycle policy. If the + policy already exists, this request increments the policy version. Only the latest + version of a policy is stored. ``_ - :param policy_id: ID for the snapshot lifecycle policy you want to create or - update. + :param policy_id: The identifier for the snapshot lifecycle policy you want to + create or update. :param config: Configuration for each snapshot created by the policy. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and @@ -354,7 +362,9 @@ async def start( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Turns on snapshot lifecycle management (SLM). + Start snapshot lifecycle management. Snapshot lifecycle management (SLM) starts + automatically when a cluster is formed. Manually starting SLM is necessary only + if it has been stopped using the stop SLM API. ``_ """ @@ -389,7 +399,15 @@ async def stop( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Turns off snapshot lifecycle management (SLM). + Stop snapshot lifecycle management. Stop all snapshot lifecycle management (SLM) + operations and the SLM plugin. This API is useful when you are performing maintenance + on a cluster and need to prevent SLM from performing any actions on your data + streams or indices. Stopping SLM does not stop any snapshots that are in progress. + You can manually trigger snapshots with the run snapshot lifecycle policy API + even if SLM is stopped. The API returns a response as soon as the request is + acknowledged, but the plugin might continue to run until in-progress operations + complete and it can be safely stopped. Use the get snapshot lifecycle management + status API to see if SLM is running. ``_ """ diff --git a/elasticsearch/_async/client/snapshot.py b/elasticsearch/_async/client/snapshot.py index b9dfdc634..15e443077 100644 --- a/elasticsearch/_async/client/snapshot.py +++ b/elasticsearch/_async/client/snapshot.py @@ -44,8 +44,8 @@ async def cleanup_repository( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Triggers the review of a snapshot repository’s contents and deletes any stale - data not referenced by existing snapshots. + Clean up the snapshot repository. Trigger the review of the contents of a snapshot + repository and delete any stale data not referenced by existing snapshots. ``_ @@ -99,7 +99,8 @@ async def clone( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clones indices from one snapshot into another snapshot in the same repository. + Clone a snapshot. Clone part of all of a snapshot into another snapshot in the + same repository. ``_ @@ -182,7 +183,7 @@ async def create( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a snapshot in a repository. + Create a snapshot. Take a snapshot of a cluster or of data streams and indices. 
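As a rough illustration of the SLM policy lifecycle covered above: create or update a policy, run it ahead of schedule, then check statistics. The policy ID, schedule, and repository are illustrative.

```python
from elasticsearch import AsyncElasticsearch

async def slm_policy_example() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster

    # Create or update a policy; repeated calls increment the stored policy version.
    await client.slm.put_lifecycle(
        policy_id="nightly-snapshots",                 # illustrative policy ID
        name="<nightly-snap-{now/d}>",
        schedule="0 30 1 * * ?",                       # 01:30 every day
        repository="my_repository",                    # must be a registered repository
        config={"indices": ["data-*"], "include_global_state": False},
        retention={"expire_after": "30d", "min_count": 5, "max_count": 50},
    )

    # Take a snapshot now instead of waiting for the schedule, e.g. before an upgrade.
    await client.slm.execute_lifecycle(policy_id="nightly-snapshots")

    # Global and per-policy statistics about SLM activity.
    print(await client.slm.get_stats())

    await client.close()
```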
``_ @@ -286,7 +287,11 @@ async def create_repository( verify: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a repository. + Create or update a snapshot repository. IMPORTANT: If you are migrating searchable + snapshots, the repository name must be identical in the source and destination + clusters. To register a snapshot repository, the cluster's global metadata must + be writeable. Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` + and `clsuter.blocks.read_only_allow_delete` settings) that prevent write access. ``_ @@ -346,7 +351,7 @@ async def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes one or more snapshots. + Delete snapshots. ``_ @@ -397,7 +402,9 @@ async def delete_repository( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a repository. + Delete snapshot repositories. When a repository is unregistered, Elasticsearch + removes only the reference to the location where the repository is storing the + snapshots. The snapshots themselves are left untouched and in place. ``_ @@ -471,7 +478,7 @@ async def get( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about a snapshot. + Get snapshot information. ``_ @@ -583,7 +590,7 @@ async def get_repository( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about a repository. + Get snapshot repository information. ``_ @@ -642,7 +649,40 @@ async def repository_verify_integrity( verify_blob_contents: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Verifies the integrity of the contents of a snapshot repository + Verify the repository integrity. Verify the integrity of the contents of a snapshot + repository. This API enables you to perform a comprehensive check of the contents + of a repository, looking for any anomalies in its data or metadata which might + prevent you from restoring snapshots from the repository or which might cause + future snapshot create or delete operations to fail. If you suspect the integrity + of the contents of one of your snapshot repositories, cease all write activity + to this repository immediately, set its `read_only` option to `true`, and use + this API to verify its integrity. Until you do so: * It may not be possible to + restore some snapshots from this repository. * Searchable snapshots may report + errors when searched or may have unassigned shards. * Taking snapshots into this + repository may fail or may appear to succeed but have created a snapshot which + cannot be restored. * Deleting snapshots from this repository may fail or may + appear to succeed but leave the underlying data on disk. * Continuing to write + to the repository while it is in an invalid state may causing additional damage + to its contents. If the API finds any problems with the integrity of the contents + of your repository, Elasticsearch will not be able to repair the damage. The + only way to bring the repository back into a fully working state after its contents + have been damaged is by restoring its contents from a repository backup which + was taken before the damage occurred. You must also identify what caused the + damage and take action to prevent it from happening again. If you cannot restore + a repository backup, register a new repository and use this for all future snapshot + operations. 
In some cases it may be possible to recover some of the contents + of a damaged repository, either by restoring as many of its snapshots as needed + and taking new snapshots of the restored data, or by using the reindex API to + copy data from any searchable snapshots mounted from the damaged repository. + Avoid all operations which write to the repository while the verify repository + integrity API is running. If something changes the repository contents while + an integrity verification is running then Elasticsearch may incorrectly report + having detected some anomalies in its contents due to the concurrent writes. + It may also incorrectly fail to report some anomalies that the concurrent writes + prevented it from detecting. NOTE: This API is intended for exploratory use by + humans. You should expect the request parameters and the response format to vary + in future versions. NOTE: This API may not work correctly in a mixed-version + cluster. ``_ @@ -739,7 +779,20 @@ async def restore( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Restores a snapshot. + Restore a snapshot. Restore a snapshot of a cluster or data streams and indices. + You can restore a snapshot only to a running cluster with an elected master node. + The snapshot repository must be registered and available to the cluster. The + snapshot and cluster versions must be compatible. To restore a snapshot, the + cluster's global metadata must be writable. Ensure there are't any cluster blocks + that prevent writes. The restore operation ignores index blocks. Before you restore + a data stream, ensure the cluster contains a matching index template with data + streams enabled. To check, use the index management feature in Kibana or the + get index template API: ``` GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream + ``` If no such template exists, you can create one or restore a cluster state + that contains one. Without a matching index template, a data stream can't roll + over or create backing indices. If your snapshot contains data from App Search + or Workplace Search, you must restore the Enterprise Search encryption key before + you restore the snapshot. ``_ @@ -832,7 +885,18 @@ async def status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about the status of a snapshot. + Get the snapshot status. Get a detailed description of the current state for + each shard participating in the snapshot. Note that this API should be used only + to obtain detailed shard-level information for ongoing snapshots. If this detail + is not needed or you want to obtain information about one or more existing snapshots, + use the get snapshot API. WARNING: Using the API to return the status of any + snapshots other than currently running snapshots can be expensive. The API requires + a read from the repository for each shard in each snapshot. For example, if you + have 100 snapshots with 1,000 shards each, an API request that includes all snapshots + will require 100,000 reads (100 snapshots x 1,000 shards). Depending on the latency + of your storage, such requests can take an extremely long time to return results. + These requests can also tax machine resources and, when using cloud storage, + incur high processing costs. 
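The `restore` docstring above recommends confirming that a matching index template exists before restoring a data stream. A loose Python-client version of that check plus the restore call follows; the repository, snapshot, and index names are illustrative.

```python
from elasticsearch import AsyncElasticsearch

async def restore_snapshot() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster

    # Equivalent of the GET _index_template/*?filter_path=... check from the docstring.
    templates = await client.indices.get_index_template(
        name="*",
        filter_path=(
            "index_templates.name,"
            "index_templates.index_template.index_patterns,"
            "index_templates.index_template.data_stream"
        ),
    )
    print(templates)

    # Restore selected indices and data streams from the snapshot.
    await client.snapshot.restore(
        repository="my_repository",        # illustrative repository name
        snapshot="snapshot_2",             # illustrative snapshot name
        indices="data-stream-1,index-1",
        wait_for_completion=True,
    )

    await client.close()
```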
``_ @@ -891,7 +955,8 @@ async def verify_repository( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Verifies a repository. + Verify a snapshot repository. Check for common misconfigurations in a snapshot + repository. ``_ diff --git a/elasticsearch/_async/client/tasks.py b/elasticsearch/_async/client/tasks.py index ffeb14f40..1c32896b4 100644 --- a/elasticsearch/_async/client/tasks.py +++ b/elasticsearch/_async/client/tasks.py @@ -47,7 +47,17 @@ async def cancel( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Cancels a task, if it can be cancelled through an API. + Cancel a task. A task may continue to run for some time after it has been cancelled + because it may not be able to safely stop its current activity straight away. + It is also possible that Elasticsearch must complete its work on other tasks + before it can process the cancellation. The get task information API will continue + to list these cancelled tasks until they complete. The cancelled flag in the + response indicates that the cancellation command has been processed and the task + will stop as soon as possible. To troubleshoot why a cancelled task does not + complete promptly, use the get task information API with the `?detailed` parameter + to identify the other tasks the system is running. You can also use the node + hot threads API to obtain detailed information about the work the system is doing + instead of completing the cancelled task. ``_ @@ -107,8 +117,7 @@ async def get( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get task information. Returns information about the tasks currently executing - in the cluster. + Get task information. Get information about a task currently running in the cluster. ``_ @@ -166,15 +175,16 @@ async def list( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - The task management API returns information about tasks currently executing on - one or more nodes in the cluster. + Get all tasks. Get information about the tasks currently running on one or more + nodes in the cluster. ``_ :param actions: Comma-separated list or wildcard expression of actions used to limit the request. :param detailed: If `true`, the response includes detailed information about - shard recoveries. + shard recoveries. This information is useful to distinguish tasks from each + other but is more costly to run. :param group_by: Key used to group tasks in the response. :param master_timeout: Period to wait for a connection to the master node. 
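A short sketch of the task-management calls described above: list tasks with per-task detail, then cancel one and follow up on it. The task ID is illustrative.

```python
from elasticsearch import AsyncElasticsearch

async def task_management() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster

    # `detailed=True` costs more but makes tasks easier to tell apart.
    tasks = await client.tasks.list(detailed=True, actions="*reindex", group_by="parents")
    print(tasks)

    # Cancellation is asynchronous: the task keeps running until it can stop safely.
    await client.tasks.cancel(task_id="oTUltX4IQMOUUVeiohTt8A:12345")  # illustrative task ID

    # The cancelled task stays visible here until it has actually finished.
    await client.tasks.get(task_id="oTUltX4IQMOUUVeiohTt8A:12345")

    await client.close()
```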
If no response is received before the timeout expires, the request fails and diff --git a/elasticsearch/_async/client/text_structure.py b/elasticsearch/_async/client/text_structure.py index 406592946..3e537da41 100644 --- a/elasticsearch/_async/client/text_structure.py +++ b/elasticsearch/_async/client/text_structure.py @@ -25,6 +25,349 @@ class TextStructureClient(NamespacedClient): + @_rewrite_parameters() + async def find_field_structure( + self, + *, + field: str, + index: str, + column_names: t.Optional[str] = None, + delimiter: t.Optional[str] = None, + documents_to_sample: t.Optional[int] = None, + ecs_compatibility: t.Optional[t.Union[str, t.Literal["disabled", "v1"]]] = None, + error_trace: t.Optional[bool] = None, + explain: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + format: t.Optional[ + t.Union[ + str, t.Literal["delimited", "ndjson", "semi_structured_text", "xml"] + ] + ] = None, + grok_pattern: t.Optional[str] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + quote: t.Optional[str] = None, + should_trim_fields: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + timestamp_field: t.Optional[str] = None, + timestamp_format: t.Optional[str] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Find the structure of a text field. Find the structure of a text field in an + Elasticsearch index. + + ``_ + + :param field: The field that should be analyzed. + :param index: The name of the index that contains the analyzed field. + :param column_names: If `format` is set to `delimited`, you can specify the column + names in a comma-separated list. If this parameter is not specified, the + structure finder uses the column names from the header row of the text. If + the text does not have a header row, columns are named "column1", "column2", + "column3", for example. + :param delimiter: If you have set `format` to `delimited`, you can specify the + character used to delimit the values in each row. Only a single character + is supported; the delimiter cannot have multiple characters. By default, + the API considers the following possibilities: comma, tab, semi-colon, and + pipe (`|`). In this default scenario, all rows must have the same number + of fields for the delimited format to be detected. If you specify a delimiter, + up to 10% of the rows can have a different number of columns than the first + row. + :param documents_to_sample: The number of documents to include in the structural + analysis. The minimum value is 2. + :param ecs_compatibility: The mode of compatibility with ECS compliant Grok patterns. + Use this parameter to specify whether to use ECS Grok patterns instead of + legacy ones when the structure finder creates a Grok pattern. This setting + primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` + matches the input. If the structure finder identifies a common structure + but has no idea of the meaning then generic field names such as `path`, `ipaddress`, + `field1`, and `field2` are used in the `grok_pattern` output. The intention + in that situation is that a user who knows the meanings will rename the fields + before using them. + :param explain: If true, the response includes a field named `explanation`, which + is an array of strings that indicate how the structure finder produced its + result. + :param format: The high level structure of the text. By default, the API chooses + the format. 
In this default scenario, all rows must have the same number + of fields for a delimited format to be detected. If the format is set to + delimited and the delimiter is not set, however, the API tolerates up to + 5% of rows that have a different number of columns than the first row. + :param grok_pattern: If the format is `semi_structured_text`, you can specify + a Grok pattern that is used to extract fields from every message in the text. + The name of the timestamp field in the Grok pattern must match what is specified + in the `timestamp_field` parameter. If that parameter is not specified, the + name of the timestamp field in the Grok pattern must match "timestamp". If + `grok_pattern` is not specified, the structure finder creates a Grok pattern. + :param quote: If the format is `delimited`, you can specify the character used + to quote the values in each row if they contain newlines or the delimiter + character. Only a single character is supported. If this parameter is not + specified, the default value is a double quote (`"`). If your delimited text + format does not use quoting, a workaround is to set this argument to a character + that does not appear anywhere in the sample. + :param should_trim_fields: If the format is `delimited`, you can specify whether + values between delimiters should have whitespace trimmed from them. If this + parameter is not specified and the delimiter is pipe (`|`), the default value + is true. Otherwise, the default value is false. + :param timeout: The maximum amount of time that the structure analysis can take. + If the analysis is still running when the timeout expires, it will be stopped. + :param timestamp_field: The name of the field that contains the primary timestamp + of each record in the text. In particular, if the text was ingested into + an index, this is the field that would be used to populate the `@timestamp` + field. If the format is `semi_structured_text`, this field must match the + name of the appropriate extraction in the `grok_pattern`. Therefore, for + semi-structured text, it is best not to specify this parameter unless `grok_pattern` + is also specified. For structured text, if you specify this parameter, the + field must exist within the text. If this parameter is not specified, the + structure finder makes a decision about which field (if any) is the primary + timestamp field. For structured text, it is not compulsory to have a timestamp + in the text. + :param timestamp_format: The Java time format of the timestamp field in the text. + Only a subset of Java time format letter groups are supported: * `a` * `d` + * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM` + * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz` Additionally `S` letter + groups (fractional seconds) of length one to nine are supported providing + they occur after `ss` and are separated from the `ss` by a period (`.`), + comma (`,`), or colon (`:`). Spacing and punctuation is also permitted with + the exception a question mark (`?`), newline, and carriage return, together + with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS + 'in' yyyy` is a valid override format. One valuable use case for this parameter + is when the format is semi-structured text, there are multiple timestamp + formats in the text, and you know which format corresponds to the primary + timestamp, but you do not want to specify the full `grok_pattern`. 
Another + is when the timestamp format is one that the structure finder does not consider + by default. If this parameter is not specified, the structure finder chooses + the best format from a built-in set. If the special value `null` is specified, + the structure finder will not look for a primary timestamp in the text. When + the format is semi-structured text, this will result in the structure finder + treating the text as single-line messages. + """ + if field is None: + raise ValueError("Empty value passed for parameter 'field'") + if index is None: + raise ValueError("Empty value passed for parameter 'index'") + __path_parts: t.Dict[str, str] = {} + __path = "/_text_structure/find_field_structure" + __query: t.Dict[str, t.Any] = {} + if field is not None: + __query["field"] = field + if index is not None: + __query["index"] = index + if column_names is not None: + __query["column_names"] = column_names + if delimiter is not None: + __query["delimiter"] = delimiter + if documents_to_sample is not None: + __query["documents_to_sample"] = documents_to_sample + if ecs_compatibility is not None: + __query["ecs_compatibility"] = ecs_compatibility + if error_trace is not None: + __query["error_trace"] = error_trace + if explain is not None: + __query["explain"] = explain + if filter_path is not None: + __query["filter_path"] = filter_path + if format is not None: + __query["format"] = format + if grok_pattern is not None: + __query["grok_pattern"] = grok_pattern + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if quote is not None: + __query["quote"] = quote + if should_trim_fields is not None: + __query["should_trim_fields"] = should_trim_fields + if timeout is not None: + __query["timeout"] = timeout + if timestamp_field is not None: + __query["timestamp_field"] = timestamp_field + if timestamp_format is not None: + __query["timestamp_format"] = timestamp_format + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="text_structure.find_field_structure", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("messages",), + ) + async def find_message_structure( + self, + *, + messages: t.Optional[t.Sequence[str]] = None, + column_names: t.Optional[str] = None, + delimiter: t.Optional[str] = None, + ecs_compatibility: t.Optional[t.Union[str, t.Literal["disabled", "v1"]]] = None, + error_trace: t.Optional[bool] = None, + explain: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + format: t.Optional[ + t.Union[ + str, t.Literal["delimited", "ndjson", "semi_structured_text", "xml"] + ] + ] = None, + grok_pattern: t.Optional[str] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + quote: t.Optional[str] = None, + should_trim_fields: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + timestamp_field: t.Optional[str] = None, + timestamp_format: t.Optional[str] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Find the structure of text messages. Find the structure of a list of text messages. + The messages must contain data that is suitable to be ingested into Elasticsearch. 
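A minimal sketch of `find_field_structure` as documented above, analyzing an existing text field in an index. The index, field, and sample size are illustrative.

```python
from elasticsearch import AsyncElasticsearch

async def analyze_field_structure() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster

    structure = await client.text_structure.find_field_structure(
        index="my-logs",                  # illustrative index name
        field="message",                  # the text field to analyze
        documents_to_sample=1000,         # minimum is 2, per the docstring
        ecs_compatibility="v1",           # prefer ECS Grok patterns over legacy ones
        explain=True,                     # include the structure finder's reasoning
    )
    print(structure)

    await client.close()
```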
+ This API provides a starting point for ingesting data into Elasticsearch in a + format that is suitable for subsequent use with other Elastic Stack functionality. + Use this API rather than the find text structure API if your input text has already + been split up into separate messages by some other process. The response from + the API contains: * Sample messages. * Statistics that reveal the most common + values for all fields detected within the text and basic numeric statistics for + numeric fields. * Information about the structure of the text, which is useful + when you write ingest configurations to index it or similarly formatted text. + Appropriate mappings for an Elasticsearch index, which you could use to ingest + the text. All this information can be calculated by the structure finder with + no guidance. However, you can optionally override some of the decisions about + the text structure by specifying one or more query parameters. + + ``_ + + :param messages: The list of messages you want to analyze. + :param column_names: If the format is `delimited`, you can specify the column + names in a comma-separated list. If this parameter is not specified, the + structure finder uses the column names from the header row of the text. If + the text does not have a header role, columns are named "column1", "column2", + "column3", for example. + :param delimiter: If you the format is `delimited`, you can specify the character + used to delimit the values in each row. Only a single character is supported; + the delimiter cannot have multiple characters. By default, the API considers + the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this + default scenario, all rows must have the same number of fields for the delimited + format to be detected. If you specify a delimiter, up to 10% of the rows + can have a different number of columns than the first row. + :param ecs_compatibility: The mode of compatibility with ECS compliant Grok patterns. + Use this parameter to specify whether to use ECS Grok patterns instead of + legacy ones when the structure finder creates a Grok pattern. This setting + primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` + matches the input. If the structure finder identifies a common structure + but has no idea of meaning then generic field names such as `path`, `ipaddress`, + `field1`, and `field2` are used in the `grok_pattern` output, with the intention + that a user who knows the meanings rename these fields before using it. + :param explain: If this parameter is set to true, the response includes a field + named `explanation`, which is an array of strings that indicate how the structure + finder produced its result. + :param format: The high level structure of the text. By default, the API chooses + the format. In this default scenario, all rows must have the same number + of fields for a delimited format to be detected. If the format is `delimited` + and the delimiter is not set, however, the API tolerates up to 5% of rows + that have a different number of columns than the first row. + :param grok_pattern: If the format is `semi_structured_text`, you can specify + a Grok pattern that is used to extract fields from every message in the text. + The name of the timestamp field in the Grok pattern must match what is specified + in the `timestamp_field` parameter. If that parameter is not specified, the + name of the timestamp field in the Grok pattern must match "timestamp". 
If + `grok_pattern` is not specified, the structure finder creates a Grok pattern. + :param quote: If the format is `delimited`, you can specify the character used + to quote the values in each row if they contain newlines or the delimiter + character. Only a single character is supported. If this parameter is not + specified, the default value is a double quote (`"`). If your delimited text + format does not use quoting, a workaround is to set this argument to a character + that does not appear anywhere in the sample. + :param should_trim_fields: If the format is `delimited`, you can specify whether + values between delimiters should have whitespace trimmed from them. If this + parameter is not specified and the delimiter is pipe (`|`), the default value + is true. Otherwise, the default value is false. + :param timeout: The maximum amount of time that the structure analysis can take. + If the analysis is still running when the timeout expires, it will be stopped. + :param timestamp_field: The name of the field that contains the primary timestamp + of each record in the text. In particular, if the text was ingested into + an index, this is the field that would be used to populate the `@timestamp` + field. If the format is `semi_structured_text`, this field must match the + name of the appropriate extraction in the `grok_pattern`. Therefore, for + semi-structured text, it is best not to specify this parameter unless `grok_pattern` + is also specified. For structured text, if you specify this parameter, the + field must exist within the text. If this parameter is not specified, the + structure finder makes a decision about which field (if any) is the primary + timestamp field. For structured text, it is not compulsory to have a timestamp + in the text. + :param timestamp_format: The Java time format of the timestamp field in the text. + Only a subset of Java time format letter groups are supported: * `a` * `d` + * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM` + * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz` Additionally `S` letter + groups (fractional seconds) of length one to nine are supported providing + they occur after `ss` and are separated from the `ss` by a period (`.`), + comma (`,`), or colon (`:`). Spacing and punctuation is also permitted with + the exception a question mark (`?`), newline, and carriage return, together + with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS + 'in' yyyy` is a valid override format. One valuable use case for this parameter + is when the format is semi-structured text, there are multiple timestamp + formats in the text, and you know which format corresponds to the primary + timestamp, but you do not want to specify the full `grok_pattern`. Another + is when the timestamp format is one that the structure finder does not consider + by default. If this parameter is not specified, the structure finder chooses + the best format from a built-in set. If the special value `null` is specified, + the structure finder will not look for a primary timestamp in the text. When + the format is semi-structured text, this will result in the structure finder + treating the text as single-line messages. 
+ """ + if messages is None and body is None: + raise ValueError("Empty value passed for parameter 'messages'") + __path_parts: t.Dict[str, str] = {} + __path = "/_text_structure/find_message_structure" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if column_names is not None: + __query["column_names"] = column_names + if delimiter is not None: + __query["delimiter"] = delimiter + if ecs_compatibility is not None: + __query["ecs_compatibility"] = ecs_compatibility + if error_trace is not None: + __query["error_trace"] = error_trace + if explain is not None: + __query["explain"] = explain + if filter_path is not None: + __query["filter_path"] = filter_path + if format is not None: + __query["format"] = format + if grok_pattern is not None: + __query["grok_pattern"] = grok_pattern + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if quote is not None: + __query["quote"] = quote + if should_trim_fields is not None: + __query["should_trim_fields"] = should_trim_fields + if timeout is not None: + __query["timeout"] = timeout + if timestamp_field is not None: + __query["timestamp_field"] = timestamp_field + if timestamp_format is not None: + __query["timestamp_format"] = timestamp_format + if not __body: + if messages is not None: + __body["messages"] = messages + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="text_structure.find_message_structure", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_name="text_files", ) @@ -50,8 +393,22 @@ async def find_structure( timestamp_format: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Finds the structure of a text file. The text file must contain data that is suitable - to be ingested into Elasticsearch. + Find the structure of a text file. The text file must contain data that is suitable + to be ingested into Elasticsearch. This API provides a starting point for ingesting + data into Elasticsearch in a format that is suitable for subsequent use with + other Elastic Stack functionality. Unlike other Elasticsearch endpoints, the + data that is posted to this endpoint does not need to be UTF-8 encoded and in + JSON format. It must, however, be text; binary text formats are not currently + supported. The size is limited to the Elasticsearch HTTP receive buffer size, + which defaults to 100 Mb. The response from the API contains: * A couple of messages + from the beginning of the text. * Statistics that reveal the most common values + for all fields detected within the text and basic numeric statistics for numeric + fields. * Information about the structure of the text, which is useful when you + write ingest configurations to index it or similarly formatted text. * Appropriate + mappings for an Elasticsearch index, which you could use to ingest the text. + All this information can be calculated by the structure finder with no guidance. + However, you can optionally override some of the decisions about the text structure + by specifying one or more query parameters. ``_ @@ -64,7 +421,7 @@ async def find_structure( column names in a comma-separated list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. 
If the text does not have a header role, columns are named "column1", "column2", - "column3", etc. + "column3", for example. :param delimiter: If you have set format to delimited, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers @@ -76,7 +433,9 @@ async def find_structure( (disabled or v1, default: disabled). :param explain: If this parameter is set to true, the response includes a field named explanation, which is an array of strings that indicate how the structure - finder produced its result. + finder produced its result. If the structure finder produces unexpected results + for some text, use this query parameter to help you determine why the returned + structure was chosen. :param format: The high level structure of the text. Valid values are ndjson, xml, delimited, and semi_structured_text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields @@ -114,9 +473,9 @@ async def find_structure( whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (|), the default value is true. Otherwise, the default value is false. - :param timeout: Sets the maximum amount of time that the structure analysis make + :param timeout: Sets the maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires then it will - be aborted. + be stopped. :param timestamp_field: Optional parameter to specify the timestamp field in the file :param timestamp_format: The Java time format of the timestamp field in the text. @@ -191,7 +550,9 @@ async def test_grok_pattern( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Tests a Grok pattern on some text. + Test a Grok pattern. Test a Grok pattern on one or more lines of text. The API + indicates whether the lines match the pattern together with the offsets and lengths + of the matched substrings. ``_ diff --git a/elasticsearch/_async/client/transform.py b/elasticsearch/_async/client/transform.py index fb12e6a04..5482ad4c1 100644 --- a/elasticsearch/_async/client/transform.py +++ b/elasticsearch/_async/client/transform.py @@ -844,13 +844,20 @@ async def upgrade_transforms( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Upgrades all transforms. This API identifies transforms that have a legacy configuration + Upgrade all transforms. Transforms are compatible across minor versions and between + supported major versions. However, over time, the format of transform configuration + information may change. This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It also cleans up the internal data structures that store the transform state and checkpoints. The upgrade does not affect the source and destination indices. The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains - unchanged. + unchanged. If a transform upgrade step fails, the upgrade stops and an error + is returned about the underlying issue. Resolve the issue then re-run the process + again. A summary is returned when the upgrade is finished. 
To ensure continuous + transforms remain running during a major version upgrade of the cluster – for + example, from 7.16 to 8.0 – it is recommended to upgrade transforms before upgrading + the cluster. You may want to perform a recent cluster backup prior to the upgrade. ``_ diff --git a/elasticsearch/_async/client/watcher.py b/elasticsearch/_async/client/watcher.py index 7b63b0cac..fa92b9f68 100644 --- a/elasticsearch/_async/client/watcher.py +++ b/elasticsearch/_async/client/watcher.py @@ -37,7 +37,11 @@ async def ack_watch( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Acknowledges a watch, manually throttling the execution of the watch's actions. + Acknowledge a watch. Acknowledging a watch enables you to manually throttle the + execution of the watch's actions. The acknowledgement state of an action is stored + in the `status.actions..ack.state` structure. IMPORTANT: If the specified + watch is currently being executed, this API will return an error. The reason for + this behavior is to prevent overwriting the watch status from a watch execution. ``_ @@ -88,7 +92,7 @@ async def activate_watch( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Activates a currently inactive watch. + Activate a watch. A watch can be either active or inactive. ``_ @@ -128,7 +132,7 @@ async def deactivate_watch( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deactivates a currently active watch. + Deactivate a watch. A watch can be either active or inactive. ``_ @@ -168,7 +172,13 @@ async def delete_watch( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes a watch from Watcher. + Delete a watch. When the watch is removed, the document representing the watch + in the `.watches` index is gone and it will never be run again. Deleting a watch + does not delete any watch execution records related to this watch from the watch + history. IMPORTANT: Deleting a watch must be done by using only this API. Do + not delete the watch directly from the `.watches` index using the Elasticsearch + delete document API. When Elasticsearch security features are enabled, make sure + no write privileges are granted to anyone for the `.watches` index. ``_ @@ -237,13 +247,15 @@ async def execute_watch( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - This API can be used to force execution of the watch outside of its triggering - logic or to simulate the watch execution for debugging purposes. For testing - and debugging purposes, you also have fine-grained control on how the watch runs. - You can execute the watch without executing all of its actions or alternatively + Run a watch. This API can be used to force execution of the watch outside of + its triggering logic or to simulate the watch execution for debugging purposes. + For testing and debugging purposes, you also have fine-grained control on how + the watch runs. You can run the watch without running all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after - execution. + it runs. You can use the run watch API to run watches that are not yet registered + by specifying the watch definition inline. This serves as a great tool for testing + and debugging your watches prior to adding them to Watcher.
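As a hedged sketch of the run-watch behaviour described above, the snippet below executes an unregistered watch by passing its definition inline; the watch body is invented for illustration and `client` is assumed to be an already-configured `AsyncElasticsearch` instance.

async def run_inline_watch(client) -> None:
    # Execute a watch that is not stored in the .watches index by supplying
    # its definition inline; no record is written to the watch history.
    resp = await client.watcher.execute_watch(
        watch={
            "trigger": {"schedule": {"interval": "10m"}},
            "input": {"simple": {"greeting": "hello"}},
            "condition": {"always": {}},
            "actions": {
                "log_greeting": {"logging": {"text": "{{ctx.payload.greeting}}"}}
            },
        },
    )
    print(resp["watch_record"]["state"])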
``_ @@ -326,7 +338,7 @@ async def get_watch( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a watch by its ID. + Get a watch. ``_ @@ -388,7 +400,17 @@ async def put_watch( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new watch, or updates an existing one. + Create or update a watch. When a watch is registered, a new document that represents + the watch is added to the `.watches` index and its trigger is immediately registered + with the relevant trigger engine. Typically for the `schedule` trigger, the scheduler + is the trigger engine. IMPORTANT: You must use Kibana or this API to create a + watch. Do not add a watch directly to the `.watches` index by using the Elasticsearch + index API. If Elasticsearch security features are enabled, do not give users + write privileges on the `.watches` index. When you add a watch you can also define + its initial active state by setting the *active* parameter. When Elasticsearch + security features are enabled, your watch can index or search only on indices + for which the user that stored the watch has privileges. If the user is able + to read index `a`, but not index `b`, the same will apply when the watch runs. ``_ @@ -485,7 +507,8 @@ async def query_watches( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves stored watches. + Query watches. Get all registered watches in a paginated manner and optionally + filter watches by a query. ``_ @@ -555,7 +578,7 @@ async def start( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts Watcher if it is not already running. + Start the watch service. Start the Watcher service if it is not already running. ``_ """ @@ -612,7 +635,7 @@ async def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the current Watcher metrics. + Get Watcher statistics. ``_ @@ -658,7 +681,7 @@ async def stop( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops Watcher if it is running. + Stop the watch service. Stop the Watcher service if it is running. ``_ """ diff --git a/elasticsearch/_async/client/xpack.py b/elasticsearch/_async/client/xpack.py index 08ca0f37d..b7de2e83f 100644 --- a/elasticsearch/_async/client/xpack.py +++ b/elasticsearch/_async/client/xpack.py @@ -43,7 +43,10 @@ async def info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Provides general information about the installed X-Pack features. + Get information. The information provided by the API includes: * Build information + including the build number and timestamp. * License information about the currently + installed license. * Feature information for the features that are currently + enabled and available under the current license. ``_ @@ -87,8 +90,9 @@ async def usage( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - This API provides information about which features are currently enabled and - available under the current license and some usage statistics. + Get usage information. Get information about the features that are currently + enabled and available under the current license. The API also provides some usage + statistics. 
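A small hedged example of the two X-Pack introspection calls described above, again assuming `client` is an already-configured `AsyncElasticsearch` instance:

async def show_xpack_overview(client) -> None:
    # Build, license, and feature availability information.
    info = await client.xpack.info(categories=["build", "license", "features"])
    # Per-feature usage statistics under the current license.
    usage = await client.xpack.usage()
    print(info["license"]["type"])
    print(sorted(usage.body.keys()))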
``_ diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py index 12d6f3fc5..bebd41cd8 100644 --- a/elasticsearch/_sync/client/__init__.py +++ b/elasticsearch/_sync/client/__init__.py @@ -624,12 +624,14 @@ def bulk( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + list_executed_pipelines: t.Optional[bool] = None, pipeline: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, require_alias: t.Optional[bool] = None, + require_data_stream: t.Optional[bool] = None, routing: t.Optional[str] = None, source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None, source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None, @@ -649,6 +651,8 @@ def bulk( :param operations: :param index: Name of the data stream, index, or index alias to perform bulk actions on. + :param list_executed_pipelines: If `true`, the response will include the ingest + pipelines that were executed for each index or create. :param pipeline: ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final @@ -659,6 +663,8 @@ def bulk( make this operation visible to search, if `false` do nothing with refreshes. Valid values: `true`, `false`, `wait_for`. :param require_alias: If `true`, the request’s actions must target an index alias. + :param require_data_stream: If `true`, the request's actions must target a data + stream (existing or to-be-created). :param routing: Custom value used to route operations to a specific shard. :param source: `true` or `false` to return the `_source` field or not, or a list of fields to return. @@ -692,6 +698,8 @@ def bulk( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if list_executed_pipelines is not None: + __query["list_executed_pipelines"] = list_executed_pipelines if pipeline is not None: __query["pipeline"] = pipeline if pretty is not None: @@ -700,6 +708,8 @@ def bulk( __query["refresh"] = refresh if require_alias is not None: __query["require_alias"] = require_alias + if require_data_stream is not None: + __query["require_data_stream"] = require_data_stream if routing is not None: __query["routing"] = routing if source is not None: diff --git a/elasticsearch/_sync/client/cat.py b/elasticsearch/_sync/client/cat.py index c3ddf4dc6..082a4105d 100644 --- a/elasticsearch/_sync/client/cat.py +++ b/elasticsearch/_sync/client/cat.py @@ -308,8 +308,6 @@ def count( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, @@ -332,11 +330,6 @@ def count( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. 
In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -362,10 +355,6 @@ def count( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -396,8 +385,6 @@ def fielddata( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, @@ -418,11 +405,6 @@ def fielddata( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -450,10 +432,6 @@ def fielddata( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -480,8 +458,6 @@ def health( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, time: t.Optional[ @@ -510,11 +486,6 @@ def health( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. 
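Returning to the bulk changes earlier in this diff, here is a hedged sketch of the two new query parameters; the data stream name and document are invented, and a local synchronous client is assumed.

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# `create` is required when writing into a data stream.
resp = client.bulk(
    index="logs-myapp-default",
    operations=[
        {"create": {}},
        {"@timestamp": "2024-11-09T10:06:17Z", "message": "service started"},
    ],
    require_data_stream=True,      # reject targets that are not data streams
    list_executed_pipelines=True,  # report the ingest pipelines that ran
    refresh="wait_for",
)
print(resp["errors"])
# With list_executed_pipelines=True each item reports the pipelines that ran.
print(resp["items"][0])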
@@ -537,10 +508,6 @@ def health( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -562,66 +529,15 @@ def health( ) @_rewrite_parameters() - def help( - self, - *, - error_trace: t.Optional[bool] = None, - filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, - format: t.Optional[str] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, - help: t.Optional[bool] = None, - human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - pretty: t.Optional[bool] = None, - s: t.Optional[t.Union[str, t.Sequence[str]]] = None, - v: t.Optional[bool] = None, - ) -> TextApiResponse: + def help(self) -> TextApiResponse: """ Get CAT help. Returns help for the CAT APIs. ``_ - - :param format: Specifies the format to return the columnar data in, can be set - to `text`, `json`, `cbor`, `yaml`, or `smile`. - :param h: List of columns to appear in the response. Supports simple wildcards. - :param help: When set to `true` will output available columns. This option can't - be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. - :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} __path = "/_cat" __query: t.Dict[str, t.Any] = {} - if error_trace is not None: - __query["error_trace"] = error_trace - if filter_path is not None: - __query["filter_path"] = filter_path - if format is not None: - __query["format"] = format - if h is not None: - __query["h"] = h - if help is not None: - __query["help"] = help - if human is not None: - __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout - if pretty is not None: - __query["pretty"] = pretty - if s is not None: - __query["s"] = s - if v is not None: - __query["v"] = v __headers = {"accept": "text/plain"} return self.perform_request( # type: ignore[return-value] "GET", @@ -656,7 +572,6 @@ def indices( help: t.Optional[bool] = None, human: t.Optional[bool] = None, include_unloaded_segments: t.Optional[bool] = None, - local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, pri: t.Optional[bool] = None, @@ -694,10 +609,6 @@ def indices( be combined with any other query string option. :param include_unloaded_segments: If true, the response includes information from segments that are not loaded into memory. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. 
In both cases the coordinating - node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param pri: If true, the response only includes information from primary shards. :param s: List of columns that determine how the table should be sorted. Sorting @@ -734,8 +645,6 @@ def indices( __query["human"] = human if include_unloaded_segments is not None: __query["include_unloaded_segments"] = include_unloaded_segments - if local is not None: - __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: @@ -894,8 +803,6 @@ def ml_data_frame_analytics( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -945,7 +852,9 @@ def ml_data_frame_analytics( ], ] ] = None, - time: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -966,11 +875,6 @@ def ml_data_frame_analytics( :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: Comma-separated list of column names or column aliases used to sort the response. :param time: Unit used to display time values. @@ -1000,10 +904,6 @@ def ml_data_frame_analytics( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -1073,8 +973,6 @@ def ml_datafeeds( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -1145,11 +1043,6 @@ def ml_datafeeds( :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: Comma-separated list of column names or column aliases used to sort the response. :param time: The unit used to display time values. 
@@ -1177,10 +1070,6 @@ def ml_datafeeds( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -1349,8 +1238,6 @@ def ml_jobs( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -1518,11 +1405,6 @@ def ml_jobs( :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: Comma-separated list of column names or column aliases used to sort the response. :param time: The unit used to display time values. @@ -1552,10 +1434,6 @@ def ml_jobs( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -1635,8 +1513,6 @@ def ml_trained_models( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -1683,6 +1559,9 @@ def ml_trained_models( ] ] = None, size: t.Optional[int] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -1708,14 +1587,10 @@ def ml_trained_models( :param h: A comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: A comma-separated list of column names or aliases used to sort the response. :param size: The maximum number of transforms to display. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. 
""" __path_parts: t.Dict[str, str] @@ -1744,16 +1619,14 @@ def ml_trained_models( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if size is not None: __query["size"] = size + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -1855,10 +1728,12 @@ def nodes( help: t.Optional[bool] = None, human: t.Optional[bool] = None, include_unloaded_segments: t.Optional[bool] = None, - local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -1879,14 +1754,11 @@ def nodes( be combined with any other query string option. :param include_unloaded_segments: If true, the response includes information from segments that are not loaded into memory. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} @@ -1910,14 +1782,14 @@ def nodes( __query["human"] = human if include_unloaded_segments is not None: __query["include_unloaded_segments"] = include_unloaded_segments - if local is not None: - __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -1944,6 +1816,9 @@ def pending_tasks( master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -1967,6 +1842,7 @@ def pending_tasks( :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. 
""" __path_parts: t.Dict[str, str] = {} @@ -1992,6 +1868,8 @@ def pending_tasks( __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -2014,6 +1892,7 @@ def plugins( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, + include_bootstrap: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, @@ -2033,6 +1912,7 @@ def plugins( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. + :param include_bootstrap: Include bootstrap plugins in the response :param local: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating @@ -2058,6 +1938,8 @@ def plugins( __query["help"] = help if human is not None: __query["human"] = human + if include_bootstrap is not None: + __query["include_bootstrap"] = include_bootstrap if local is not None: __query["local"] = local if master_timeout is not None: @@ -2094,10 +1976,11 @@ def recovery( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -2124,14 +2007,10 @@ def recovery( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. 
""" __path_parts: t.Dict[str, str] @@ -2160,14 +2039,12 @@ def recovery( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -2358,10 +2235,12 @@ def shards( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -2381,14 +2260,11 @@ def shards( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] @@ -2413,14 +2289,14 @@ def shards( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -2445,10 +2321,12 @@ def snapshots( help: t.Optional[bool] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, - local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ @@ -2470,14 +2348,11 @@ def snapshots( be combined with any other query string option. :param ignore_unavailable: If `true`, the response does not include information from unavailable snapshots. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. 
:param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] @@ -2502,14 +2377,14 @@ def snapshots( __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable - if local is not None: - __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} @@ -2535,13 +2410,16 @@ def tasks( h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - node_id: t.Optional[t.Sequence[str]] = None, + nodes: t.Optional[t.Sequence[str]] = None, parent_task_id: t.Optional[str] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, + time: t.Optional[ + t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] + ] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, v: t.Optional[bool] = None, + wait_for_completion: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ Returns information about tasks currently executing in the cluster. IMPORTANT: @@ -2559,18 +2437,18 @@ def tasks( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. - :param node_id: Unique node identifiers, which are used to limit the response. + :param nodes: Unique node identifiers, which are used to limit the response. :param parent_task_id: The parent task identifier, which is used to limit the response. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. + :param time: Unit used to display time values. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. :param v: When set to `true` will enable verbose output. + :param wait_for_completion: If `true`, the request blocks until the task has + completed. 
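The reworked cat task management parameters shown above can be exercised with a short hedged sketch; the node name is a placeholder and `client` is assumed to be a configured synchronous `Elasticsearch` instance.

# JSON output makes the columns easy to consume programmatically.
resp = client.cat.tasks(
    nodes=["node-1"],  # replaces the removed node_id parameter
    time="ms",         # display time values in milliseconds
    format="json",
)
for task in resp.body:
    print(task["action"], task["running_time"])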
""" __path_parts: t.Dict[str, str] = {} __path = "/_cat/tasks" @@ -2591,20 +2469,22 @@ def tasks( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout - if node_id is not None: - __query["node_id"] = node_id + if nodes is not None: + __query["nodes"] = nodes if parent_task_id is not None: __query["parent_task_id"] = parent_task_id if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s + if time is not None: + __query["time"] = time + if timeout is not None: + __query["timeout"] = timeout if v is not None: __query["v"] = v + if wait_for_completion is not None: + __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "text/plain,application/json"} return self.perform_request( # type: ignore[return-value] "GET", @@ -2883,8 +2763,6 @@ def transforms( ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, - local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ @@ -2998,11 +2876,6 @@ def transforms( :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param local: If `true`, the request computes the list of selected nodes from - the local cluster state. If `false` the list of selected nodes are computed - from the cluster state of the master node. In both cases the coordinating - node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. :param s: Comma-separated list of column names or column aliases used to sort the response. :param size: The maximum number of transforms to obtain. @@ -3033,10 +2906,6 @@ def transforms( __query["help"] = help if human is not None: __query["human"] = human - if local is not None: - __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: diff --git a/elasticsearch/_sync/client/ccr.py b/elasticsearch/_sync/client/ccr.py index f3b54acbd..f51ad4d0b 100644 --- a/elasticsearch/_sync/client/ccr.py +++ b/elasticsearch/_sync/client/ccr.py @@ -36,7 +36,8 @@ def delete_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes auto-follow patterns. + Delete auto-follow patterns. Delete a collection of cross-cluster replication + auto-follow patterns. 
``_ @@ -68,6 +69,8 @@ def delete_auto_follow_pattern( @_rewrite_parameters( body_fields=( "leader_index", + "remote_cluster", + "data_stream_name", "max_outstanding_read_requests", "max_outstanding_write_requests", "max_read_request_operation_count", @@ -78,59 +81,89 @@ def delete_auto_follow_pattern( "max_write_request_operation_count", "max_write_request_size", "read_poll_timeout", - "remote_cluster", + "settings", ), ) def follow( self, *, index: str, + leader_index: t.Optional[str] = None, + remote_cluster: t.Optional[str] = None, + data_stream_name: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, - leader_index: t.Optional[str] = None, max_outstanding_read_requests: t.Optional[int] = None, max_outstanding_write_requests: t.Optional[int] = None, max_read_request_operation_count: t.Optional[int] = None, - max_read_request_size: t.Optional[str] = None, + max_read_request_size: t.Optional[t.Union[int, str]] = None, max_retry_delay: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, max_write_buffer_count: t.Optional[int] = None, - max_write_buffer_size: t.Optional[str] = None, + max_write_buffer_size: t.Optional[t.Union[int, str]] = None, max_write_request_operation_count: t.Optional[int] = None, - max_write_request_size: t.Optional[str] = None, + max_write_request_size: t.Optional[t.Union[int, str]] = None, pretty: t.Optional[bool] = None, read_poll_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - remote_cluster: t.Optional[str] = None, + settings: t.Optional[t.Mapping[str, t.Any]] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new follower index configured to follow the referenced leader index. + Create a follower. Create a cross-cluster replication follower index that follows + a specific leader index. When the API returns, the follower index exists and + cross-cluster replication starts replicating operations from the leader index + to the follower index. ``_ - :param index: The name of the follower index - :param leader_index: - :param max_outstanding_read_requests: - :param max_outstanding_write_requests: - :param max_read_request_operation_count: - :param max_read_request_size: - :param max_retry_delay: - :param max_write_buffer_count: - :param max_write_buffer_size: - :param max_write_request_operation_count: - :param max_write_request_size: - :param read_poll_timeout: - :param remote_cluster: - :param wait_for_active_shards: Sets the number of shard copies that must be active - before returning. Defaults to 0. Set to `all` for all shard copies, otherwise - set to any non-negative value less than or equal to the total number of copies - for the shard (number of replicas + 1) + :param index: The name of the follower index. + :param leader_index: The name of the index in the leader cluster to follow. + :param remote_cluster: The remote cluster containing the leader index. + :param data_stream_name: If the leader index is part of a data stream, the name + to which the local data stream for the followed index should be renamed. + :param max_outstanding_read_requests: The maximum number of outstanding reads + requests from the remote cluster. + :param max_outstanding_write_requests: The maximum number of outstanding write + requests on the follower. 
+ :param max_read_request_operation_count: The maximum number of operations to + pull per read from the remote cluster. + :param max_read_request_size: The maximum size in bytes of per read of a batch + of operations pulled from the remote cluster. + :param max_retry_delay: The maximum time to wait before retrying an operation + that failed exceptionally. An exponential backoff strategy is employed when + retrying. + :param max_write_buffer_count: The maximum number of operations that can be queued + for writing. When this limit is reached, reads from the remote cluster will + be deferred until the number of queued operations goes below the limit. + :param max_write_buffer_size: The maximum total bytes of operations that can + be queued for writing. When this limit is reached, reads from the remote + cluster will be deferred until the total bytes of queued operations goes + below the limit. + :param max_write_request_operation_count: The maximum number of operations per + bulk write request executed on the follower. + :param max_write_request_size: The maximum total bytes of operations per bulk + write request executed on the follower. + :param read_poll_timeout: The maximum time to wait for new operations on the + remote cluster when the follower index is synchronized with the leader index. + When the timeout has elapsed, the poll for operations will return to the + follower so that it can update some statistics. Then the follower will immediately + attempt to read from the leader again. + :param settings: Settings to override from the leader index. + :param wait_for_active_shards: Specifies the number of shards to wait on being + active before responding. This defaults to waiting on none of the shards + to be active. A shard must be restored from the leader index before being + active. Restoring a follower shard requires transferring all the remote Lucene + segment files to the follower index. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") + if leader_index is None and body is None: + raise ValueError("Empty value passed for parameter 'leader_index'") + if remote_cluster is None and body is None: + raise ValueError("Empty value passed for parameter 'remote_cluster'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_ccr/follow' __query: t.Dict[str, t.Any] = {} @@ -148,6 +181,10 @@ def follow( if not __body: if leader_index is not None: __body["leader_index"] = leader_index + if remote_cluster is not None: + __body["remote_cluster"] = remote_cluster + if data_stream_name is not None: + __body["data_stream_name"] = data_stream_name if max_outstanding_read_requests is not None: __body["max_outstanding_read_requests"] = max_outstanding_read_requests if max_outstanding_write_requests is not None: @@ -174,8 +211,8 @@ def follow( __body["max_write_request_size"] = max_write_request_size if read_poll_timeout is not None: __body["read_poll_timeout"] = read_poll_timeout - if remote_cluster is not None: - __body["remote_cluster"] = remote_cluster + if settings is not None: + __body["settings"] = settings __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", @@ -198,8 +235,10 @@ def follow_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about all follower indices, including parameters and status - for each follower index + Get follower information. 
Get information about all cross-cluster replication + follower indices. For example, the results include follower index names, leader + index names, replication options, and whether the follower indices are active + or paused. ``_ @@ -240,8 +279,9 @@ def follow_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves follower stats. return shard-level stats about the following tasks - associated with each shard for the specified indices. + Get follower stats. Get cross-cluster replication follower stats. The API returns + shard-level stats about the "following tasks" associated with each shard for + the specified indices. ``_ @@ -294,7 +334,23 @@ def forget_follower( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes the follower retention leases from the leader. + Forget a follower. Remove the cross-cluster replication follower retention leases + from the leader. A following index takes out retention leases on its leader index. + These leases are used to increase the likelihood that the shards of the leader + index retain the history of operations that the shards of the following index + need to run replication. When a follower index is converted to a regular index + by the unfollow API (either by directly calling the API or by index lifecycle + management tasks), these leases are removed. However, removal of the leases can + fail, for example when the remote cluster containing the leader index is unavailable. + While the leases will eventually expire on their own, their extended existence + can cause the leader index to hold more history than necessary and prevent index + lifecycle management from performing some operations on the leader index. This + API exists to enable manually removing the leases when the unfollow API is unable + to do so. NOTE: This API does not stop replication by a following index. If you + use this API with a follower index that is still actively following, the following + index will add back retention leases on the leader. The only purpose of this + API is to handle the case of failure to remove the following retention leases + after the unfollow API is invoked. ``_ @@ -350,8 +406,7 @@ def get_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets configured auto-follow patterns. Returns the specified auto-follow pattern - collection. + Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. ``_ @@ -395,7 +450,14 @@ def pause_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Pauses an auto-follow pattern + Pause an auto-follow pattern. Pause a cross-cluster replication auto-follow pattern. + When the API returns, the auto-follow pattern is inactive. New indices that are + created on the remote cluster and match the auto-follow patterns are ignored. + You can resume auto-following with the resume auto-follow pattern API. When it + resumes, the auto-follow pattern is active again and automatically configures + follower indices for newly created indices on the remote cluster that match its + patterns. Remote indices that were created while the pattern was paused will + also be followed, unless they have been deleted or closed in the interim. ``_ @@ -436,8 +498,10 @@ def pause_follow( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Pauses a follower index. The follower index will not fetch any additional operations - from the leader index. + Pause a follower. 
Pause a cross-cluster replication follower index. The follower + index will not fetch any additional operations from the leader index. You can + resume following with the resume follower API. You can pause and resume a follower + index to change the configuration of the following task. ``_ @@ -512,9 +576,14 @@ def put_auto_follow_pattern( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new named collection of auto-follow patterns against a specified remote - cluster. Newly created indices on the remote cluster matching any of the specified - patterns will be automatically configured as follower indices. + Create or update auto-follow patterns. Create a collection of cross-cluster replication + auto-follow patterns for a remote cluster. Newly created indices on the remote + cluster that match any of the patterns are automatically configured as follower + indices. Indices on the remote cluster that were created before the auto-follow + pattern was created will not be auto-followed even if they match the pattern. + This API can also be used to update auto-follow patterns. NOTE: Follower indices + that were configured automatically before updating an auto-follow pattern will + remain unchanged even if they do not match against the new patterns. ``_ @@ -638,7 +707,11 @@ def resume_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resumes an auto-follow pattern that has been paused + Resume an auto-follow pattern. Resume a cross-cluster replication auto-follow + pattern that was paused. The auto-follow pattern will resume configuring following + indices for newly created indices that match its patterns on the remote cluster. + Remote indices created while the pattern was paused will also be followed unless + they have been deleted or closed in the interim. ``_ @@ -703,7 +776,11 @@ def resume_follow( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Resumes a follower index that has been paused + Resume a follower. Resume a cross-cluster replication follower index that was + paused. The follower index could have been paused with the pause follower API. + Alternatively it could be paused due to replication that cannot be retried due + to failures during following tasks. When this API returns, the follower index + will resume fetching operations from the leader index. ``_ @@ -785,7 +862,8 @@ def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets all stats related to cross-cluster replication. + Get cross-cluster replication stats. This API returns stats about auto-following + and the same shard-level stats as the get follower stats API. ``_ """ @@ -821,8 +899,13 @@ def unfollow( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops the following task associated with a follower index and removes index metadata - and settings associated with cross-cluster replication. + Unfollow an index. Convert a cross-cluster replication follower index to a regular + index. The API stops the following task associated with a follower index and + removes index metadata and settings associated with cross-cluster replication. + The follower index must be paused and closed before you call the unfollow API. + NOTE: Currently cross-cluster replication does not support converting an existing + regular index to a follower index. Converting a follower index to a regular index + is an irreversible operation. 
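To tie the cross-cluster replication changes above together, here is a hedged sketch of the follower lifecycle; the cluster and index names are placeholders and `client` is assumed to be a configured synchronous `Elasticsearch` instance connected to the follower cluster.

# Create a follower index; remote_cluster and leader_index are now required.
client.ccr.follow(
    index="sample-logs-follower",
    remote_cluster="leader-cluster",
    leader_index="sample-logs",
    wait_for_active_shards=1,
)

# Later, convert the follower back into a regular index.
client.ccr.pause_follow(index="sample-logs-follower")
client.indices.close(index="sample-logs-follower")
client.ccr.unfollow(index="sample-logs-follower")
client.indices.open(index="sample-logs-follower")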
``_ diff --git a/elasticsearch/_sync/client/connector.py b/elasticsearch/_sync/client/connector.py index 58d551bb7..6df64b55f 100644 --- a/elasticsearch/_sync/client/connector.py +++ b/elasticsearch/_sync/client/connector.py @@ -589,6 +589,125 @@ def sync_job_cancel( path_parts=__path_parts, ) + @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) + def sync_job_check_in( + self, + *, + connector_sync_job_id: str, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Check in a connector sync job. Check in a connector sync job and set the `last_seen` + field to the current time before updating it in the internal index. To sync data + using self-managed connectors, you need to deploy the Elastic connector service + on your own infrastructure. This service runs automatically on Elastic Cloud + for Elastic managed connectors. + + ``_ + + :param connector_sync_job_id: The unique identifier of the connector sync job + to be checked in. + """ + if connector_sync_job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") + __path_parts: t.Dict[str, str] = { + "connector_sync_job_id": _quote(connector_sync_job_id) + } + __path = ( + f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_check_in' + ) + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + endpoint_id="connector.sync_job_check_in", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("worker_hostname", "sync_cursor"), + ) + @_stability_warning(Stability.EXPERIMENTAL) + def sync_job_claim( + self, + *, + connector_sync_job_id: str, + worker_hostname: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + sync_cursor: t.Optional[t.Any] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Claim a connector sync job. This action updates the job status to `in_progress` + and sets the `last_seen` and `started_at` timestamps to the current time. Additionally, + it can set the `sync_cursor` property for the sync job. This API is not intended + for direct connector management by users. It supports the implementation of services + that utilize the connector protocol to communicate with Elasticsearch. To sync + data using self-managed connectors, you need to deploy the Elastic connector + service on your own infrastructure. This service runs automatically on Elastic + Cloud for Elastic managed connectors. + + ``_ + + :param connector_sync_job_id: The unique identifier of the connector sync job. + :param worker_hostname: The host name of the current system that will run the + job. + :param sync_cursor: The cursor object from the last incremental sync job. This + should reference the `sync_cursor` field in the connector state for which + the job runs. 
+ """ + if connector_sync_job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") + if worker_hostname is None and body is None: + raise ValueError("Empty value passed for parameter 'worker_hostname'") + __path_parts: t.Dict[str, str] = { + "connector_sync_job_id": _quote(connector_sync_job_id) + } + __path = f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_claim' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if worker_hostname is not None: + __body["worker_hostname"] = worker_hostname + if sync_cursor is not None: + __body["sync_cursor"] = sync_cursor + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="connector.sync_job_claim", + path_parts=__path_parts, + ) + @_rewrite_parameters() @_stability_warning(Stability.BETA) def sync_job_delete( @@ -634,6 +753,64 @@ def sync_job_delete( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("error",), + ) + @_stability_warning(Stability.EXPERIMENTAL) + def sync_job_error( + self, + *, + connector_sync_job_id: str, + error: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Set a connector sync job error. Set the `error` field for a connector sync job + and set its `status` to `error`. To sync data using self-managed connectors, + you need to deploy the Elastic connector service on your own infrastructure. + This service runs automatically on Elastic Cloud for Elastic managed connectors. + + ``_ + + :param connector_sync_job_id: The unique identifier for the connector sync job. + :param error: The error for the connector sync job error field. 
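As a sketch of the claim/check-in/error flow these experimental endpoints support, assuming the `client` instance from the earlier sketch and a hypothetical sync job identifier:

    job_id = "my-sync-job-id"  # hypothetical identifier

    # Claim the job: status becomes in_progress, last_seen/started_at are set.
    client.connector.sync_job_claim(connector_sync_job_id=job_id, worker_hostname="worker-01")

    # Periodically refresh last_seen while the job runs.
    client.connector.sync_job_check_in(connector_sync_job_id=job_id)

    # On failure, record the error; the job status is set to error.
    client.connector.sync_job_error(connector_sync_job_id=job_id, error="data source unreachable")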
+ """ + if connector_sync_job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") + if error is None and body is None: + raise ValueError("Empty value passed for parameter 'error'") + __path_parts: t.Dict[str, str] = { + "connector_sync_job_id": _quote(connector_sync_job_id) + } + __path = f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_error' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if error is not None: + __body["error"] = error + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="connector.sync_job_error", + path_parts=__path_parts, + ) + @_rewrite_parameters() @_stability_warning(Stability.BETA) def sync_job_get( @@ -1032,6 +1209,66 @@ def update_error( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("features",), + ) + @_stability_warning(Stability.EXPERIMENTAL) + def update_features( + self, + *, + connector_id: str, + features: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Update the connector features. Update the connector features in the connector + document. This API can be used to control the following aspects of a connector: + * document-level security * incremental syncs * advanced sync rules * basic sync + rules Normally, the running connector service automatically manages these features. + However, you can use this API to override the default behavior. To sync data + using self-managed connectors, you need to deploy the Elastic connector service + on your own infrastructure. This service runs automatically on Elastic Cloud + for Elastic managed connectors. + + ``_ + + :param connector_id: The unique identifier of the connector to be updated. 
+ :param features: + """ + if connector_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'connector_id'") + if features is None and body is None: + raise ValueError("Empty value passed for parameter 'features'") + __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} + __path = f'/_connector/{__path_parts["connector_id"]}/_features' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if features is not None: + __body["features"] = features + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="connector.update_features", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=("advanced_snippet", "filtering", "rules"), ) diff --git a/elasticsearch/_sync/client/eql.py b/elasticsearch/_sync/client/eql.py index 2610b3261..82b085ee2 100644 --- a/elasticsearch/_sync/client/eql.py +++ b/elasticsearch/_sync/client/eql.py @@ -167,6 +167,8 @@ def get_status( @_rewrite_parameters( body_fields=( "query", + "allow_partial_search_results", + "allow_partial_sequence_results", "case_sensitive", "event_category_field", "fetch_size", @@ -174,6 +176,7 @@ def get_status( "filter", "keep_alive", "keep_on_completion", + "max_samples_per_key", "result_position", "runtime_mappings", "size", @@ -188,6 +191,8 @@ def search( index: t.Union[str, t.Sequence[str]], query: t.Optional[str] = None, allow_no_indices: t.Optional[bool] = None, + allow_partial_search_results: t.Optional[bool] = None, + allow_partial_sequence_results: t.Optional[bool] = None, case_sensitive: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, event_category_field: t.Optional[str] = None, @@ -211,6 +216,7 @@ def search( ignore_unavailable: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, keep_on_completion: t.Optional[bool] = None, + max_samples_per_key: t.Optional[int] = None, pretty: t.Optional[bool] = None, result_position: t.Optional[t.Union[str, t.Literal["head", "tail"]]] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, @@ -232,6 +238,8 @@ def search( :param index: The name of the index to scope the operation :param query: EQL query you wish to run. :param allow_no_indices: + :param allow_partial_search_results: + :param allow_partial_sequence_results: :param case_sensitive: :param event_category_field: Field containing the event classification, such as process, file, or network. @@ -246,6 +254,11 @@ def search( in the response. :param keep_alive: :param keep_on_completion: + :param max_samples_per_key: By default, the response of a sample query contains + up to `10` samples, with one sample per unique set of join keys. Use the + `size` parameter to get a smaller or larger set of samples. To retrieve more + than one sample per set of join keys, use the `max_samples_per_key` parameter. + Pipes are not supported for sample queries. :param result_position: :param runtime_mappings: :param size: For basic queries, the maximum number of matching events to return. 
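The new EQL options can be passed straight through the Python client. A sketch, assuming the same `client` as above; the index pattern, field names, and sample query are purely illustrative:

    resp = client.eql.search(
        index="my-logs-*",
        query='sample by host.name [any where event.category == "process"] [any where event.category == "network"]',
        max_samples_per_key=3,              # up to 3 samples per unique host.name
        allow_partial_search_results=True,  # tolerate shard failures instead of failing the request
    )
    print(resp["hits"])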
@@ -280,6 +293,12 @@ def search( if not __body: if query is not None: __body["query"] = query + if allow_partial_search_results is not None: + __body["allow_partial_search_results"] = allow_partial_search_results + if allow_partial_sequence_results is not None: + __body["allow_partial_sequence_results"] = ( + allow_partial_sequence_results + ) if case_sensitive is not None: __body["case_sensitive"] = case_sensitive if event_category_field is not None: @@ -294,6 +313,8 @@ def search( __body["keep_alive"] = keep_alive if keep_on_completion is not None: __body["keep_on_completion"] = keep_on_completion + if max_samples_per_key is not None: + __body["max_samples_per_key"] = max_samples_per_key if result_position is not None: __body["result_position"] = result_position if runtime_mappings is not None: diff --git a/elasticsearch/_sync/client/features.py b/elasticsearch/_sync/client/features.py index 83aa4127e..5b2fcaab7 100644 --- a/elasticsearch/_sync/client/features.py +++ b/elasticsearch/_sync/client/features.py @@ -35,8 +35,17 @@ def get_features( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Gets a list of features which can be included in snapshots using the feature_states - field when creating a snapshot + Get the features. Get a list of features that can be included in snapshots using + the `feature_states` field when creating a snapshot. You can use this API to + determine which feature states to include when taking a snapshot. By default, + all feature states are included in a snapshot if that snapshot includes the global + state, or none if it does not. A feature state includes one or more system indices + necessary for a given feature to function. In order to ensure data integrity, + all system indices that comprise a feature state are snapshotted and restored + together. The features listed by this API are a combination of built-in features + and features defined by plugins. In order for a feature state to be listed in + this API and recognized as a valid feature state by the create snapshot API, + the plugin that defines that feature must be installed on the master node. ``_ """ @@ -72,7 +81,20 @@ def reset_features( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resets the internal state of features, usually by deleting system indices + Reset the features. Clear all of the state information stored in system indices + by Elasticsearch features, including the security and machine learning indices. + WARNING: Intended for development and testing use only. Do not reset features + on a production cluster. Return a cluster to the same state as a new installation + by resetting the feature state for all Elasticsearch features. This deletes all + state information stored in system indices. The response code is HTTP 200 if + the state is successfully reset for all features. It is HTTP 500 if the reset + operation failed for any feature. Note that select features might provide a way + to reset particular system indices. Using this API resets all features, both + those that are built-in and implemented as plugins. To list the features that + will be affected, use the get features API. IMPORTANT: The features installed + on the node you submit this request to are the features that will be reset. Run + on the master node if you have any doubts about which plugins are installed on + individual nodes. 
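A short sketch of the two feature-state calls, assuming the same `client` as above; resetting is destructive, so it is shown commented out:

    for feature in client.features.get_features()["features"]:
        print(feature["name"], "-", feature["description"])

    # Development/testing clusters only: clears system indices for every feature.
    # client.features.reset_features()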
``_ """ diff --git a/elasticsearch/_sync/client/ilm.py b/elasticsearch/_sync/client/ilm.py index 4f8196869..6ace9ee5c 100644 --- a/elasticsearch/_sync/client/ilm.py +++ b/elasticsearch/_sync/client/ilm.py @@ -38,9 +38,9 @@ def delete_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes the specified lifecycle policy definition. You cannot delete policies - that are currently in use. If the policy is being used to manage any indices, - the request fails and returns an error. + Delete a lifecycle policy. You cannot delete policies that are currently in use. + If the policy is being used to manage any indices, the request fails and returns + an error. ``_ @@ -93,9 +93,11 @@ def explain_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the index’s current lifecycle state, such as the - currently executing phase, action, and step. Shows when the index entered each - one, the definition of the running phase, and information about any failures. + Explain the lifecycle state. Get the current lifecycle status for one or more + indices. For data streams, the API retrieves the current lifecycle status for + the stream's backing indices. The response indicates when the index entered each + lifecycle state, provides the definition of the running phase, and information + about any failures. ``_ @@ -157,7 +159,7 @@ def get_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a lifecycle policy. + Get lifecycle policies. ``_ @@ -208,7 +210,7 @@ def get_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the current index lifecycle management (ILM) status. + Get the ILM status. Get the current index lifecycle management status. ``_ """ @@ -249,10 +251,18 @@ def migrate_to_data_tiers( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Switches the indices, ILM policies, and legacy, composable and component templates - from using custom node attributes and attribute-based allocation filters to using - data tiers, and optionally deletes one legacy index template.+ Using node roles - enables ILM to automatically move the indices between data tiers. + Migrate to data tiers routing. Switch the indices, ILM policies, and legacy, + composable, and component templates from using custom node attributes and attribute-based + allocation filters to using data tiers. Optionally, delete one legacy index template. + Using node roles enables ILM to automatically move the indices between data tiers. + Migrating away from custom node attributes routing can be manually performed. + This API provides an automated way of performing three out of the four manual + steps listed in the migration guide: 1. Stop setting the custom hot attribute + on new indices. 1. Remove custom allocation settings from existing ILM policies. + 1. Replace custom allocation settings from existing indices with the corresponding + tier preference. ILM must be stopped before performing the migration. Use the + stop ILM and get ILM status APIs to wait until the reported operation mode is + `STOPPED`. ``_ @@ -312,7 +322,21 @@ def move_to_step( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Manually moves an index into the specified step and executes that step. + Move to a lifecycle step. 
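A sketch of the stop-and-migrate sequence described above, assuming the `legacy_template_to_delete` and `node_attribute` request fields and the same `client` as above; the template name and attribute are placeholders for whatever a given cluster actually uses:

    import time

    client.ilm.stop()
    while client.ilm.get_status()["operation_mode"] != "STOPPED":
        time.sleep(1)  # wait for in-flight ILM work to finish

    client.ilm.migrate_to_data_tiers(
        legacy_template_to_delete="global-template",  # placeholder
        node_attribute="data",                        # custom attribute previously used for allocation
    )
    client.ilm.start()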
Manually move an index into a specific step in the + lifecycle policy and run that step. WARNING: This operation can result in the + loss of data. Manually moving an index into a specific step runs that step even + if it has already been performed. This is a potentially destructive action and + this should be considered an expert level API. You must specify both the current + step and the step to be executed in the body of the request. The request will + fail if the current step does not match the step currently running for the index + This is to prevent the index from being moved from an unexpected step into the + next step. When specifying the target (`next_step`) to which the index will be + moved, either the name or both the action and name fields are optional. If only + the phase is specified, the index will move to the first step of the first action + in the target phase. If the phase and action are specified, the index will move + to the first step of the specified action in the specified phase. Only actions + specified in the ILM policy are considered valid. An index cannot move to a step + that is not part of its policy. ``_ @@ -375,8 +399,9 @@ def put_lifecycle( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a lifecycle policy. If the specified policy exists, the policy is replaced - and the policy version is incremented. + Create or update a lifecycle policy. If the specified policy exists, it is replaced + and the policy version is incremented. NOTE: Only the latest version of the policy + is stored, you cannot revert to previous versions. ``_ @@ -435,7 +460,8 @@ def remove_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes the assigned lifecycle policy and stops managing the specified index + Remove policies from an index. Remove the assigned lifecycle policies from an + index or a data stream's backing indices. It also stops managing the indices. ``_ @@ -475,7 +501,10 @@ def retry( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retries executing the policy for an index that is in the ERROR step. + Retry a policy. Retry running the lifecycle policy for an index that is in the + ERROR step. The API sets the policy back to the step where the error occurred + and runs the step. Use the explain lifecycle state API to determine whether an + index is in the ERROR step. ``_ @@ -517,7 +546,9 @@ def start( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Start the index lifecycle management (ILM) plugin. + Start the ILM plugin. Start the index lifecycle management plugin if it is currently + stopped. ILM is started automatically when the cluster is formed. Restarting + ILM is necessary only when it has been stopped using the stop ILM API. ``_ @@ -561,8 +592,12 @@ def stop( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Halts all lifecycle management operations and stops the index lifecycle management - (ILM) plugin + Stop the ILM plugin. Halt all lifecycle management operations and stop the index + lifecycle management plugin. This is useful when you are performing maintenance + on the cluster and need to prevent ILM from performing any actions on your indices. + The API returns as soon as the stop request has been acknowledged, but the plugin + might continue to run until in-progress operations complete and the plugin can + be safely stopped. 
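To find indices stuck in the ERROR step and re-run the failed step, roughly (assuming the `only_errors` filter and the same `client` as above; the index pattern is illustrative):

    explain = client.ilm.explain_lifecycle(index="my-index-*", only_errors=True)
    for index_name in explain["indices"]:
        client.ilm.retry(index=index_name)  # sets the policy back to the failed step and runs it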
Use the get ILM status API to check whether ILM is running. ``_ diff --git a/elasticsearch/_sync/client/indices.py b/elasticsearch/_sync/client/indices.py index b27909af1..964721138 100644 --- a/elasticsearch/_sync/client/indices.py +++ b/elasticsearch/_sync/client/indices.py @@ -245,8 +245,8 @@ def clear_cache( request: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clears the caches of one or more indices. For data streams, the API clears the - caches of the stream’s backing indices. + Clear the cache. Clear the cache of one or more indices. For data streams, the + API clears the caches of the stream's backing indices. ``_ @@ -331,7 +331,26 @@ def clone( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clones an existing index. + Clone an index. Clone an existing index into a new index. Each original primary + shard is cloned into a new primary shard in the new index. IMPORTANT: Elasticsearch + does not apply index templates to the resulting index. The API also does not + copy index metadata from the original index. Index metadata includes aliases, + index lifecycle management phase definitions, and cross-cluster replication (CCR) + follower information. For example, if you clone a CCR follower index, the resulting + clone will not be a follower index. The clone API copies most index settings + from the source index to the resulting index, with the exception of `index.number_of_replicas` + and `index.auto_expand_replicas`. To set the number of replicas in the resulting + index, configure these settings in the clone request. Cloning works as follows: + * First, it creates a new target index with the same definition as the source + index. * Then it hard-links segments from the source index into the target index. + If the file system does not support hard-linking, all segments are copied into + the new index, which is a much more time consuming process. * Finally, it recovers + the target index as though it were a closed index which had just been re-opened. + IMPORTANT: Indices can only be cloned if they meet the following requirements: + * The target index must not exist. * The source index must have the same number + of primary shards as the target index. * The node handling the clone process + must have sufficient free disk space to accommodate a second copy of the existing + index. ``_ @@ -419,7 +438,24 @@ def close( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Closes an index. + Close an index. A closed index is blocked for read or write operations and does + not allow all operations that opened indices allow. It is not possible to index + documents or to search for documents in a closed index. Closed indices do not + have to maintain internal data structures for indexing or searching documents, + which results in a smaller overhead on the cluster. When opening or closing an + index, the master node is responsible for restarting the index shards to reflect + the new state of the index. The shards will then go through the normal recovery + process. The data of opened and closed indices is automatically replicated by + the cluster to ensure that enough shard copies are safely kept around at all + times. You can open and close multiple indices. An error is thrown if the request + explicitly refers to a missing index. This behaviour can be turned off using + the `ignore_unavailable=true` parameter. By default, you must explicitly name + the indices you are opening or closing. 
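A sketch of a minimal clone, using an index block to satisfy the read-only requirement; names and settings are illustrative and `client` is assumed from the earlier sketch:

    client.indices.add_block(index="my-source-index", block="write")  # make the source read-only
    client.indices.clone(
        index="my-source-index",
        target="my-cloned-index",
        settings={"index.number_of_replicas": 0},  # set replicas explicitly; not copied from the source
    )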
To open or close indices with `_all`, + `*`, or other wildcard expressions, change the` action.destructive_requires_name` + setting to `false`. This setting can also be changed with the cluster update + settings API. Closed indices consume a significant amount of disk-space which + can cause problems in managed environments. Closing indices can be turned off + with the cluster settings API by setting `cluster.indices.close.enable` to `false`. ``_ @@ -1061,7 +1097,10 @@ def disk_usage( run_expensive_tasks: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Analyzes the disk usage of each field of an index or data stream. + Analyze the index disk usage. Analyze the disk usage of each field of an index + or data stream. This API might not support indices created in previous Elasticsearch + versions. The result of a small index can be inaccurate as some parts of an index + might not be analyzed by the API. ``_ @@ -1135,9 +1174,14 @@ def downsample( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Aggregates a time series (TSDS) index and stores pre-computed statistical summaries - (`min`, `max`, `sum`, `value_count` and `avg`) for each metric field grouped - by a configured time interval. + Downsample an index. Aggregate a time series (TSDS) index and store pre-computed + statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each + metric field grouped by a configured time interval. For example, a TSDS index + that contains metrics sampled every 10 seconds can be downsampled to an hourly + index. All documents within an hour interval are summarized and stored as a single + document in the downsample index. NOTE: Only indices in a time series data stream + are supported. Neither field nor document level security can be defined on the + source index. The source index must be read only (`index.blocks.write: true`). ``_ @@ -1456,8 +1500,8 @@ def explain_data_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the status for a data stream lifecycle. Retrieves information about an index - or data stream’s current data stream lifecycle status, such as time since index + Get the status for a data stream lifecycle. Get information about an index or + data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. @@ -1523,7 +1567,10 @@ def field_usage_stats( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns field usage information for each shard and field of an index. + Get field usage stats. Get field usage information for each shard and field of + an index. Field usage statistics are automatically captured when queries are + running on a cluster. A shard-level search request that accesses a given field, + even if multiple times during that request, is counted as a single use. ``_ @@ -1611,7 +1658,22 @@ def flush( wait_if_ongoing: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Flushes one or more data streams or indices. + Flush data streams or indices. Flushing a data stream or index is the process + of making sure that any data that is currently only stored in the transaction + log is also permanently stored in the Lucene index. When restarting, Elasticsearch + replays any unflushed operations from the transaction log into the Lucene index + to bring it back into the state that it was in before the restart. 
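The disk usage analysis only runs when `run_expensive_tasks` is set to `true`; a rough sketch, assuming the same `client` as above and an illustrative index name (the response shape may vary by version):

    usage = client.indices.disk_usage(index="my-index", run_expensive_tasks=True)
    print(usage["my-index"]["store_size"])  # total size of the analyzed fields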
Elasticsearch + automatically triggers flushes as needed, using heuristics that trade off the + size of the unflushed transaction log against the cost of performing each flush. + After each operation has been flushed it is permanently stored in the Lucene + index. This may mean that there is no need to maintain an additional copy of + it in the transaction log. The transaction log is made up of multiple files, + called generations, and Elasticsearch will delete any generation files when they + are no longer needed, freeing up disk space. It is also possible to trigger a + flush on one or more indices using the flush API, although it is rare for users + to need to call this API directly. If you call the flush API after indexing some + documents then a successful response indicates that Elasticsearch has flushed + all the documents that were indexed before the flush API was called. ``_ @@ -1694,7 +1756,21 @@ def forcemerge( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Performs the force merge operation on one or more indices. + Force a merge. Perform the force merge operation on the shards of one or more + indices. For data streams, the API forces a merge on the shards of the stream's + backing indices. Merging reduces the number of segments in each shard by merging + some of them together and also frees up the space used by deleted documents. + Merging normally happens automatically, but sometimes it is useful to trigger + a merge manually. WARNING: We recommend force merging only a read-only index + (meaning the index is no longer receiving writes). When documents are updated + or deleted, the old version is not immediately removed but instead soft-deleted + and marked with a "tombstone". These soft-deleted documents are automatically + cleaned up during regular segment merges. But force merge can cause very large + (greater than 5 GB) segments to be produced, which are not eligible for regular + merges. So the number of soft-deleted documents can then grow rapidly, resulting + in higher disk usage and worse search performance. If you regularly force merge + an index receiving writes, this can also make snapshots more expensive, since + the new documents can't be backed up incrementally. ``_ @@ -2679,8 +2755,18 @@ def promote_data_stream( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Promotes a data stream from a replicated data stream managed by CCR to a regular - data stream + Promote a data stream. Promote a data stream from a replicated data stream managed + by cross-cluster replication (CCR) to a regular data stream. With CCR auto following, + a data stream from a remote cluster can be replicated to the local cluster. These + data streams can't be rolled over in the local cluster. These replicated data + streams roll over only if the upstream data stream rolls over. In the event that + the remote cluster is no longer available, the data stream in the local cluster + can be promoted to a regular data stream, which allows these data streams to + be rolled over in the local cluster. NOTE: When promoting a data stream, ensure + the local cluster has a data stream enabled index template that matches the data + stream. If this is missing, the data stream will not be able to roll over until + a matching index template is created. This will affect the lifecycle management + of the data stream and interfere with the data stream size and retention. 
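As suggested in the force merge notes above, a merge is best triggered only on indices that no longer receive writes; a sketch with the same assumed `client`:

    # Only do this on indices that no longer receive writes.
    client.indices.forcemerge(index="my-old-index", max_num_segments=1, wait_for_completion=True)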
``_ @@ -2819,14 +2905,14 @@ def put_alias( ) @_rewrite_parameters( - body_fields=("data_retention", "downsampling"), + body_name="lifecycle", ) def put_data_lifecycle( self, *, name: t.Union[str, t.Sequence[str]], - data_retention: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - downsampling: t.Optional[t.Mapping[str, t.Any]] = None, + lifecycle: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ @@ -2841,7 +2927,6 @@ def put_data_lifecycle( master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ Update data stream lifecycles. Update the data stream lifecycle of the specified @@ -2851,13 +2936,7 @@ def put_data_lifecycle( :param name: Comma-separated list of data streams used to limit the request. Supports wildcards (`*`). To target all data streams use `*` or `_all`. - :param data_retention: If defined, every document added to this data stream will - be stored at least for this time frame. Any time after this duration the - document could be deleted. When empty, every document in this data stream - will be stored indefinitely. - :param downsampling: If defined, every backing index will execute the configured - downsampling configuration after the backing index is not the data stream - write index anymore. + :param lifecycle: :param expand_wildcards: Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `hidden`, `open`, `closed`, `none`. @@ -2869,10 +2948,15 @@ def put_data_lifecycle( """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") + if lifecycle is None and body is None: + raise ValueError( + "Empty value passed for parameters 'lifecycle' and 'body', one of them should be set." + ) + elif lifecycle is not None and body is not None: + raise ValueError("Cannot set both 'lifecycle' and 'body'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}/_lifecycle' __query: t.Dict[str, t.Any] = {} - __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: @@ -2887,16 +2971,8 @@ def put_data_lifecycle( __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout - if not __body: - if data_retention is not None: - __body["data_retention"] = data_retention - if downsampling is not None: - __body["downsampling"] = downsampling - if not __body: - __body = None # type: ignore[assignment] - __headers = {"accept": "application/json"} - if __body is not None: - __headers["content-type"] = "application/json" + __body = lifecycle if lifecycle is not None else body + __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, @@ -3343,7 +3419,16 @@ def put_template( ) -> ObjectApiResponse[t.Any]: """ Create or update an index template. Index templates define settings, mappings, - and aliases that can be applied automatically to new indices. + and aliases that can be applied automatically to new indices. 
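With this change the lifecycle is passed as a single object rather than as separate `data_retention`/`downsampling` keyword arguments; a sketch of the new call shape, assuming the same `client` as above:

    client.indices.put_data_lifecycle(
        name="my-data-stream",
        lifecycle={"data_retention": "7d"},  # the whole lifecycle document goes in one body
    )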
Elasticsearch applies + templates to new indices based on an index pattern that matches the index name. + IMPORTANT: This documentation is about legacy index templates, which are deprecated + and will be replaced by the composable templates introduced in Elasticsearch + 7.8. Composable templates always take precedence over legacy templates. If no + composable template matches a new index, matching legacy templates are applied + according to their order. Index templates are only applied during index creation. + Changes to index templates do not affect existing indices. Settings and mappings + specified in create index API requests override any settings or mappings specified + in an index template. ``_ @@ -3423,9 +3508,25 @@ def recovery( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about ongoing and completed shard recoveries for one or more - indices. For data streams, the API returns information for the stream’s backing - indices. + Get index recovery information. Get information about ongoing and completed shard + recoveries for one or more indices. For data streams, the API returns information + for the stream's backing indices. Shard recovery is the process of initializing + a shard copy, such as restoring a primary shard from a snapshot or creating a + replica shard from a primary shard. When a shard recovery completes, the recovered + shard is available for search and indexing. Recovery automatically occurs during + the following processes: * When creating an index for the first time. * When + a node rejoins the cluster and starts up any missing primary shard copies using + the data that it holds in its data path. * Creation of new replica shard copies + from the primary. * Relocation of a shard copy to a different node in the same + cluster. * A snapshot restore operation. * A clone, shrink, or split operation. + You can determine the cause of a shard recovery using the recovery or cat recovery + APIs. The index recovery API reports information about completed recoveries only + for shard copies that currently exist in the cluster. It only reports the last + recovery for each shard copy and does not report historical information about + earlier recoveries, nor does it report information about the recoveries of shard + copies that no longer exist. This means that if a shard copy completes a recovery + and then Elasticsearch relocates it onto a different node then the information + about the original recovery will not be shown in the recovery API. ``_ @@ -3559,7 +3660,21 @@ def reload_search_analyzers( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Reloads an index's search analyzers and their resources. + Reload search analyzers. Reload an index's search analyzers and their resources. + For data streams, the API reloads search analyzers and resources for the stream's + backing indices. IMPORTANT: After reloading the search analyzers you should clear + the request cache to make sure it doesn't contain responses derived from the + previous versions of the analyzer. You can use the reload search analyzers API + to pick up changes to synonym files used in the `synonym_graph` or `synonym` + token filter of a search analyzer. To be eligible, the token filter must have + an `updateable` flag of `true` and only be used in search analyzers. NOTE: This + API does not perform a reload for each shard of an index. Instead, it performs + a reload for each node containing index shards. 
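A sketch of reading recovery information for an index or a stream's backing indices, assuming the same `client` as above and the standard response layout:

    recovery = client.indices.recovery(index="my-index", detailed=True)
    for index_name, info in recovery.items():
        for shard in info["shards"]:
            print(index_name, shard["id"], shard["type"], shard["stage"])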
As a result, the total shard + count returned by the API can differ from the number of index shards. Because + reloading affects every node with an index shard, it is important to update the + synonym file on every data node in the cluster--including nodes that don't contain + a shard replica--before using this API. This ensures the synonym file is updated + everywhere in the cluster in case shards are relocated in the future. ``_ @@ -3623,9 +3738,20 @@ def resolve_cluster( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resolves the specified index expressions to return information about each cluster, - including the local cluster, if included. Multiple patterns and remote clusters - are supported. + Resolve the cluster. Resolve the specified index expressions to return information + about each cluster, including the local cluster, if included. Multiple patterns + and remote clusters are supported. This endpoint is useful before doing a cross-cluster + search in order to determine which remote clusters should be included in a search. + You use the same index expression with this endpoint as you would for cross-cluster + search. Index and cluster exclusions are also supported with this endpoint. For + each cluster in the index expression, information is returned about: * Whether + the querying ("local") cluster is currently connected to each remote cluster + in the index expression scope. * Whether each remote cluster is configured with + `skip_unavailable` as `true` or `false`. * Whether there are any indices, aliases, + or data streams on that cluster that match the index expression. * Whether the + search is likely to have errors returned when you do the cross-cluster search + (including any authorization errors if you do not have permission to query the + index). * Cluster version information, including the Elasticsearch server version. ``_ @@ -3877,8 +4003,9 @@ def segments( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns low-level information about the Lucene segments in index shards. For - data streams, the API returns information about the stream’s backing indices. + Get index segments. Get low-level information about the Lucene segments in index + shards. For data streams, the API returns information about the stream's backing + indices. ``_ @@ -3957,8 +4084,14 @@ def shard_stores( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves store information about replica shards in one or more indices. For - data streams, the API retrieves store information for the stream’s backing indices. + Get index shard stores. Get store information about replica shards in one or + more indices. For data streams, the API retrieves store information for the stream's + backing indices. The index shard stores API returns the following information: + * The node on which each replica shard exists. * The allocation ID for each replica + shard. * A unique ID for each replica shard. * Any errors encountered while opening + the shard index or from an earlier failure. By default, the API returns store + information only for primary shards that are unassigned or have one or more unassigned + replica shards. ``_ @@ -4029,7 +4162,39 @@ def shrink( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Shrinks an existing index into a new index with fewer primary shards. + Shrink an index. Shrink an index into a new index with fewer primary shards. + Before you can shrink an index: * The index must be read-only. 
* A copy of every + shard in the index must reside on the same node. * The index must have a green + health status. To make shard allocation easier, we recommend you also remove + the index's replica shards. You can later re-add replica shards as part of the + shrink operation. The requested number of primary shards in the target index + must be a factor of the number of shards in the source index. For example an + index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an + index with 15 primary shards can be shrunk into 5, 3 or 1. If the number of shards + in the index is a prime number it can only be shrunk into a single primary shard + Before shrinking, a (primary or replica) copy of every shard in the index must + be present on the same node. The current write index on a data stream cannot + be shrunk. In order to shrink the current write index, the data stream must first + be rolled over so that a new write index is created and then the previous write + index can be shrunk. A shrink operation: * Creates a new target index with the + same definition as the source index, but with a smaller number of primary shards. + * Hard-links segments from the source index into the target index. If the file + system does not support hard-linking, then all segments are copied into the new + index, which is a much more time consuming process. Also if using multiple data + paths, shards on different data paths require a full copy of segment files if + they are not on the same disk since hardlinks do not work across disks. * Recovers + the target index as though it were a closed index which had just been re-opened. + Recovers shards to the `.routing.allocation.initial_recovery._id` index setting. + IMPORTANT: Indices can only be shrunk if they satisfy the following requirements: + * The target index must not exist. * The source index must have more primary + shards than the target index. * The number of primary shards in the target index + must be a factor of the number of primary shards in the source index. The source + index must have more primary shards than the target index. * The index must not + contain more than 2,147,483,519 documents in total across all shards that will + be shrunk into a single shard on the target index as this is the maximum number + of docs that can fit into a single shard. * The node handling the shrink process + must have sufficient free disk space to accommodate a second copy of the existing + index. ``_ @@ -4314,7 +4479,27 @@ def split( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Splits an existing index into a new index with more primary shards. + Split an index. Split an index into a new index with more primary shards. * Before + you can split an index: * The index must be read-only. * The cluster health status + must be green. The number of times the index can be split (and the number of + shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` + setting. The number of routing shards specifies the hashing space that is used + internally to distribute documents across shards with consistent hashing. For + instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x + 3) could be split by a factor of 2 or 3. A split operation: * Creates a new target + index with the same definition as the source index, but with a larger number + of primary shards. * Hard-links segments from the source index into the target + index. 
If the file system doesn't support hard-linking, all segments are copied + into the new index, which is a much more time consuming process. * Hashes all + documents again, after low level files are created, to delete documents that + belong to a different shard. * Recovers the target index as though it were a + closed index which had just been re-opened. IMPORTANT: Indices can only be split + if they satisfy the following requirements: * The target index must not exist. + * The source index must have fewer primary shards than the target index. * The + number of primary shards in the target index must be a multiple of the number + of primary shards in the source index. * The node handling the split process + must have sufficient free disk space to accommodate a second copy of the existing + index. ``_ @@ -4406,8 +4591,14 @@ def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns statistics for one or more indices. For data streams, the API retrieves - statistics for the stream’s backing indices. + Get index statistics. For data streams, the API retrieves statistics for the + stream's backing indices. By default, the returned statistics are index-level + with `primaries` and `total` aggregations. `primaries` are the values for only + the primary shards. `total` are the accumulated values for both primary and replica + shards. To get shard-level statistics, set the `level` parameter to `shards`. + NOTE: When moving to another node, the shard-level statistics for a shard are + cleared. Although the shard is no longer part of the node, that node retains + any node-level statistics to which the shard contributed. ``_ @@ -4510,7 +4701,8 @@ def unfreeze( wait_for_active_shards: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Unfreezes an index. + Unfreeze an index. When a frozen index is unfrozen, the index goes through the + normal recovery process and becomes writeable again. ``_ diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py index 08f9da4aa..905d75294 100644 --- a/elasticsearch/_sync/client/inference.py +++ b/elasticsearch/_sync/client/inference.py @@ -255,7 +255,21 @@ def put( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Create an inference endpoint + Create an inference endpoint. When you create an inference endpoint, the associated + machine learning model is automatically deployed if it is not already running. + After creating the endpoint, wait for the model deployment to complete before + using it. To verify the deployment status, use the get trained model statistics + API. Look for `"state": "fully_allocated"` in the response and ensure that the + `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating + multiple endpoints for the same model unless required, as each endpoint consumes + significant resources. IMPORTANT: The inference APIs enable you to use certain + services, such as built-in machine learning models (ELSER, E5), models uploaded + through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google + Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models + uploaded through Eland, the inference APIs offer an alternative way to use and + manage trained models. However, if you do not plan to use the inference APIs + to use these models or if you want to use non-NLP models, use the machine learning + trained model APIs. 
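A sketch of creating an ELSER inference endpoint as described above; the service settings are illustrative, the `inference_config` body name follows the client's signature, and the trained model ID used for the status check is a placeholder:

    client.inference.put(
        task_type="sparse_embedding",
        inference_id="my-elser-endpoint",
        inference_config={
            "service": "elser",
            "service_settings": {"num_allocations": 1, "num_threads": 1},
        },
    )
    # Then poll the trained model stats and wait for "fully_allocated" before querying.
    stats = client.ml.get_trained_models_stats(model_id=".elser_model_2")  # model ID is illustrative
    print(stats)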
``_ diff --git a/elasticsearch/_sync/client/ingest.py b/elasticsearch/_sync/client/ingest.py index db211c1c3..513d62bb2 100644 --- a/elasticsearch/_sync/client/ingest.py +++ b/elasticsearch/_sync/client/ingest.py @@ -77,6 +77,59 @@ def delete_geoip_database( path_parts=__path_parts, ) + @_rewrite_parameters() + def delete_ip_location_database( + self, + *, + id: t.Union[str, t.Sequence[str]], + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Delete IP geolocation database configurations. + + ``_ + + :param id: A comma-separated list of IP location database configurations. + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. A value of `-1` indicates that the request should never + time out. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. A value + of `-1` indicates that the request should never time out. + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_ingest/ip_location/database/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "DELETE", + __path, + params=__query, + headers=__headers, + endpoint_id="ingest.delete_ip_location_database", + path_parts=__path_parts, + ) + @_rewrite_parameters() def delete_pipeline( self, @@ -217,6 +270,58 @@ def get_geoip_database( path_parts=__path_parts, ) + @_rewrite_parameters() + def get_ip_location_database( + self, + *, + id: t.Optional[t.Union[str, t.Sequence[str]]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Get IP geolocation database configurations. + + ``_ + + :param id: Comma-separated list of database configuration IDs to retrieve. Wildcard + (`*`) expressions are supported. To get all database configurations, omit + this parameter or use `*`. + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. A value of `-1` indicates that the request should never + time out. 
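For example, listing and removing IP geolocation database configurations (IDs are placeholders, `client` as above):

    print(client.ingest.get_ip_location_database(id="*"))   # all configurations
    client.ingest.delete_ip_location_database(id="my-ip-db")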
+ """ + __path_parts: t.Dict[str, str] + if id not in SKIP_IN_PATH: + __path_parts = {"id": _quote(id)} + __path = f'/_ingest/ip_location/database/{__path_parts["id"]}' + else: + __path_parts = {} + __path = "/_ingest/ip_location/database" + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="ingest.get_ip_location_database", + path_parts=__path_parts, + ) + @_rewrite_parameters() def get_pipeline( self, @@ -328,8 +433,8 @@ def put_geoip_database( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update GeoIP database configurations. Create or update IP geolocation - database configurations. + Create or update a GeoIP database configuration. Refer to the create or update + IP geolocation database configuration API. ``_ @@ -384,6 +489,74 @@ def put_geoip_database( path_parts=__path_parts, ) + @_rewrite_parameters( + body_name="configuration", + ) + def put_ip_location_database( + self, + *, + id: str, + configuration: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Create or update an IP geolocation database configuration. + + ``_ + + :param id: The database configuration identifier. + :param configuration: + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. A value of `-1` indicates that the request should never + time out. + :param timeout: The period to wait for a response from all relevant nodes in + the cluster after updating the cluster metadata. If no response is received + before the timeout expires, the cluster metadata update still applies but + the response indicates that it was not completely acknowledged. A value of + `-1` indicates that the request should never time out. + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + if configuration is None and body is None: + raise ValueError( + "Empty value passed for parameters 'configuration' and 'body', one of them should be set." 
+ ) + elif configuration is not None and body is not None: + raise ValueError("Cannot set both 'configuration' and 'body'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_ingest/ip_location/database/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + __body = configuration if configuration is not None else body + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="ingest.put_ip_location_database", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=( "deprecated", diff --git a/elasticsearch/_sync/client/license.py b/elasticsearch/_sync/client/license.py index 28c51de20..2f499dc3a 100644 --- a/elasticsearch/_sync/client/license.py +++ b/elasticsearch/_sync/client/license.py @@ -35,7 +35,9 @@ def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes licensing information for the cluster + Delete the license. When the license expires, your subscription level reverts + to Basic. If the operator privileges feature is enabled, only operator users + can use this API. ``_ """ @@ -72,9 +74,11 @@ def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get license information. Returns information about your Elastic license, including - its type, its status, when it was issued, and when it expires. For more information - about the different types of licenses, refer to [Elastic Stack subscriptions](https://www.elastic.co/subscriptions). + Get license information. Get information about your Elastic license including + its type, its status, when it was issued, and when it expires. NOTE: If the master + node is generating a new cluster state, the get license API may return a `404 + Not Found` response. If you receive an unexpected 404 response after cluster + startup, wait a short period and retry the request. ``_ @@ -120,7 +124,7 @@ def get_basic_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the status of the basic license. + Get the basic license status. ``_ """ @@ -155,7 +159,7 @@ def get_trial_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about the status of the trial license. + Get the trial status. ``_ """ @@ -196,7 +200,14 @@ def post( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Updates the license for the cluster. + Update the license. You can update your license at runtime without shutting down + your nodes. License updates take effect immediately. If the license you are installing + does not support all of the features that were available with your previous license, + however, you are notified in the response. You must then re-submit the API request + with the acknowledge parameter set to true. NOTE: If Elasticsearch security features + are enabled and you are installing a gold or higher license, you must enable + TLS on the transport networking layer before you install the license. 
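A quick way to inspect the current license and the eligibility for a basic or trial license, roughly (assuming the same `client` as above):

    lic = client.license.get()["license"]
    print(lic["type"], lic["status"], lic.get("expiry_date"))
    print(client.license.get_basic_status())  # e.g. {"eligible_to_start_basic": false}
    print(client.license.get_trial_status())  # e.g. {"eligible_to_start_trial": false}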
If the + operator privileges feature is enabled, only operator users can use this API. ``_ @@ -250,12 +261,13 @@ def post_start_basic( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - The start basic API enables you to initiate an indefinite basic license, which - gives access to all the basic features. If the basic license does not support - all of the features that are available with your current license, however, you - are notified in the response. You must then re-submit the API request with the - acknowledge parameter set to true. To check the status of your basic license, - use the following API: [Get basic status](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-basic-status.html). + Start a basic license. Start an indefinite basic license, which gives access + to all the basic features. NOTE: In order to start a basic license, you must + not currently have a basic license. If the basic license does not support all + of the features that are available with your current license, however, you are + notified in the response. You must then re-submit the API request with the `acknowledge` + parameter set to `true`. To check the status of your basic license, use the get + basic license API. ``_ @@ -297,8 +309,12 @@ def post_start_trial( type_query_string: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - The start trial API enables you to start a 30-day trial, which gives access to - all subscription features. + Start a trial. Start a 30-day trial, which gives access to all subscription features. + NOTE: You are allowed to start a trial only if your cluster has not already activated + a trial for the current major product version. For example, if you have already + activated a trial for v8.0, you cannot start a new trial until v9.0. You can, + however, request an extended trial at https://www.elastic.co/trialextension. + To check the status of your trial, use the get trial status API. ``_ diff --git a/elasticsearch/_sync/client/logstash.py b/elasticsearch/_sync/client/logstash.py index bba034538..5aee9c834 100644 --- a/elasticsearch/_sync/client/logstash.py +++ b/elasticsearch/_sync/client/logstash.py @@ -36,11 +36,12 @@ def delete_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a pipeline used for Logstash Central Management. + Delete a Logstash pipeline. Delete a pipeline that is used for Logstash Central + Management. ``_ - :param id: Identifier for the pipeline. + :param id: An identifier for the pipeline. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -76,11 +77,11 @@ def get_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves pipelines used for Logstash Central Management. + Get Logstash pipelines. Get pipelines that are used for Logstash Central Management. ``_ - :param id: Comma-separated list of pipeline identifiers. + :param id: A comma-separated list of pipeline identifiers. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: @@ -123,11 +124,12 @@ def put_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a pipeline used for Logstash Central Management. + Create or update a Logstash pipeline. Create a pipeline that is used for Logstash + Central Management. If the specified pipeline exists, it is replaced. ``_ - :param id: Identifier for the pipeline. + :param id: An identifier for the pipeline. 
:param pipeline: """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") diff --git a/elasticsearch/_sync/client/migration.py b/elasticsearch/_sync/client/migration.py index bb6f718d6..e11cf7ca0 100644 --- a/elasticsearch/_sync/client/migration.py +++ b/elasticsearch/_sync/client/migration.py @@ -36,9 +36,10 @@ def deprecations( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves information about different cluster, node, and index level settings - that use deprecated features that will be removed or changed in the next major - version. + Get deprecation information. Get information about different cluster, node, and + index level settings that use deprecated features that will be removed or changed + in the next major version. TIP: This API is designed for indirect use by the + Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. ``_ @@ -81,7 +82,11 @@ def get_feature_upgrade_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Find out whether system features need to be upgraded or not + Get feature migration information. Version upgrades sometimes require changes + to how features store configuration information and data in system indices. Check + which features need to be migrated and the status of any migrations that are + in progress. TIP: This API is designed for indirect use by the Upgrade Assistant. + We strongly recommend you use the Upgrade Assistant. ``_ """ @@ -116,7 +121,11 @@ def post_feature_upgrade( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Begin upgrades for system features + Start the feature migration. Version upgrades sometimes require changes to how + features store configuration information and data in system indices. This API + starts the automatic migration process. Some functionality might be temporarily + unavailable during the migration process. TIP: The API is designed for indirect + use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. ``_ """ diff --git a/elasticsearch/_sync/client/ml.py b/elasticsearch/_sync/client/ml.py index ebf72ef18..9115844dd 100644 --- a/elasticsearch/_sync/client/ml.py +++ b/elasticsearch/_sync/client/ml.py @@ -2488,6 +2488,7 @@ def get_trained_models( ], ] ] = None, + include_model_definition: t.Optional[bool] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, tags: t.Optional[t.Union[str, t.Sequence[str]]] = None, @@ -2514,6 +2515,8 @@ def get_trained_models( :param from_: Skips the specified number of models. :param include: A comma delimited string of optional fields to include in the response body. + :param include_model_definition: This parameter is deprecated. Use `include=definition` + instead. :param size: Specifies the maximum number of models to obtain. :param tags: A comma delimited string of tags. A trained model can have many tags, or none. When supplied, only trained models that contain all the supplied @@ -2543,6 +2546,8 @@ __query["human"] = human if include is not None: __query["include"] = include + if include_model_definition is not None: + __query["include_model_definition"] = include_model_definition if pretty is not None: __query["pretty"] = pretty if size is not None: @@ -2697,7 +2702,7 @@ def info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Return ML defaults and limits. Returns defaults and limits used by machine learning. + Get machine learning information. Get defaults and limits used by machine learning.
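A quick note on the get_trained_models change above: `include_model_definition` is exposed only for backwards compatibility, so new code should prefer `include`. A hedged sketch, with an illustrative model id:

```
# Preferred: request the model definition through `include`.
client.ml.get_trained_models(model_id="my-trained-model", include="definition")

# Deprecated equivalent, kept for older callers.
client.ml.get_trained_models(model_id="my-trained-model", include_model_definition=True)
```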
This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be used to find out @@ -3169,9 +3174,11 @@ def put_calendar_job( "description", "headers", "max_num_threads", + "meta", "model_memory_limit", "version", ), + parameter_aliases={"_meta": "meta"}, ignore_deprecated_options={"headers"}, ) def put_data_frame_analytics( @@ -3189,6 +3196,7 @@ def put_data_frame_analytics( headers: t.Optional[t.Mapping[str, t.Union[str, t.Sequence[str]]]] = None, human: t.Optional[bool] = None, max_num_threads: t.Optional[int] = None, + meta: t.Optional[t.Mapping[str, t.Any]] = None, model_memory_limit: t.Optional[str] = None, pretty: t.Optional[bool] = None, version: t.Optional[str] = None, @@ -3249,6 +3257,7 @@ def put_data_frame_analytics( Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. + :param meta: :param model_memory_limit: The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs @@ -3293,6 +3302,8 @@ def put_data_frame_analytics( __body["headers"] = headers if max_num_threads is not None: __body["max_num_threads"] = max_num_threads + if meta is not None: + __body["_meta"] = meta if model_memory_limit is not None: __body["model_memory_limit"] = model_memory_limit if version is not None: @@ -3311,6 +3322,7 @@ def put_data_frame_analytics( @_rewrite_parameters( body_fields=( "aggregations", + "aggs", "chunking_config", "delayed_data_check_config", "frequency", @@ -3333,6 +3345,7 @@ def put_datafeed( *, datafeed_id: str, aggregations: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, + aggs: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, allow_no_indices: t.Optional[bool] = None, chunking_config: t.Optional[t.Mapping[str, t.Any]] = None, delayed_data_check_config: t.Optional[t.Mapping[str, t.Any]] = None, @@ -3386,6 +3399,8 @@ def put_datafeed( :param aggregations: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. + :param aggs: If set, the datafeed performs aggregation searches. Support for + aggregations is limited and should be used only with low cardinality data. :param allow_no_indices: If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. 
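To illustrate the `aggs` alias that put_datafeed now accepts alongside `aggregations`, a rough sketch; the job id, index pattern, and aggregation tree are illustrative:

```
client.ml.put_datafeed(
    datafeed_id="datafeed-response-times",
    job_id="response-times",
    indices=["web-metrics-*"],
    aggs={
        "buckets": {
            "date_histogram": {"field": "@timestamp", "fixed_interval": "5m"},
            "aggregations": {
                "@timestamp": {"max": {"field": "@timestamp"}},
                "responsetime": {"avg": {"field": "responsetime"}},
            },
        }
    },
)
```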
@@ -3473,6 +3488,8 @@ def put_datafeed( if not __body: if aggregations is not None: __body["aggregations"] = aggregations + if aggs is not None: + __body["aggs"] = aggs if chunking_config is not None: __body["chunking_config"] = chunking_config if delayed_data_check_config is not None: @@ -3595,6 +3612,7 @@ def put_job( analysis_config: t.Optional[t.Mapping[str, t.Any]] = None, data_description: t.Optional[t.Mapping[str, t.Any]] = None, allow_lazy_open: t.Optional[bool] = None, + allow_no_indices: t.Optional[bool] = None, analysis_limits: t.Optional[t.Mapping[str, t.Any]] = None, background_persist_interval: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] @@ -3604,9 +3622,19 @@ def put_job( datafeed_config: t.Optional[t.Mapping[str, t.Any]] = None, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, + expand_wildcards: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] + ], + t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], + ] + ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, groups: t.Optional[t.Sequence[str]] = None, human: t.Optional[bool] = None, + ignore_throttled: t.Optional[bool] = None, + ignore_unavailable: t.Optional[bool] = None, model_plot_config: t.Optional[t.Mapping[str, t.Any]] = None, model_snapshot_retention_days: t.Optional[int] = None, pretty: t.Optional[bool] = None, @@ -3641,6 +3669,9 @@ def put_job( to true, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available. + :param allow_no_indices: If `true`, wildcard indices expressions that resolve + into no concrete indices are ignored. This includes the `_all` string or + when no indices are specified. :param analysis_limits: Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for @@ -3664,7 +3695,20 @@ def put_job( using those same roles. If you provide secondary authorization headers, those credentials are used instead. :param description: A description of the job. + :param expand_wildcards: Type of index that wildcard patterns can match. If the + request can target data streams, this argument determines whether wildcard + expressions match hidden data streams. Supports comma-separated values. Valid + values are: * `all`: Match any data stream or index, including hidden ones. + * `closed`: Match closed, non-hidden indices. Also matches any non-hidden + data stream. Data streams cannot be closed. * `hidden`: Match hidden data + streams and hidden indices. Must be combined with `open`, `closed`, or both. + * `none`: Wildcard patterns are not accepted. * `open`: Match open, non-hidden + indices. Also matches any non-hidden data stream. :param groups: A list of job groups. A job can belong to no groups or many. + :param ignore_throttled: If `true`, concrete, expanded or aliased indices are + ignored when frozen. + :param ignore_unavailable: If `true`, unavailable indices (missing or closed) + are ignored. :param model_plot_config: This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. 
If you enable model plot it can add considerable overhead to the performance @@ -3704,12 +3748,20 @@ def put_job( __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} + if allow_no_indices is not None: + __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace + if expand_wildcards is not None: + __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if ignore_throttled is not None: + __query["ignore_throttled"] = ignore_throttled + if ignore_unavailable is not None: + __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if not __body: @@ -5469,7 +5521,7 @@ def validate_detector( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Validates an anomaly detection detector. + Validate an anomaly detection job. ``_ diff --git a/elasticsearch/_sync/client/monitoring.py b/elasticsearch/_sync/client/monitoring.py index 0a97e0202..2de29f47c 100644 --- a/elasticsearch/_sync/client/monitoring.py +++ b/elasticsearch/_sync/client/monitoring.py @@ -42,7 +42,8 @@ def bulk( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Used by the monitoring features to send monitoring data. + Send monitoring data. This API is used by the monitoring features to send monitoring + data. ``_ diff --git a/elasticsearch/_sync/client/rollup.py b/elasticsearch/_sync/client/rollup.py index 75d86e361..6dd9f386e 100644 --- a/elasticsearch/_sync/client/rollup.py +++ b/elasticsearch/_sync/client/rollup.py @@ -43,7 +43,20 @@ def delete_job( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an existing rollup job. + Delete a rollup job. A job must be stopped before it can be deleted. If you attempt + to delete a started job, an error occurs. Similarly, if you attempt to delete + a nonexistent job, an exception occurs. IMPORTANT: When you delete a job, you + remove only the process that is actively monitoring and rolling up data. The + API does not delete any previously rolled up data. This is by design; a user + may wish to roll up a static data set. Because the data set is static, after + it has been fully rolled up there is no need to keep the indexing rollup job + around (as there will be no new data). Thus the job can be deleted, leaving behind + the rolled up data for analysis. If you wish to also remove the rollup data and + the rollup index contains the data for only a single job, you can delete the + whole rollup index. If the rollup index stores data from several jobs, you must + issue a delete-by-query that targets the rollup job's identifier in the rollup + index. For example: ``` POST my_rollup_index/_delete_by_query { "query": { "term": + { "_rollup.id": "the_rollup_job_id" } } } ``` ``_ @@ -84,7 +97,11 @@ def get_jobs( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the configuration, stats, and status of rollup jobs. + Get rollup job information. Get the configuration, stats, and status of rollup + jobs. NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs. + If a job was created, ran for a while, then was deleted, the API does not return + any details about it. For details about a historical rollup job, the rollup capabilities + API may be more useful. 
``_ @@ -129,8 +146,15 @@ def get_rollup_caps( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the capabilities of any rollup jobs that have been configured for a specific - index or index pattern. + Get the rollup job capabilities. Get the capabilities of any rollup jobs that + have been configured for a specific index or index pattern. This API is useful + because a rollup job is often configured to rollup only a subset of fields from + the source index. Furthermore, only certain aggregations can be configured for + various fields, leading to a limited subset of functionality depending on that + configuration. This API enables you to inspect an index and determine: 1. Does + this index have associated rollup data somewhere in the cluster? 2. If yes to + the first question, what fields were rolled up, what aggregations can be performed, + and where does the data live? ``_ @@ -175,8 +199,12 @@ def get_rollup_index_caps( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the rollup capabilities of all jobs inside of a rollup index (for example, - the index where rollup data is stored). + Get the rollup index capabilities. Get the rollup capabilities of all jobs inside + of a rollup index. A single rollup index may store the data for multiple rollup + jobs and may have a variety of capabilities depending on those jobs. This API + enables you to determine: * What jobs are stored in an index (or indices specified + via a pattern)? * What target indices were rolled up, what fields were used in + those rollups, and what aggregations can be performed on each job? ``_ @@ -239,7 +267,16 @@ def put_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a rollup job. + Create a rollup job. WARNING: From 8.15.0, calling this API in a cluster with + no rollup usage will fail with a message about the deprecation and planned removal + of rollup features. A cluster needs to contain either a rollup job or a rollup + index in order for this API to be allowed to run. The rollup job configuration + contains all the details about how the job should run, when it indexes documents, + and what future queries will be able to run against the rollup index. There are + three main sections to the job configuration: the logistical details about the + job (for example, the cron schedule), the fields that are used for grouping, + and what metrics to collect for each group. Jobs are created in a `STOPPED` state. + You can start them with the start rollup jobs API. ``_ @@ -356,7 +393,11 @@ def rollup_search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Enables searching rolled-up data using the standard Query DSL. + Search rolled-up data. The rollup search endpoint is needed because, internally, + rolled-up documents utilize a different document structure than the original + data. It rewrites standard Query DSL into a format that matches the rollup documents + then takes the response and rewrites it back to what a client would expect given + the original query. ``_ @@ -420,7 +461,8 @@ def start_job( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts an existing, stopped rollup job. + Start rollup jobs. If you try to start a job that does not exist, an exception + occurs. If you try to start a job that is already started, nothing happens. 
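Given the start and stop semantics described above, a minimal sketch of driving a rollup job from the client; the job id is illustrative and the job is assumed to already exist:

```
# Start a previously created rollup job.
client.rollup.start_job(id="sensor_rollup")

# Later, stop it and block until it has fully stopped.
client.rollup.stop_job(id="sensor_rollup", wait_for_completion=True, timeout="30s")
```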
``_ @@ -463,7 +505,8 @@ def stop_job( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops an existing, started rollup job. + Stop rollup jobs. If you try to stop a job that does not exist, an exception + occurs. If you try to stop a job that is already stopped, nothing happens. ``_ diff --git a/elasticsearch/_sync/client/search_application.py b/elasticsearch/_sync/client/search_application.py index 42b042434..88e4b5531 100644 --- a/elasticsearch/_sync/client/search_application.py +++ b/elasticsearch/_sync/client/search_application.py @@ -216,7 +216,7 @@ def list( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the existing search applications. + Get search applications. Get information about search applications. ``_ @@ -251,6 +251,71 @@ def list( path_parts=__path_parts, ) + @_rewrite_parameters( + body_name="payload", + ) + @_stability_warning(Stability.EXPERIMENTAL) + def post_behavioral_analytics_event( + self, + *, + collection_name: str, + event_type: t.Union[str, t.Literal["page_view", "search", "search_click"]], + payload: t.Optional[t.Any] = None, + body: t.Optional[t.Any] = None, + debug: t.Optional[bool] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Create a behavioral analytics collection event. + + ``_ + + :param collection_name: The name of the behavioral analytics collection. + :param event_type: The analytics event type. + :param payload: + :param debug: Whether the response type has to include more details + """ + if collection_name in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'collection_name'") + if event_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'event_type'") + if payload is None and body is None: + raise ValueError( + "Empty value passed for parameters 'payload' and 'body', one of them should be set." 
+ ) + elif payload is not None and body is not None: + raise ValueError("Cannot set both 'payload' and 'body'") + __path_parts: t.Dict[str, str] = { + "collection_name": _quote(collection_name), + "event_type": _quote(event_type), + } + __path = f'/_application/analytics/{__path_parts["collection_name"]}/event/{__path_parts["event_type"]}' + __query: t.Dict[str, t.Any] = {} + if debug is not None: + __query["debug"] = debug + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __body = payload if payload is not None else body + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="search_application.post_behavioral_analytics_event", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_name="search_application", ) @@ -351,6 +416,70 @@ def put_behavioral_analytics( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("params",), + ignore_deprecated_options={"params"}, + ) + @_stability_warning(Stability.EXPERIMENTAL) + def render_query( + self, + *, + name: str, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + params: t.Optional[t.Mapping[str, t.Any]] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Render a search application query. Generate an Elasticsearch query using the + specified query parameters and the search template associated with the search + application or a default template if none is specified. If a parameter used in + the search template is not specified in `params`, the parameter's default value + will be used. The API returns the specific Elasticsearch query that would be + generated and run by calling the search application search API. You must have + `read` privileges on the backing alias of the search application. + + ``_ + + :param name: The name of the search application to render the query for.
+ :param params: + """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'name'") + __path_parts: t.Dict[str, str] = {"name": _quote(name)} + __path = ( + f'/_application/search_application/{__path_parts["name"]}/_render_query' + ) + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if params is not None: + __body["params"] = params + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="search_application.render_query", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=("params",), ignore_deprecated_options={"params"}, diff --git a/elasticsearch/_sync/client/searchable_snapshots.py b/elasticsearch/_sync/client/searchable_snapshots.py index 45104eefc..b4acbbef3 100644 --- a/elasticsearch/_sync/client/searchable_snapshots.py +++ b/elasticsearch/_sync/client/searchable_snapshots.py @@ -44,7 +44,8 @@ def cache_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieve node-level cache statistics about searchable snapshots. + Get cache statistics. Get statistics about the shared cache for partially mounted + indices. ``_ @@ -103,7 +104,8 @@ def clear_cache( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear the cache of searchable snapshots. + Clear the cache. Clear indices and data streams from the shared cache for partially + mounted indices. ``_ @@ -175,7 +177,9 @@ def mount( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Mount a snapshot as a searchable index. + Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use + this API for snapshots managed by index lifecycle management (ILM). Manually + mounting ILM-managed snapshots can interfere with ILM processes. ``_ @@ -255,7 +259,7 @@ def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieve shard-level statistics about searchable snapshots. + Get searchable snapshot statistics. ``_ diff --git a/elasticsearch/_sync/client/security.py b/elasticsearch/_sync/client/security.py index c139f2868..5c962f5f8 100644 --- a/elasticsearch/_sync/client/security.py +++ b/elasticsearch/_sync/client/security.py @@ -2326,6 +2326,230 @@ def invalidate_token( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("nonce", "redirect_uri", "state", "realm"), + ) + def oidc_authenticate( + self, + *, + nonce: t.Optional[str] = None, + redirect_uri: t.Optional[str] = None, + state: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + realm: t.Optional[str] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Authenticate OpenID Connect. Exchange an OpenID Connect authentication response + message for an Elasticsearch internal access token and refresh token that can + be subsequently used for authentication. 
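Before continuing with the OpenID Connect additions, a brief sketch of the two experimental search application helpers introduced above; the collection name, application name, parameters, and event payload are illustrative:

```
# Record a behavioral analytics event; the payload schema is defined by the
# event type, and the fields shown here are illustrative only.
client.search_application.post_behavioral_analytics_event(
    collection_name="website-analytics",
    event_type="search_click",
    payload={"session": {"id": "session-1"}, "user": {"id": "user-1"}},
)

# Render the query that the search application would run for these parameters.
client.search_application.render_query(
    name="my-search-app",
    params={"query_string": "kayaking"},
)
```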
Elasticsearch exposes all the necessary + OpenID Connect related functionality with the OpenID Connect APIs. These APIs + are used internally by Kibana in order to provide OpenID Connect based authentication, + but can also be used by other, custom web applications or other clients. + + ``_ + + :param nonce: Associate a client session with an ID token and mitigate replay + attacks. This value needs to be the same as the one that was provided to + the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch + and included in the response to that call. + :param redirect_uri: The URL to which the OpenID Connect Provider redirected + the User Agent in response to an authentication request after a successful + authentication. This URL must be provided as-is (URL encoded), taken from + the body of the response or as the value of a location header in the response + from the OpenID Connect Provider. + :param state: Maintain state between the authentication request and the response. + This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` + API or the one that was generated by Elasticsearch and included in the response + to that call. + :param realm: The name of the OpenID Connect realm. This property is useful in + cases where multiple realms are defined. + """ + if nonce is None and body is None: + raise ValueError("Empty value passed for parameter 'nonce'") + if redirect_uri is None and body is None: + raise ValueError("Empty value passed for parameter 'redirect_uri'") + if state is None and body is None: + raise ValueError("Empty value passed for parameter 'state'") + __path_parts: t.Dict[str, str] = {} + __path = "/_security/oidc/authenticate" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if nonce is not None: + __body["nonce"] = nonce + if redirect_uri is not None: + __body["redirect_uri"] = redirect_uri + if state is not None: + __body["state"] = state + if realm is not None: + __body["realm"] = realm + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="security.oidc_authenticate", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("access_token", "refresh_token"), + ) + def oidc_logout( + self, + *, + access_token: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + refresh_token: t.Optional[str] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Logout of OpenID Connect. Invalidate an access token and a refresh token that + were generated as a response to the `/_security/oidc/authenticate` API. If the + OpenID Connect authentication realm in Elasticsearch is accordingly configured, + the response to this call will contain a URI pointing to the end session endpoint + of the OpenID Connect Provider in order to perform single logout. Elasticsearch + exposes all the necessary OpenID Connect related functionality with the OpenID + Connect APIs. 
These APIs are used internally by Kibana in order to provide OpenID + Connect based authentication, but can also be used by other, custom web applications + or other clients. + + ``_ + + :param access_token: The access token to be invalidated. + :param refresh_token: The refresh token to be invalidated. + """ + if access_token is None and body is None: + raise ValueError("Empty value passed for parameter 'access_token'") + __path_parts: t.Dict[str, str] = {} + __path = "/_security/oidc/logout" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if access_token is not None: + __body["access_token"] = access_token + if refresh_token is not None: + __body["refresh_token"] = refresh_token + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="security.oidc_logout", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("iss", "login_hint", "nonce", "realm", "state"), + ) + def oidc_prepare_authentication( + self, + *, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + iss: t.Optional[str] = None, + login_hint: t.Optional[str] = None, + nonce: t.Optional[str] = None, + pretty: t.Optional[bool] = None, + realm: t.Optional[str] = None, + state: t.Optional[str] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Prepare OpenID connect authentication. Create an oAuth 2.0 authentication request + as a URL string based on the configuration of the OpenID Connect authentication + realm in Elasticsearch. The response of this API is a URL pointing to the Authorization + Endpoint of the configured OpenID Connect Provider, which can be used to redirect + the browser of the user in order to continue the authentication process. Elasticsearch + exposes all the necessary OpenID Connect related functionality with the OpenID + Connect APIs. These APIs are used internally by Kibana in order to provide OpenID + Connect based authentication, but can also be used by other, custom web applications + or other clients. + + ``_ + + :param iss: In the case of a third party initiated single sign on, this is the + issuer identifier for the OP that the RP is to send the authentication request + to. It cannot be specified when *realm* is specified. One of *realm* or *iss* + is required. + :param login_hint: In the case of a third party initiated single sign on, it + is a string value that is included in the authentication request as the *login_hint* + parameter. This parameter is not valid when *realm* is specified. + :param nonce: The value used to associate a client session with an ID token and + to mitigate replay attacks. If the caller of the API does not provide a value, + Elasticsearch will generate one with sufficient entropy and return it in + the response. + :param realm: The name of the OpenID Connect realm in Elasticsearch the configuration + of which should be used in order to generate the authentication request. + It cannot be specified when *iss* is specified. 
One of *realm* or *iss* is + required. + :param state: The value used to maintain state between the authentication request + and the response, typically used as a Cross-Site Request Forgery mitigation. + If the caller of the API does not provide a value, Elasticsearch will generate + one with sufficient entropy and return it in the response. + """ + __path_parts: t.Dict[str, str] = {} + __path = "/_security/oidc/prepare" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if iss is not None: + __body["iss"] = iss + if login_hint is not None: + __body["login_hint"] = login_hint + if nonce is not None: + __body["nonce"] = nonce + if realm is not None: + __body["realm"] = realm + if state is not None: + __body["state"] = state + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="security.oidc_prepare_authentication", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_name="privileges", ) diff --git a/elasticsearch/_sync/client/shutdown.py b/elasticsearch/_sync/client/shutdown.py index e08eb469a..bfa561089 100644 --- a/elasticsearch/_sync/client/shutdown.py +++ b/elasticsearch/_sync/client/shutdown.py @@ -42,8 +42,13 @@ def delete_node( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes a node from the shutdown list. Designed for indirect use by ECE/ESS and - ECK. Direct use is not supported. + Cancel node shutdown preparations. Remove a node from the shutdown list so it + can resume normal operations. You must explicitly clear the shutdown request + when a node rejoins the cluster or when a node has permanently left the cluster. + Shutdown requests are never removed automatically by Elasticsearch. NOTE: This + feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, + and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator + privileges feature is enabled, you must be an operator to use this API. ``_ @@ -98,8 +103,13 @@ def get_node( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieve status of a node or nodes that are currently marked as shutting down. - Designed for indirect use by ECE/ESS and ECK. Direct use is not supported. + Get the shutdown status. Get information about nodes that are ready to be shut + down, have shut down preparations still in progress, or have stalled. The API + returns status information for each part of the shut down process. NOTE: This + feature is designed for indirect use by Elasticsearch Service, Elastic Cloud + Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If + the operator privileges feature is enabled, you must be an operator to use this + API. ``_ @@ -166,8 +176,17 @@ def put_node( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Adds a node to be shut down. Designed for indirect use by ECE/ESS and ECK. Direct - use is not supported. + Prepare a node to be shut down. NOTE: This feature is designed for indirect use + by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. + Direct use is not supported. 
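Taken together, the three OpenID Connect endpoints above support a token flow along the following lines. This is a rough sketch: the realm name and redirect URI are illustrative, and the response fields are read as documented for the OIDC APIs:

```
# 1. Prepare an authentication request for a configured OIDC realm.
prepared = client.security.oidc_prepare_authentication(realm="oidc1")

# ... redirect the user to the returned authorization URL and let them log in ...

# 2. Exchange the provider's response for Elasticsearch tokens.
tokens = client.security.oidc_authenticate(
    realm="oidc1",
    nonce=prepared["nonce"],
    state=prepared["state"],
    redirect_uri="https://my-app.example.com/oidc/callback?code=...&state=...",
)

# 3. Invalidate the tokens when the user logs out.
client.security.oidc_logout(
    access_token=tokens["access_token"],
    refresh_token=tokens["refresh_token"],
)
```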
If the operator privileges feature is enabled, you + must be an operator to use this API. The API migrates ongoing tasks and index + shards to other nodes as needed to prepare a node to be restarted or shut down + and removed from the cluster. This ensures that Elasticsearch can be stopped + safely with minimal disruption to the cluster. You must specify the type of shutdown: + `restart`, `remove`, or `replace`. If a node is already being prepared for shutdown, + you can use this API to change the shutdown type. IMPORTANT: This API does NOT + terminate the Elasticsearch process. Monitor the node shutdown status to determine + when it is safe to stop Elasticsearch. ``_ diff --git a/elasticsearch/_sync/client/slm.py b/elasticsearch/_sync/client/slm.py index 5b6054d72..024264344 100644 --- a/elasticsearch/_sync/client/slm.py +++ b/elasticsearch/_sync/client/slm.py @@ -36,7 +36,9 @@ def delete_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes an existing snapshot lifecycle policy. + Delete a policy. Delete a snapshot lifecycle policy definition. This operation + prevents any future snapshots from being taken but does not cancel in-progress + snapshots or remove previously-taken snapshots. ``_ @@ -76,8 +78,10 @@ def execute_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Immediately creates a snapshot according to the lifecycle policy, without waiting - for the scheduled time. + Run a policy. Immediately create a snapshot according to the snapshot lifecycle + policy without waiting for the scheduled time. The snapshot policy is normally + applied according to its schedule, but you might want to manually run a policy + before performing an upgrade or other maintenance. ``_ @@ -116,7 +120,9 @@ def execute_retention( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes any snapshots that are expired according to the policy's retention rules. + Run a retention policy. Manually apply the retention policy to force immediate + removal of snapshots that are expired according to the snapshot lifecycle policy + retention rules. The retention policy is normally applied according to its schedule. ``_ """ @@ -152,8 +158,8 @@ def get_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves one or more snapshot lifecycle policy definitions and information about - the latest snapshot attempts. + Get policy information. Get snapshot lifecycle policy definitions and information + about the latest snapshot attempts. ``_ @@ -195,8 +201,8 @@ def get_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns global and policy-level statistics about actions taken by snapshot lifecycle - management. + Get snapshot lifecycle management statistics. Get global and policy-level statistics + about actions taken by snapshot lifecycle management. ``_ """ @@ -231,7 +237,7 @@ def get_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the status of snapshot lifecycle management (SLM). + Get the snapshot lifecycle management status. ``_ """ @@ -277,12 +283,14 @@ def put_lifecycle( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates or updates a snapshot lifecycle policy. + Create or update a policy. Create or update a snapshot lifecycle policy. If the + policy already exists, this request increments the policy version. Only the latest + version of a policy is stored. 
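As a concrete companion to the policy APIs above, a sketch of creating a policy and then running it manually; the policy id, schedule, repository, and retention settings are illustrative:

```
client.slm.put_lifecycle(
    policy_id="nightly-snapshots",
    name="<nightly-snap-{now/d}>",
    schedule="0 30 1 * * ?",
    repository="my_repository",
    config={"indices": ["data-*"], "include_global_state": False},
    retention={"expire_after": "30d", "min_count": 5, "max_count": 50},
)

# Trigger the policy immediately instead of waiting for its schedule.
client.slm.execute_lifecycle(policy_id="nightly-snapshots")
```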
``_ - :param policy_id: ID for the snapshot lifecycle policy you want to create or - update. + :param policy_id: The identifier for the snapshot lifecycle policy you want to + create or update. :param config: Configuration for each snapshot created by the policy. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and @@ -354,7 +362,9 @@ def start( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Turns on snapshot lifecycle management (SLM). + Start snapshot lifecycle management. Snapshot lifecycle management (SLM) starts + automatically when a cluster is formed. Manually starting SLM is necessary only + if it has been stopped using the stop SLM API. ``_ """ @@ -389,7 +399,15 @@ def stop( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Turns off snapshot lifecycle management (SLM). + Stop snapshot lifecycle management. Stop all snapshot lifecycle management (SLM) + operations and the SLM plugin. This API is useful when you are performing maintenance + on a cluster and need to prevent SLM from performing any actions on your data + streams or indices. Stopping SLM does not stop any snapshots that are in progress. + You can manually trigger snapshots with the run snapshot lifecycle policy API + even if SLM is stopped. The API returns a response as soon as the request is + acknowledged, but the plugin might continue to run until in-progress operations + complete and it can be safely stopped. Use the get snapshot lifecycle management + status API to see if SLM is running. ``_ """ diff --git a/elasticsearch/_sync/client/snapshot.py b/elasticsearch/_sync/client/snapshot.py index e65dee0ec..8d6665239 100644 --- a/elasticsearch/_sync/client/snapshot.py +++ b/elasticsearch/_sync/client/snapshot.py @@ -44,8 +44,8 @@ def cleanup_repository( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Triggers the review of a snapshot repository’s contents and deletes any stale - data not referenced by existing snapshots. + Clean up the snapshot repository. Trigger the review of the contents of a snapshot + repository and delete any stale data not referenced by existing snapshots. ``_ @@ -99,7 +99,8 @@ def clone( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clones indices from one snapshot into another snapshot in the same repository. + Clone a snapshot. Clone part or all of a snapshot into another snapshot in the + same repository. ``_ @@ -182,7 +183,7 @@ def create( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a snapshot in a repository. + Create a snapshot. Take a snapshot of a cluster or of data streams and indices. ``_ @@ -286,7 +287,11 @@ def create_repository( verify: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a repository. + Create or update a snapshot repository. IMPORTANT: If you are migrating searchable + snapshots, the repository name must be identical in the source and destination + clusters. To register a snapshot repository, the cluster's global metadata must + be writeable. Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` + and `cluster.blocks.read_only_allow_delete` settings) that prevent write access. ``_ @@ -346,7 +351,7 @@ def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes one or more snapshots. + Delete snapshots.
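For the repository registration and snapshot creation endpoints documented above, a minimal sketch assuming the current 8.x signatures (the repository definition is passed as a single body object); the repository type, location, and index pattern are illustrative:

```
# Register (or update) a shared filesystem repository.
client.snapshot.create_repository(
    name="my_repository",
    repository={"type": "fs", "settings": {"location": "/mnt/backups/my_repository"}},
)

# Take a snapshot of matching data streams and indices and wait for it to finish.
client.snapshot.create(
    repository="my_repository",
    snapshot="snapshot_1",
    indices="data-*",
    wait_for_completion=True,
)
```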
``_ @@ -397,7 +402,9 @@ def delete_repository( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a repository. + Delete snapshot repositories. When a repository is unregistered, Elasticsearch + removes only the reference to the location where the repository is storing the + snapshots. The snapshots themselves are left untouched and in place. ``_ @@ -471,7 +478,7 @@ def get( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about a snapshot. + Get snapshot information. ``_ @@ -583,7 +590,7 @@ def get_repository( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about a repository. + Get snapshot repository information. ``_ @@ -642,7 +649,40 @@ def repository_verify_integrity( verify_blob_contents: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Verifies the integrity of the contents of a snapshot repository + Verify the repository integrity. Verify the integrity of the contents of a snapshot + repository. This API enables you to perform a comprehensive check of the contents + of a repository, looking for any anomalies in its data or metadata which might + prevent you from restoring snapshots from the repository or which might cause + future snapshot create or delete operations to fail. If you suspect the integrity + of the contents of one of your snapshot repositories, cease all write activity + to this repository immediately, set its `read_only` option to `true`, and use + this API to verify its integrity. Until you do so: * It may not be possible to + restore some snapshots from this repository. * Searchable snapshots may report + errors when searched or may have unassigned shards. * Taking snapshots into this + repository may fail or may appear to succeed but have created a snapshot which + cannot be restored. * Deleting snapshots from this repository may fail or may + appear to succeed but leave the underlying data on disk. * Continuing to write + to the repository while it is in an invalid state may cause additional damage + to its contents. If the API finds any problems with the integrity of the contents + of your repository, Elasticsearch will not be able to repair the damage. The + only way to bring the repository back into a fully working state after its contents + have been damaged is by restoring its contents from a repository backup which + was taken before the damage occurred. You must also identify what caused the + damage and take action to prevent it from happening again. If you cannot restore + a repository backup, register a new repository and use this for all future snapshot + operations. In some cases it may be possible to recover some of the contents + of a damaged repository, either by restoring as many of its snapshots as needed + and taking new snapshots of the restored data, or by using the reindex API to + copy data from any searchable snapshots mounted from the damaged repository. + Avoid all operations which write to the repository while the verify repository + integrity API is running. If something changes the repository contents while + an integrity verification is running then Elasticsearch may incorrectly report + having detected some anomalies in its contents due to the concurrent writes. + It may also incorrectly fail to report some anomalies that the concurrent writes + prevented it from detecting. NOTE: This API is intended for exploratory use by + humans.
You should expect the request parameters and the response format to vary + in future versions. NOTE: This API may not work correctly in a mixed-version + cluster. ``_ @@ -739,7 +779,20 @@ def restore( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Restores a snapshot. + Restore a snapshot. Restore a snapshot of a cluster or data streams and indices. + You can restore a snapshot only to a running cluster with an elected master node. + The snapshot repository must be registered and available to the cluster. The + snapshot and cluster versions must be compatible. To restore a snapshot, the + cluster's global metadata must be writable. Ensure there aren't any cluster blocks + that prevent writes. The restore operation ignores index blocks. Before you restore + a data stream, ensure the cluster contains a matching index template with data + streams enabled. To check, use the index management feature in Kibana or the + get index template API: ``` GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream + ``` If no such template exists, you can create one or restore a cluster state + that contains one. Without a matching index template, a data stream can't roll + over or create backing indices. If your snapshot contains data from App Search + or Workplace Search, you must restore the Enterprise Search encryption key before + you restore the snapshot. ``_ @@ -832,7 +885,18 @@ def status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns information about the status of a snapshot. + Get the snapshot status. Get a detailed description of the current state for + each shard participating in the snapshot. Note that this API should be used only + to obtain detailed shard-level information for ongoing snapshots. If this detail + is not needed or you want to obtain information about one or more existing snapshots, + use the get snapshot API. WARNING: Using the API to return the status of any + snapshots other than currently running snapshots can be expensive. The API requires + a read from the repository for each shard in each snapshot. For example, if you + have 100 snapshots with 1,000 shards each, an API request that includes all snapshots + will require 100,000 reads (100 snapshots x 1,000 shards). Depending on the latency + of your storage, such requests can take an extremely long time to return results. + These requests can also tax machine resources and, when using cloud storage, + incur high processing costs. ``_ @@ -891,7 +955,8 @@ def verify_repository( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Verifies a repository. + Verify a snapshot repository. Check for common misconfigurations in a snapshot + repository. ``_ diff --git a/elasticsearch/_sync/client/tasks.py b/elasticsearch/_sync/client/tasks.py index ab15a6c0c..89f210a88 100644 --- a/elasticsearch/_sync/client/tasks.py +++ b/elasticsearch/_sync/client/tasks.py @@ -47,7 +47,17 @@ def cancel( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Cancels a task, if it can be cancelled through an API. + Cancel a task. A task may continue to run for some time after it has been cancelled + because it may not be able to safely stop its current activity straight away. + It is also possible that Elasticsearch must complete its work on other tasks + before it can process the cancellation.
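To go with the restore documentation above, a hedged sketch that restores the snapshot taken earlier while renaming the restored indices so they do not clash with live ones; names and patterns are illustrative:

```
client.snapshot.restore(
    repository="my_repository",
    snapshot="snapshot_1",
    indices="data-*",
    rename_pattern="(.+)",
    rename_replacement="restored_$1",
    wait_for_completion=True,
)
```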
The get task information API will continue + to list these cancelled tasks until they complete. The cancelled flag in the + response indicates that the cancellation command has been processed and the task + will stop as soon as possible. To troubleshoot why a cancelled task does not + complete promptly, use the get task information API with the `?detailed` parameter + to identify the other tasks the system is running. You can also use the node + hot threads API to obtain detailed information about the work the system is doing + instead of completing the cancelled task. ``_ @@ -107,8 +117,7 @@ def get( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get task information. Returns information about the tasks currently executing - in the cluster. + Get task information. Get information about a task currently running in the cluster. ``_ @@ -166,15 +175,16 @@ def list( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - The task management API returns information about tasks currently executing on - one or more nodes in the cluster. + Get all tasks. Get information about the tasks currently running on one or more + nodes in the cluster. ``_ :param actions: Comma-separated list or wildcard expression of actions used to limit the request. :param detailed: If `true`, the response includes detailed information about - shard recoveries. + shard recoveries. This information is useful to distinguish tasks from each + other but is more costly to run. :param group_by: Key used to group tasks in the response. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and diff --git a/elasticsearch/_sync/client/text_structure.py b/elasticsearch/_sync/client/text_structure.py index a3e118d8f..3a480c87b 100644 --- a/elasticsearch/_sync/client/text_structure.py +++ b/elasticsearch/_sync/client/text_structure.py @@ -25,6 +25,349 @@ class TextStructureClient(NamespacedClient): + @_rewrite_parameters() + def find_field_structure( + self, + *, + field: str, + index: str, + column_names: t.Optional[str] = None, + delimiter: t.Optional[str] = None, + documents_to_sample: t.Optional[int] = None, + ecs_compatibility: t.Optional[t.Union[str, t.Literal["disabled", "v1"]]] = None, + error_trace: t.Optional[bool] = None, + explain: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + format: t.Optional[ + t.Union[ + str, t.Literal["delimited", "ndjson", "semi_structured_text", "xml"] + ] + ] = None, + grok_pattern: t.Optional[str] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + quote: t.Optional[str] = None, + should_trim_fields: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + timestamp_field: t.Optional[str] = None, + timestamp_format: t.Optional[str] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Find the structure of a text field. Find the structure of a text field in an + Elasticsearch index. + + ``_ + + :param field: The field that should be analyzed. + :param index: The name of the index that contains the analyzed field. + :param column_names: If `format` is set to `delimited`, you can specify the column + names in a comma-separated list. If this parameter is not specified, the + structure finder uses the column names from the header row of the text. 
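For the task management changes above, a short sketch; the task id shown is illustrative:

```
# List search-related tasks with per-task detail (more costly to gather).
client.tasks.list(actions="*search*", detailed=True, group_by="parents")

# Cancel one task and wait until the cancellation has been processed.
client.tasks.cancel(task_id="oTUltX4IQMOUUVeiohTt8A:12345", wait_for_completion=True)
```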
If + the text does not have a header row, columns are named "column1", "column2", + "column3", for example. + :param delimiter: If you have set `format` to `delimited`, you can specify the + character used to delimit the values in each row. Only a single character + is supported; the delimiter cannot have multiple characters. By default, + the API considers the following possibilities: comma, tab, semi-colon, and + pipe (`|`). In this default scenario, all rows must have the same number + of fields for the delimited format to be detected. If you specify a delimiter, + up to 10% of the rows can have a different number of columns than the first + row. + :param documents_to_sample: The number of documents to include in the structural + analysis. The minimum value is 2. + :param ecs_compatibility: The mode of compatibility with ECS compliant Grok patterns. + Use this parameter to specify whether to use ECS Grok patterns instead of + legacy ones when the structure finder creates a Grok pattern. This setting + primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` + matches the input. If the structure finder identifies a common structure + but has no idea of the meaning then generic field names such as `path`, `ipaddress`, + `field1`, and `field2` are used in the `grok_pattern` output. The intention + in that situation is that a user who knows the meanings will rename the fields + before using them. + :param explain: If true, the response includes a field named `explanation`, which + is an array of strings that indicate how the structure finder produced its + result. + :param format: The high level structure of the text. By default, the API chooses + the format. In this default scenario, all rows must have the same number + of fields for a delimited format to be detected. If the format is set to + delimited and the delimiter is not set, however, the API tolerates up to + 5% of rows that have a different number of columns than the first row. + :param grok_pattern: If the format is `semi_structured_text`, you can specify + a Grok pattern that is used to extract fields from every message in the text. + The name of the timestamp field in the Grok pattern must match what is specified + in the `timestamp_field` parameter. If that parameter is not specified, the + name of the timestamp field in the Grok pattern must match "timestamp". If + `grok_pattern` is not specified, the structure finder creates a Grok pattern. + :param quote: If the format is `delimited`, you can specify the character used + to quote the values in each row if they contain newlines or the delimiter + character. Only a single character is supported. If this parameter is not + specified, the default value is a double quote (`"`). If your delimited text + format does not use quoting, a workaround is to set this argument to a character + that does not appear anywhere in the sample. + :param should_trim_fields: If the format is `delimited`, you can specify whether + values between delimiters should have whitespace trimmed from them. If this + parameter is not specified and the delimiter is pipe (`|`), the default value + is true. Otherwise, the default value is false. + :param timeout: The maximum amount of time that the structure analysis can take. + If the analysis is still running when the timeout expires, it will be stopped. + :param timestamp_field: The name of the field that contains the primary timestamp + of each record in the text. 
In particular, if the text was ingested into + an index, this is the field that would be used to populate the `@timestamp` + field. If the format is `semi_structured_text`, this field must match the + name of the appropriate extraction in the `grok_pattern`. Therefore, for + semi-structured text, it is best not to specify this parameter unless `grok_pattern` + is also specified. For structured text, if you specify this parameter, the + field must exist within the text. If this parameter is not specified, the + structure finder makes a decision about which field (if any) is the primary + timestamp field. For structured text, it is not compulsory to have a timestamp + in the text. + :param timestamp_format: The Java time format of the timestamp field in the text. + Only a subset of Java time format letter groups are supported: * `a` * `d` + * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM` + * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz` Additionally `S` letter + groups (fractional seconds) of length one to nine are supported providing + they occur after `ss` and are separated from the `ss` by a period (`.`), + comma (`,`), or colon (`:`). Spacing and punctuation are also permitted with + the exception of a question mark (`?`), newline, and carriage return, together + with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS + 'in' yyyy` is a valid override format. One valuable use case for this parameter + is when the format is semi-structured text, there are multiple timestamp + formats in the text, and you know which format corresponds to the primary + timestamp, but you do not want to specify the full `grok_pattern`. Another + is when the timestamp format is one that the structure finder does not consider + by default. If this parameter is not specified, the structure finder chooses + the best format from a built-in set. If the special value `null` is specified, + the structure finder will not look for a primary timestamp in the text. When + the format is semi-structured text, this will result in the structure finder + treating the text as single-line messages.
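(Editorial aside, not part of the patch: the call shape of the new endpoint is easiest to see from a short sketch. The index name, field name, and local cluster URL below are illustrative assumptions.)

```python
# Hypothetical usage of the new find_field_structure API added by this diff.
# "web-logs" and "message" are placeholder names; a cluster at localhost:9200
# is assumed.
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

resp = client.text_structure.find_field_structure(
    index="web-logs",
    field="message",
    documents_to_sample=1000,
    ecs_compatibility="v1",
)
# The structure finder is documented to return suggested mappings (and, for
# semi-structured text, a Grok pattern); print the raw body to inspect them.
print(resp.body)
```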
+ """ + if field is None: + raise ValueError("Empty value passed for parameter 'field'") + if index is None: + raise ValueError("Empty value passed for parameter 'index'") + __path_parts: t.Dict[str, str] = {} + __path = "/_text_structure/find_field_structure" + __query: t.Dict[str, t.Any] = {} + if field is not None: + __query["field"] = field + if index is not None: + __query["index"] = index + if column_names is not None: + __query["column_names"] = column_names + if delimiter is not None: + __query["delimiter"] = delimiter + if documents_to_sample is not None: + __query["documents_to_sample"] = documents_to_sample + if ecs_compatibility is not None: + __query["ecs_compatibility"] = ecs_compatibility + if error_trace is not None: + __query["error_trace"] = error_trace + if explain is not None: + __query["explain"] = explain + if filter_path is not None: + __query["filter_path"] = filter_path + if format is not None: + __query["format"] = format + if grok_pattern is not None: + __query["grok_pattern"] = grok_pattern + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if quote is not None: + __query["quote"] = quote + if should_trim_fields is not None: + __query["should_trim_fields"] = should_trim_fields + if timeout is not None: + __query["timeout"] = timeout + if timestamp_field is not None: + __query["timestamp_field"] = timestamp_field + if timestamp_format is not None: + __query["timestamp_format"] = timestamp_format + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="text_structure.find_field_structure", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("messages",), + ) + def find_message_structure( + self, + *, + messages: t.Optional[t.Sequence[str]] = None, + column_names: t.Optional[str] = None, + delimiter: t.Optional[str] = None, + ecs_compatibility: t.Optional[t.Union[str, t.Literal["disabled", "v1"]]] = None, + error_trace: t.Optional[bool] = None, + explain: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + format: t.Optional[ + t.Union[ + str, t.Literal["delimited", "ndjson", "semi_structured_text", "xml"] + ] + ] = None, + grok_pattern: t.Optional[str] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + quote: t.Optional[str] = None, + should_trim_fields: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + timestamp_field: t.Optional[str] = None, + timestamp_format: t.Optional[str] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Find the structure of text messages. Find the structure of a list of text messages. + The messages must contain data that is suitable to be ingested into Elasticsearch. + This API provides a starting point for ingesting data into Elasticsearch in a + format that is suitable for subsequent use with other Elastic Stack functionality. + Use this API rather than the find text structure API if your input text has already + been split up into separate messages by some other process. The response from + the API contains: * Sample messages. * Statistics that reveal the most common + values for all fields detected within the text and basic numeric statistics for + numeric fields. 
* Information about the structure of the text, which is useful + when you write ingest configurations to index it or similarly formatted text. + * Appropriate mappings for an Elasticsearch index, which you could use to ingest + the text. All this information can be calculated by the structure finder with + no guidance. However, you can optionally override some of the decisions about + the text structure by specifying one or more query parameters. + + ``_ + + :param messages: The list of messages you want to analyze. + :param column_names: If the format is `delimited`, you can specify the column + names in a comma-separated list. If this parameter is not specified, the + structure finder uses the column names from the header row of the text. If + the text does not have a header row, columns are named "column1", "column2", + "column3", for example. + :param delimiter: If the format is `delimited`, you can specify the character + used to delimit the values in each row. Only a single character is supported; + the delimiter cannot have multiple characters. By default, the API considers + the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this + default scenario, all rows must have the same number of fields for the delimited + format to be detected. If you specify a delimiter, up to 10% of the rows + can have a different number of columns than the first row. + :param ecs_compatibility: The mode of compatibility with ECS compliant Grok patterns. + Use this parameter to specify whether to use ECS Grok patterns instead of + legacy ones when the structure finder creates a Grok pattern. This setting + primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` + matches the input. If the structure finder identifies a common structure + but has no idea of the meaning, then generic field names such as `path`, `ipaddress`, + `field1`, and `field2` are used in the `grok_pattern` output, with the intention + that a user who knows the meanings will rename these fields before using them. + :param explain: If this parameter is set to true, the response includes a field + named `explanation`, which is an array of strings that indicate how the structure + finder produced its result. + :param format: The high level structure of the text. By default, the API chooses + the format. In this default scenario, all rows must have the same number + of fields for a delimited format to be detected. If the format is `delimited` + and the delimiter is not set, however, the API tolerates up to 5% of rows + that have a different number of columns than the first row. + :param grok_pattern: If the format is `semi_structured_text`, you can specify + a Grok pattern that is used to extract fields from every message in the text. + The name of the timestamp field in the Grok pattern must match what is specified + in the `timestamp_field` parameter. If that parameter is not specified, the + name of the timestamp field in the Grok pattern must match "timestamp". If + `grok_pattern` is not specified, the structure finder creates a Grok pattern. + :param quote: If the format is `delimited`, you can specify the character used + to quote the values in each row if they contain newlines or the delimiter + character. Only a single character is supported. If this parameter is not + specified, the default value is a double quote (`"`). If your delimited text + format does not use quoting, a workaround is to set this argument to a character + that does not appear anywhere in the sample.
+ :param should_trim_fields: If the format is `delimited`, you can specify whether + values between delimiters should have whitespace trimmed from them. If this + parameter is not specified and the delimiter is pipe (`|`), the default value + is true. Otherwise, the default value is false. + :param timeout: The maximum amount of time that the structure analysis can take. + If the analysis is still running when the timeout expires, it will be stopped. + :param timestamp_field: The name of the field that contains the primary timestamp + of each record in the text. In particular, if the text was ingested into + an index, this is the field that would be used to populate the `@timestamp` + field. If the format is `semi_structured_text`, this field must match the + name of the appropriate extraction in the `grok_pattern`. Therefore, for + semi-structured text, it is best not to specify this parameter unless `grok_pattern` + is also specified. For structured text, if you specify this parameter, the + field must exist within the text. If this parameter is not specified, the + structure finder makes a decision about which field (if any) is the primary + timestamp field. For structured text, it is not compulsory to have a timestamp + in the text. + :param timestamp_format: The Java time format of the timestamp field in the text. + Only a subset of Java time format letter groups are supported: * `a` * `d` + * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM` + * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz` Additionally `S` letter + groups (fractional seconds) of length one to nine are supported providing + they occur after `ss` and are separated from the `ss` by a period (`.`), + comma (`,`), or colon (`:`). Spacing and punctuation are also permitted with + the exception of a question mark (`?`), newline, and carriage return, together + with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS + 'in' yyyy` is a valid override format. One valuable use case for this parameter + is when the format is semi-structured text, there are multiple timestamp + formats in the text, and you know which format corresponds to the primary + timestamp, but you do not want to specify the full `grok_pattern`. Another + is when the timestamp format is one that the structure finder does not consider + by default. If this parameter is not specified, the structure finder chooses + the best format from a built-in set. If the special value `null` is specified, + the structure finder will not look for a primary timestamp in the text. When + the format is semi-structured text, this will result in the structure finder + treating the text as single-line messages.
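(Editorial aside, not part of the patch: a matching sketch for the message-based variant, with invented log lines and the same local-cluster assumption.)

```python
# Hypothetical usage of the new find_message_structure API: the messages are
# already split into individual log lines by some upstream process.
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

resp = client.text_structure.find_message_structure(
    messages=[
        "[2024-03-05T12:01:02] INFO  starting service",
        "[2024-03-05T12:01:03] WARN  disk usage at 91%",
    ],
)
# Inspect the detected structure, field statistics, and suggested mappings.
print(resp.body)
```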
+ """ + if messages is None and body is None: + raise ValueError("Empty value passed for parameter 'messages'") + __path_parts: t.Dict[str, str] = {} + __path = "/_text_structure/find_message_structure" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if column_names is not None: + __query["column_names"] = column_names + if delimiter is not None: + __query["delimiter"] = delimiter + if ecs_compatibility is not None: + __query["ecs_compatibility"] = ecs_compatibility + if error_trace is not None: + __query["error_trace"] = error_trace + if explain is not None: + __query["explain"] = explain + if filter_path is not None: + __query["filter_path"] = filter_path + if format is not None: + __query["format"] = format + if grok_pattern is not None: + __query["grok_pattern"] = grok_pattern + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if quote is not None: + __query["quote"] = quote + if should_trim_fields is not None: + __query["should_trim_fields"] = should_trim_fields + if timeout is not None: + __query["timeout"] = timeout + if timestamp_field is not None: + __query["timestamp_field"] = timestamp_field + if timestamp_format is not None: + __query["timestamp_format"] = timestamp_format + if not __body: + if messages is not None: + __body["messages"] = messages + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="text_structure.find_message_structure", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_name="text_files", ) @@ -50,8 +393,22 @@ def find_structure( timestamp_format: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Finds the structure of a text file. The text file must contain data that is suitable - to be ingested into Elasticsearch. + Find the structure of a text file. The text file must contain data that is suitable + to be ingested into Elasticsearch. This API provides a starting point for ingesting + data into Elasticsearch in a format that is suitable for subsequent use with + other Elastic Stack functionality. Unlike other Elasticsearch endpoints, the + data that is posted to this endpoint does not need to be UTF-8 encoded and in + JSON format. It must, however, be text; binary text formats are not currently + supported. The size is limited to the Elasticsearch HTTP receive buffer size, + which defaults to 100 Mb. The response from the API contains: * A couple of messages + from the beginning of the text. * Statistics that reveal the most common values + for all fields detected within the text and basic numeric statistics for numeric + fields. * Information about the structure of the text, which is useful when you + write ingest configurations to index it or similarly formatted text. * Appropriate + mappings for an Elasticsearch index, which you could use to ingest the text. + All this information can be calculated by the structure finder with no guidance. + However, you can optionally override some of the decisions about the text structure + by specifying one or more query parameters. ``_ @@ -64,7 +421,7 @@ def find_structure( column names in a comma-separated list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. 
If the text does not have a header role, columns are named "column1", "column2", - "column3", etc. + "column3", for example. :param delimiter: If you have set format to delimited, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers @@ -76,7 +433,9 @@ def find_structure( (disabled or v1, default: disabled). :param explain: If this parameter is set to true, the response includes a field named explanation, which is an array of strings that indicate how the structure - finder produced its result. + finder produced its result. If the structure finder produces unexpected results + for some text, use this query parameter to help you determine why the returned + structure was chosen. :param format: The high level structure of the text. Valid values are ndjson, xml, delimited, and semi_structured_text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields @@ -114,9 +473,9 @@ def find_structure( whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (|), the default value is true. Otherwise, the default value is false. - :param timeout: Sets the maximum amount of time that the structure analysis make + :param timeout: Sets the maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires then it will - be aborted. + be stopped. :param timestamp_field: Optional parameter to specify the timestamp field in the file :param timestamp_format: The Java time format of the timestamp field in the text. @@ -191,7 +550,9 @@ def test_grok_pattern( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Tests a Grok pattern on some text. + Test a Grok pattern. Test a Grok pattern on one or more lines of text. The API + indicates whether the lines match the pattern together with the offsets and lengths + of the matched substrings. ``_ diff --git a/elasticsearch/_sync/client/transform.py b/elasticsearch/_sync/client/transform.py index fc5a64a4e..062e7ae3b 100644 --- a/elasticsearch/_sync/client/transform.py +++ b/elasticsearch/_sync/client/transform.py @@ -844,13 +844,20 @@ def upgrade_transforms( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Upgrades all transforms. This API identifies transforms that have a legacy configuration + Upgrade all transforms. Transforms are compatible across minor versions and between + supported major versions. However, over time, the format of transform configuration + information may change. This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It also cleans up the internal data structures that store the transform state and checkpoints. The upgrade does not affect the source and destination indices. The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains - unchanged. + unchanged. If a transform upgrade step fails, the upgrade stops and an error + is returned about the underlying issue. Resolve the issue then re-run the process + again. A summary is returned when the upgrade is finished. 
To ensure continuous + transforms remain running during a major version upgrade of the cluster – for + example, from 7.16 to 8.0 – it is recommended to upgrade transforms before upgrading + the cluster. You may want to perform a recent cluster backup prior to the upgrade. ``_ diff --git a/elasticsearch/_sync/client/watcher.py b/elasticsearch/_sync/client/watcher.py index 1b35a2f97..010fc26d8 100644 --- a/elasticsearch/_sync/client/watcher.py +++ b/elasticsearch/_sync/client/watcher.py @@ -37,7 +37,11 @@ def ack_watch( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Acknowledges a watch, manually throttling the execution of the watch's actions. + Acknowledge a watch. Acknowledging a watch enables you to manually throttle the + execution of the watch's actions. The acknowledgement state of an action is stored + in the `status.actions..ack.state` structure. IMPORTANT: If the specified + watch is currently being executed, this API will return an error. The reason for + this behavior is to prevent overwriting the watch status from a watch execution. ``_ @@ -88,7 +92,7 @@ def activate_watch( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Activates a currently inactive watch. + Activate a watch. A watch can be either active or inactive. ``_ @@ -128,7 +132,7 @@ def deactivate_watch( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deactivates a currently active watch. + Deactivate a watch. A watch can be either active or inactive. ``_ @@ -168,7 +172,13 @@ def delete_watch( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Removes a watch from Watcher. + Delete a watch. When the watch is removed, the document representing the watch + in the `.watches` index is gone and it will never be run again. Deleting a watch + does not delete any watch execution records related to this watch from the watch + history. IMPORTANT: Deleting a watch must be done by using only this API. Do + not delete the watch directly from the `.watches` index using the Elasticsearch + delete document API. When Elasticsearch security features are enabled, make sure + no write privileges are granted to anyone for the `.watches` index. ``_ @@ -237,13 +247,15 @@ def execute_watch( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - This API can be used to force execution of the watch outside of its triggering - logic or to simulate the watch execution for debugging purposes. For testing - and debugging purposes, you also have fine-grained control on how the watch runs. - You can execute the watch without executing all of its actions or alternatively + Run a watch. This API can be used to force execution of the watch outside of + its triggering logic or to simulate the watch execution for debugging purposes. + For testing and debugging purposes, you also have fine-grained control on how + the watch runs. You can run the watch without running all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after - execution. + it runs. You can use the run watch API to run watches that are not yet registered + by specifying the watch definition inline. This serves as a great tool for testing + and debugging your watches prior to adding them to Watcher. ``_ @@ -326,7 +338,7 @@ def get_watch( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves a watch by its ID. + Get a watch.
``_ @@ -388,7 +400,17 @@ def put_watch( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Creates a new watch, or updates an existing one. + Create or update a watch. When a watch is registered, a new document that represents + the watch is added to the `.watches` index and its trigger is immediately registered + with the relevant trigger engine. Typically for the `schedule` trigger, the scheduler + is the trigger engine. IMPORTANT: You must use Kibana or this API to create a + watch. Do not add a watch directly to the `.watches` index by using the Elasticsearch + index API. If Elasticsearch security features are enabled, do not give users + write privileges on the `.watches` index. When you add a watch you can also define + its initial active state by setting the *active* parameter. When Elasticsearch + security features are enabled, your watch can index or search only on indices + for which the user that stored the watch has privileges. If the user is able + to read index `a`, but not index `b`, the same will apply when the watch runs. ``_ @@ -485,7 +507,8 @@ def query_watches( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves stored watches. + Query watches. Get all registered watches in a paginated manner and optionally + filter watches by a query. ``_ @@ -555,7 +578,7 @@ def start( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Starts Watcher if it is not already running. + Start the watch service. Start the Watcher service if it is not already running. ``_ """ @@ -612,7 +635,7 @@ def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retrieves the current Watcher metrics. + Get Watcher statistics. ``_ @@ -658,7 +681,7 @@ def stop( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stops Watcher if it is running. + Stop the watch service. Stop the Watcher service if it is running. ``_ """ diff --git a/elasticsearch/_sync/client/xpack.py b/elasticsearch/_sync/client/xpack.py index f5bd3e897..bb81d554a 100644 --- a/elasticsearch/_sync/client/xpack.py +++ b/elasticsearch/_sync/client/xpack.py @@ -43,7 +43,10 @@ def info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Provides general information about the installed X-Pack features. + Get information. The information provided by the API includes: * Build information + including the build number and timestamp. * License information about the currently + installed license. * Feature information for the features that are currently + enabled and available under the current license. ``_ @@ -87,8 +90,9 @@ def usage( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - This API provides information about which features are currently enabled and - available under the current license and some usage statistics. + Get usage information. Get information about the features that are currently + enabled and available under the current license. The API also provides some usage + statistics. 
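(Editorial aside, not part of the patch: the Watcher and X-Pack docstrings reworded above correspond to straightforward client calls. A rough sketch, assuming a watch with the placeholder id "my_watch" is already registered:)

```python
# Hypothetical calls against the endpoints whose docstrings are updated above.
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

client.watcher.ack_watch(watch_id="my_watch")          # throttle its actions
client.watcher.deactivate_watch(watch_id="my_watch")   # mark it inactive
client.watcher.activate_watch(watch_id="my_watch")     # and active again

print(client.watcher.query_watches())  # paginated view of registered watches
print(client.watcher.stats())          # current Watcher metrics
print(client.xpack.usage())            # per-feature usage statistics
```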
``_ From 72426e67e4c7cb64445b4fd443ee26a73340bba3 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 8 Jan 2025 16:26:06 +0400 Subject: [PATCH 17/65] Make pyarrow dependency optional for tests (#2733) (#2734) (cherry picked from commit d692c70e571f1311d04ac7b124c89b73d73606d4) Co-authored-by: danigm --- .../test_client/test_deprecated_options.py | 39 ++++++++------- .../test_client/test_serializers.py | 49 ++++++++----------- test_elasticsearch/test_serializer.py | 16 +++--- 3 files changed, 49 insertions(+), 55 deletions(-) diff --git a/test_elasticsearch/test_client/test_deprecated_options.py b/test_elasticsearch/test_client/test_deprecated_options.py index 810e75cf4..3b4e0b9ed 100644 --- a/test_elasticsearch/test_client/test_deprecated_options.py +++ b/test_elasticsearch/test_client/test_deprecated_options.py @@ -21,6 +21,23 @@ from elasticsearch import Elasticsearch, JsonSerializer +EXPECTED_SERIALIZERS = { + "application/vnd.mapbox-vector-tile", + "application/x-ndjson", + "application/json", + "text/*", + "application/vnd.elasticsearch+json", + "application/vnd.elasticsearch+x-ndjson", +} + + +try: + import pyarrow as pa + + EXPECTED_SERIALIZERS.add("application/vnd.apache.arrow.stream") +except ImportError: + pa = None + def test_sniff_on_connection_fail(): with warnings.catch_warnings(record=True) as w: @@ -129,15 +146,7 @@ class CustomSerializer(JsonSerializer): client.transport.serializers.get_serializer("application/json"), CustomSerializer, ) - assert set(client.transport.serializers.serializers.keys()) == { - "application/vnd.mapbox-vector-tile", - "application/x-ndjson", - "application/json", - "text/*", - "application/vnd.apache.arrow.stream", - "application/vnd.elasticsearch+json", - "application/vnd.elasticsearch+x-ndjson", - } + assert set(client.transport.serializers.serializers.keys()) == EXPECTED_SERIALIZERS client = Elasticsearch( "http://localhost:9200", @@ -150,13 +159,5 @@ class CustomSerializer(JsonSerializer): client.transport.serializers.get_serializer("application/json"), CustomSerializer, ) - assert set(client.transport.serializers.serializers.keys()) == { - "application/vnd.mapbox-vector-tile", - "application/x-ndjson", - "application/json", - "text/*", - "application/vnd.apache.arrow.stream", - "application/vnd.elasticsearch+json", - "application/vnd.elasticsearch+x-ndjson", - "application/cbor", - } + expected = EXPECTED_SERIALIZERS | {"application/cbor"} + assert set(client.transport.serializers.serializers.keys()) == expected diff --git a/test_elasticsearch/test_client/test_serializers.py b/test_elasticsearch/test_client/test_serializers.py index 9d13386ed..986160b92 100644 --- a/test_elasticsearch/test_client/test_serializers.py +++ b/test_elasticsearch/test_client/test_serializers.py @@ -20,6 +20,23 @@ from elasticsearch import Elasticsearch from test_elasticsearch.test_cases import DummyTransportTestCase +EXPECTED_SERIALIZERS = { + "application/json", + "text/*", + "application/x-ndjson", + "application/vnd.mapbox-vector-tile", + "application/vnd.elasticsearch+json", + "application/vnd.elasticsearch+x-ndjson", +} + + +try: + import pyarrow as pa + + EXPECTED_SERIALIZERS.add("application/vnd.apache.arrow.stream") +except ImportError: + pa = None + class TestSerializers(DummyTransportTestCase): def test_compat_mode_on_by_default(self): @@ -90,16 +107,8 @@ class CustomSerializer: "https://localhost:9200", serializers={f"application/{mime_subtype}": ser} ) serializers = 
client.transport.serializers.serializers - assert set(serializers.keys()) == { - "application/json", - "text/*", - "application/x-ndjson", - "application/vnd.apache.arrow.stream", - "application/vnd.mapbox-vector-tile", - "application/vnd.elasticsearch+json", - "application/vnd.elasticsearch+x-ndjson", - } + assert set(serializers.keys()) == EXPECTED_SERIALIZERS assert serializers[f"application/{mime_subtype}"] is ser assert serializers[f"application/vnd.elasticsearch+{mime_subtype}"] is ser @@ -118,16 +127,7 @@ class CustomSerializer: }, ) serializers = client.transport.serializers.serializers - assert set(serializers.keys()) == { - "application/json", - "text/*", - "application/x-ndjson", - "application/vnd.apache.arrow.stream", - "application/vnd.mapbox-vector-tile", - "application/vnd.elasticsearch+json", - "application/vnd.elasticsearch+x-ndjson", - } - + assert set(serializers.keys()) == EXPECTED_SERIALIZERS assert serializers[f"application/{mime_subtype}"] is ser1 assert serializers[f"application/vnd.elasticsearch+{mime_subtype}"] is ser2 @@ -138,15 +138,6 @@ class CustomSerializer: ser = CustomSerializer() client = Elasticsearch("https://localhost:9200", serializer=ser) serializers = client.transport.serializers.serializers - assert set(serializers.keys()) == { - "application/json", - "text/*", - "application/x-ndjson", - "application/vnd.apache.arrow.stream", - "application/vnd.mapbox-vector-tile", - "application/vnd.elasticsearch+json", - "application/vnd.elasticsearch+x-ndjson", - } - + assert set(serializers.keys()) == EXPECTED_SERIALIZERS assert serializers["application/json"] is ser assert serializers["application/vnd.elasticsearch+json"] is ser diff --git a/test_elasticsearch/test_serializer.py b/test_elasticsearch/test_serializer.py index 02723e8f4..21c6b94b5 100644 --- a/test_elasticsearch/test_serializer.py +++ b/test_elasticsearch/test_serializer.py @@ -19,9 +19,15 @@ from datetime import datetime from decimal import Decimal -import pyarrow as pa import pytest +try: + import pyarrow as pa + + from elasticsearch.serializer import PyArrowSerializer +except ImportError: + pa = None + try: import numpy as np import pandas as pd @@ -32,12 +38,7 @@ from elasticsearch import Elasticsearch from elasticsearch.exceptions import SerializationError -from elasticsearch.serializer import ( - JSONSerializer, - OrjsonSerializer, - PyArrowSerializer, - TextSerializer, -) +from elasticsearch.serializer import JSONSerializer, OrjsonSerializer, TextSerializer requires_numpy_and_pandas = pytest.mark.skipif( np is None or pd is None, reason="Test requires numpy and pandas to be available" @@ -163,6 +164,7 @@ def test_serializes_pandas_category(json_serializer): assert b'{"d":[1,2,3]}' == json_serializer.dumps({"d": cat}) +@pytest.mark.skipif(pa is None, reason="Test requires pyarrow to be available") def test_pyarrow_loads(): data = [ pa.array([1, 2, 3, 4]), From febb677fd1bcd80b2414e974c337809a578e807e Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 14 Jan 2025 11:32:06 +0400 Subject: [PATCH 18/65] Add simulate namespace (#2737) (#2738) (cherry picked from commit e4a9563566e47657e284b188ca179263e3989e04) Co-authored-by: Quentin Pradet --- docs/sphinx/api.rst | 1 + docs/sphinx/api/simulate.rst | 9 ++ elasticsearch/_async/client/__init__.py | 2 + elasticsearch/_async/client/simulate.py | 151 ++++++++++++++++++++++++ elasticsearch/_sync/client/__init__.py | 2 + elasticsearch/_sync/client/simulate.py | 151 
++++++++++++++++++++++++ 6 files changed, 316 insertions(+) create mode 100644 docs/sphinx/api/simulate.rst create mode 100644 elasticsearch/_async/client/simulate.py create mode 100644 elasticsearch/_sync/client/simulate.py diff --git a/docs/sphinx/api.rst b/docs/sphinx/api.rst index 979341034..61d3214e6 100644 --- a/docs/sphinx/api.rst +++ b/docs/sphinx/api.rst @@ -45,6 +45,7 @@ arguments are required for all calls. api/searchable-snapshots api/security api/shutdown + api/simulate api/snapshot-lifecycle-management api/snapshots api/snapshottable-features diff --git a/docs/sphinx/api/simulate.rst b/docs/sphinx/api/simulate.rst new file mode 100644 index 000000000..eabd3fc07 --- /dev/null +++ b/docs/sphinx/api/simulate.rst @@ -0,0 +1,9 @@ +.. _simulate: + +Simulate +-------- +.. py:module:: elasticsearch.client + :no-index: + +.. autoclass:: SimulateClient + :members: diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py index 802ec316f..a846119b5 100644 --- a/elasticsearch/_async/client/__init__.py +++ b/elasticsearch/_async/client/__init__.py @@ -70,6 +70,7 @@ from .searchable_snapshots import SearchableSnapshotsClient from .security import SecurityClient from .shutdown import ShutdownClient +from .simulate import SimulateClient from .slm import SlmClient from .snapshot import SnapshotClient from .sql import SqlClient @@ -465,6 +466,7 @@ def __init__( self.searchable_snapshots = SearchableSnapshotsClient(self) self.security = SecurityClient(self) self.slm = SlmClient(self) + self.simulate = SimulateClient(self) self.shutdown = ShutdownClient(self) self.sql = SqlClient(self) self.ssl = SslClient(self) diff --git a/elasticsearch/_async/client/simulate.py b/elasticsearch/_async/client/simulate.py new file mode 100644 index 000000000..c4beb9dda --- /dev/null +++ b/elasticsearch/_async/client/simulate.py @@ -0,0 +1,151 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License.
+ +import typing as t + +from elastic_transport import ObjectApiResponse + +from ._base import NamespacedClient +from .utils import ( + SKIP_IN_PATH, + Stability, + _quote, + _rewrite_parameters, + _stability_warning, +) + + +class SimulateClient(NamespacedClient): + + @_rewrite_parameters( + body_fields=( + "docs", + "component_template_substitutions", + "index_template_subtitutions", + "mapping_addition", + "pipeline_substitutions", + ), + ) + @_stability_warning(Stability.EXPERIMENTAL) + async def ingest( + self, + *, + docs: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, + index: t.Optional[str] = None, + component_template_substitutions: t.Optional[ + t.Mapping[str, t.Mapping[str, t.Any]] + ] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + index_template_subtitutions: t.Optional[ + t.Mapping[str, t.Mapping[str, t.Any]] + ] = None, + mapping_addition: t.Optional[t.Mapping[str, t.Any]] = None, + pipeline: t.Optional[str] = None, + pipeline_substitutions: t.Optional[ + t.Mapping[str, t.Mapping[str, t.Any]] + ] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Simulate data ingestion. Run ingest pipelines against a set of provided documents, + optionally with substitute pipeline definitions, to simulate ingesting data into + an index. This API is meant to be used for troubleshooting or pipeline development, + as it does not actually index any data into Elasticsearch. The API runs the default + and final pipeline for that index against a set of documents provided in the + body of the request. If a pipeline contains a reroute processor, it follows that + reroute processor to the new index, running that index's pipelines as well the + same way that a non-simulated ingest would. No data is indexed into Elasticsearch. + Instead, the transformed document is returned, along with the list of pipelines + that have been run and the name of the index where the document would have been + indexed if this were not a simulation. The transformed document is validated + against the mappings that would apply to this index, and any validation error + is reported in the result. This API differs from the simulate pipeline API in + that you specify a single pipeline for that API, and it runs only that one pipeline. + The simulate pipeline API is more useful for developing a single pipeline, while + the simulate ingest API is more useful for troubleshooting the interaction of + the various pipelines that get applied when ingesting into an index. By default, + the pipeline definitions that are currently in the system are used. However, + you can supply substitute pipeline definitions in the body of the request. These + will be used in place of the pipeline definitions that are already in the system. + This can be used to replace existing pipeline definitions or to create new ones. + The pipeline substitutions are used only within this request. + + ``_ + + :param docs: Sample documents to test in the pipeline. + :param index: The index to simulate ingesting into. This value can be overridden + by specifying an index on each document. If you specify this parameter in + the request path, it is used for any documents that do not explicitly specify + an index argument. + :param component_template_substitutions: A map of component template names to + substitute component template definition objects. 
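(Editorial aside, not part of the patch; the remaining `:param` entries continue after this note. A hedged sketch of how the new simulate.ingest endpoint might be called: the index, documents, and pipeline definition are invented, and the `@_stability_warning(Stability.EXPERIMENTAL)` decorator means the client emits an experimental-API warning.)

```python
# Hypothetical usage of simulate.ingest (shown with the sync client; the async
# variant is identical apart from awaiting the call). No data is indexed.
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

resp = client.simulate.ingest(
    index="my-index",
    docs=[
        {"_id": "1", "_source": {"message": "first sample document"}},
        {"_id": "2", "_source": {"message": "second sample document"}},
    ],
    pipeline_substitutions={
        "my-pipeline": {
            "processors": [{"set": {"field": "status", "value": "simulated"}}]
        }
    },
)
# The documented response contains the transformed documents and the list of
# pipelines that ran for each of them.
print(resp.body)
```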
+ :param index_template_subtitutions: A map of index template names to substitute + index template definition objects. + :param mapping_addition: + :param pipeline: The pipeline to use as the default pipeline. This value can + be used to override the default pipeline of the index. + :param pipeline_substitutions: Pipelines to test. If you don’t specify the `pipeline` + request path parameter, this parameter is required. If you specify both this + and the request path parameter, the API only uses the request path parameter. + """ + if docs is None and body is None: + raise ValueError("Empty value passed for parameter 'docs'") + __path_parts: t.Dict[str, str] + if index not in SKIP_IN_PATH: + __path_parts = {"index": _quote(index)} + __path = f'/_ingest/{__path_parts["index"]}/_simulate' + else: + __path_parts = {} + __path = "/_ingest/_simulate" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pipeline is not None: + __query["pipeline"] = pipeline + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if docs is not None: + __body["docs"] = docs + if component_template_substitutions is not None: + __body["component_template_substitutions"] = ( + component_template_substitutions + ) + if index_template_subtitutions is not None: + __body["index_template_subtitutions"] = index_template_subtitutions + if mapping_addition is not None: + __body["mapping_addition"] = mapping_addition + if pipeline_substitutions is not None: + __body["pipeline_substitutions"] = pipeline_substitutions + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="simulate.ingest", + path_parts=__path_parts, + ) diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py index bebd41cd8..047404552 100644 --- a/elasticsearch/_sync/client/__init__.py +++ b/elasticsearch/_sync/client/__init__.py @@ -70,6 +70,7 @@ from .searchable_snapshots import SearchableSnapshotsClient from .security import SecurityClient from .shutdown import ShutdownClient +from .simulate import SimulateClient from .slm import SlmClient from .snapshot import SnapshotClient from .sql import SqlClient @@ -465,6 +466,7 @@ def __init__( self.searchable_snapshots = SearchableSnapshotsClient(self) self.security = SecurityClient(self) self.slm = SlmClient(self) + self.simulate = SimulateClient(self) self.shutdown = ShutdownClient(self) self.sql = SqlClient(self) self.ssl = SslClient(self) diff --git a/elasticsearch/_sync/client/simulate.py b/elasticsearch/_sync/client/simulate.py new file mode 100644 index 000000000..ac1f7cc90 --- /dev/null +++ b/elasticsearch/_sync/client/simulate.py @@ -0,0 +1,151 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import typing as t + +from elastic_transport import ObjectApiResponse + +from ._base import NamespacedClient +from .utils import ( + SKIP_IN_PATH, + Stability, + _quote, + _rewrite_parameters, + _stability_warning, +) + + +class SimulateClient(NamespacedClient): + + @_rewrite_parameters( + body_fields=( + "docs", + "component_template_substitutions", + "index_template_subtitutions", + "mapping_addition", + "pipeline_substitutions", + ), + ) + @_stability_warning(Stability.EXPERIMENTAL) + def ingest( + self, + *, + docs: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, + index: t.Optional[str] = None, + component_template_substitutions: t.Optional[ + t.Mapping[str, t.Mapping[str, t.Any]] + ] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + index_template_subtitutions: t.Optional[ + t.Mapping[str, t.Mapping[str, t.Any]] + ] = None, + mapping_addition: t.Optional[t.Mapping[str, t.Any]] = None, + pipeline: t.Optional[str] = None, + pipeline_substitutions: t.Optional[ + t.Mapping[str, t.Mapping[str, t.Any]] + ] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Simulate data ingestion. Run ingest pipelines against a set of provided documents, + optionally with substitute pipeline definitions, to simulate ingesting data into + an index. This API is meant to be used for troubleshooting or pipeline development, + as it does not actually index any data into Elasticsearch. The API runs the default + and final pipeline for that index against a set of documents provided in the + body of the request. If a pipeline contains a reroute processor, it follows that + reroute processor to the new index, running that index's pipelines as well the + same way that a non-simulated ingest would. No data is indexed into Elasticsearch. + Instead, the transformed document is returned, along with the list of pipelines + that have been run and the name of the index where the document would have been + indexed if this were not a simulation. The transformed document is validated + against the mappings that would apply to this index, and any validation error + is reported in the result. This API differs from the simulate pipeline API in + that you specify a single pipeline for that API, and it runs only that one pipeline. + The simulate pipeline API is more useful for developing a single pipeline, while + the simulate ingest API is more useful for troubleshooting the interaction of + the various pipelines that get applied when ingesting into an index. By default, + the pipeline definitions that are currently in the system are used. However, + you can supply substitute pipeline definitions in the body of the request. These + will be used in place of the pipeline definitions that are already in the system. + This can be used to replace existing pipeline definitions or to create new ones. + The pipeline substitutions are used only within this request. + + ``_ + + :param docs: Sample documents to test in the pipeline. 
+ :param index: The index to simulate ingesting into. This value can be overridden + by specifying an index on each document. If you specify this parameter in + the request path, it is used for any documents that do not explicitly specify + an index argument. + :param component_template_substitutions: A map of component template names to + substitute component template definition objects. + :param index_template_subtitutions: A map of index template names to substitute + index template definition objects. + :param mapping_addition: + :param pipeline: The pipeline to use as the default pipeline. This value can + be used to override the default pipeline of the index. + :param pipeline_substitutions: Pipelines to test. If you don’t specify the `pipeline` + request path parameter, this parameter is required. If you specify both this + and the request path parameter, the API only uses the request path parameter. + """ + if docs is None and body is None: + raise ValueError("Empty value passed for parameter 'docs'") + __path_parts: t.Dict[str, str] + if index not in SKIP_IN_PATH: + __path_parts = {"index": _quote(index)} + __path = f'/_ingest/{__path_parts["index"]}/_simulate' + else: + __path_parts = {} + __path = "/_ingest/_simulate" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pipeline is not None: + __query["pipeline"] = pipeline + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if docs is not None: + __body["docs"] = docs + if component_template_substitutions is not None: + __body["component_template_substitutions"] = ( + component_template_substitutions + ) + if index_template_subtitutions is not None: + __body["index_template_subtitutions"] = index_template_subtitutions + if mapping_addition is not None: + __body["mapping_addition"] = mapping_addition + if pipeline_substitutions is not None: + __body["pipeline_substitutions"] = pipeline_substitutions + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="simulate.ingest", + path_parts=__path_parts, + ) From a050328c479cdac68e1ba7e581100e39e2065aae Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 16 Jan 2025 14:41:40 +0400 Subject: [PATCH 19/65] Add sphinx.configuration to .readthedocs.yml (#2740) (#2743) (cherry picked from commit 1f66af588fb8d2a96188674bd9ce23f813318aaa) Co-authored-by: Quentin Pradet --- .readthedocs.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.readthedocs.yml b/.readthedocs.yml index ccbd86512..0bb2cebab 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -14,3 +14,6 @@ python: - path: . 
extra_requirements: - "docs" + +sphinx: + configuration: docs/sphinx/conf.py From bc44f1aa5459d19da10b62cbbe6a1d9bffc1b628 Mon Sep 17 00:00:00 2001 From: Quentin Pradet Date: Thu, 16 Jan 2025 17:28:58 +0400 Subject: [PATCH 20/65] Auto-generated code for 8.x (#2747) --- elasticsearch/_async/client/__init__.py | 206 +++++-- elasticsearch/_async/client/ccr.py | 60 +++ elasticsearch/_async/client/cluster.py | 50 +- elasticsearch/_async/client/connector.py | 100 ++++ .../_async/client/dangling_indices.py | 6 +- elasticsearch/_async/client/enrich.py | 21 + elasticsearch/_async/client/esql.py | 265 ++++++++- elasticsearch/_async/client/features.py | 10 + elasticsearch/_async/client/ilm.py | 23 +- elasticsearch/_async/client/indices.py | 452 ++++++++++++---- elasticsearch/_async/client/inference.py | 79 +++ elasticsearch/_async/client/ingest.py | 6 - elasticsearch/_async/client/license.py | 32 ++ elasticsearch/_async/client/logstash.py | 3 +- elasticsearch/_async/client/migration.py | 8 +- elasticsearch/_async/client/ml.py | 19 +- elasticsearch/_async/client/nodes.py | 24 +- elasticsearch/_async/client/query_rules.py | 45 +- elasticsearch/_async/client/rollup.py | 42 +- .../_async/client/searchable_snapshots.py | 43 +- elasticsearch/_async/client/security.py | 507 ++++++++++++++++-- elasticsearch/_async/client/shutdown.py | 47 +- elasticsearch/_async/client/simulate.py | 2 +- elasticsearch/_async/client/slm.py | 111 +++- elasticsearch/_async/client/snapshot.py | 243 ++++++++- elasticsearch/_async/client/sql.py | 112 ++-- elasticsearch/_async/client/synonyms.py | 58 +- elasticsearch/_async/client/tasks.py | 98 ++-- elasticsearch/_async/client/text_structure.py | 155 ++++-- elasticsearch/_async/client/transform.py | 5 + elasticsearch/_async/client/watcher.py | 206 ++++++- elasticsearch/_async/client/xpack.py | 7 +- elasticsearch/_sync/client/__init__.py | 206 +++++-- elasticsearch/_sync/client/ccr.py | 60 +++ elasticsearch/_sync/client/cluster.py | 50 +- elasticsearch/_sync/client/connector.py | 100 ++++ .../_sync/client/dangling_indices.py | 6 +- elasticsearch/_sync/client/enrich.py | 21 + elasticsearch/_sync/client/esql.py | 265 ++++++++- elasticsearch/_sync/client/features.py | 10 + elasticsearch/_sync/client/ilm.py | 23 +- elasticsearch/_sync/client/indices.py | 452 ++++++++++++---- elasticsearch/_sync/client/inference.py | 79 +++ elasticsearch/_sync/client/ingest.py | 6 - elasticsearch/_sync/client/license.py | 32 ++ elasticsearch/_sync/client/logstash.py | 3 +- elasticsearch/_sync/client/migration.py | 8 +- elasticsearch/_sync/client/ml.py | 19 +- elasticsearch/_sync/client/nodes.py | 24 +- elasticsearch/_sync/client/query_rules.py | 45 +- elasticsearch/_sync/client/rollup.py | 42 +- .../_sync/client/searchable_snapshots.py | 43 +- elasticsearch/_sync/client/security.py | 507 ++++++++++++++++-- elasticsearch/_sync/client/shutdown.py | 47 +- elasticsearch/_sync/client/simulate.py | 2 +- elasticsearch/_sync/client/slm.py | 111 +++- elasticsearch/_sync/client/snapshot.py | 243 ++++++++- elasticsearch/_sync/client/sql.py | 112 ++-- elasticsearch/_sync/client/synonyms.py | 58 +- elasticsearch/_sync/client/tasks.py | 98 ++-- elasticsearch/_sync/client/text_structure.py | 155 ++++-- elasticsearch/_sync/client/transform.py | 5 + elasticsearch/_sync/client/watcher.py | 206 ++++++- elasticsearch/_sync/client/xpack.py | 7 +- 64 files changed, 5028 insertions(+), 1062 deletions(-) diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py index a846119b5..fa2481973 
100644 --- a/elasticsearch/_async/client/__init__.py +++ b/elasticsearch/_async/client/__init__.py @@ -646,41 +646,125 @@ async def bulk( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Bulk index or delete documents. Performs multiple indexing or delete operations - in a single API call. This reduces overhead and can greatly increase indexing - speed. + Bulk index or delete documents. Perform multiple `index`, `create`, `delete`, + and `update` actions in a single request. This reduces overhead and can greatly + increase indexing speed. If the Elasticsearch security features are enabled, + you must have the following index privileges for the target data stream, index, + or index alias: * To use the `create` action, you must have the `create_doc`, + `create`, `index`, or `write` index privilege. Data streams support only the + `create` action. * To use the `index` action, you must have the `create`, `index`, + or `write` index privilege. * To use the `delete` action, you must have the `delete` + or `write` index privilege. * To use the `update` action, you must have the `index` + or `write` index privilege. * To automatically create a data stream or index + with a bulk API request, you must have the `auto_configure`, `create_index`, + or `manage` index privilege. * To make the result of a bulk operation visible + to search using the `refresh` parameter, you must have the `maintenance` or `manage` + index privilege. Automatic data stream creation requires a matching index template + with data stream enabled. The actions are specified in the request body using + a newline delimited JSON (NDJSON) structure: ``` action_and_meta_data\\n optional_source\\n + action_and_meta_data\\n optional_source\\n .... action_and_meta_data\\n optional_source\\n + ``` The `index` and `create` actions expect a source on the next line and have + the same semantics as the `op_type` parameter in the standard index API. A `create` + action fails if a document with the same ID already exists in the target. An `index` + action adds or replaces a document as necessary. NOTE: Data streams support only + the `create` action. To update or delete a document in a data stream, you must + target the backing index containing the document. An `update` action expects + that the partial doc, upsert, and script and its options are specified on the + next line. A `delete` action does not expect a source on the next line and has + the same semantics as the standard delete API. NOTE: The final line of data must + end with a newline character (`\\n`). Each newline character may be preceded + by a carriage return (`\\r`). When sending NDJSON data to the `_bulk` endpoint, + use a `Content-Type` header of `application/json` or `application/x-ndjson`. + Because this format uses literal newline characters (`\\n`) as delimiters, make + sure that the JSON actions and sources are not pretty printed. If you provide + a target in the request path, it is used for any actions that don't explicitly + specify an `_index` argument. A note on the format: the idea here is to make + processing as fast as possible. As some of the actions are redirected to other + shards on other nodes, only `action_meta_data` is parsed on the receiving node + side. Client libraries using this protocol should try and strive to do something + similar on the client side, and reduce buffering as much as possible. There is + no "correct" number of actions to perform in a single bulk request.
Experiment + with different settings to find the optimal size for your particular workload. + Note that Elasticsearch limits the maximum size of an HTTP request to 100mb by + default so clients must ensure that no request exceeds this size. It is not possible + to index a single document that exceeds the size limit, so you must pre-process + any such documents into smaller pieces before sending them to Elasticsearch. + For instance, split documents into pages or chapters before indexing them, or + store raw binary data in a system outside Elasticsearch and replace the raw data + with a link to the external system in the documents that you send to Elasticsearch. + **Client support for bulk requests** Some of the officially supported clients + provide helpers to assist with bulk requests and reindexing: * Go: Check out + `esutil.BulkIndexer` * Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` + and `Search::Elasticsearch::Client::5_0::Scroll` * Python: Check out `elasticsearch.helpers.*` + * JavaScript: Check out `client.helpers.*` * .NET: Check out `BulkAllObservable` + * PHP: Check out bulk indexing. **Submitting bulk requests with cURL** If you're + providing text file input to `curl`, you must use the `--data-binary` flag instead + of plain `-d`. The latter doesn't preserve newlines. For example: ``` $ cat requests + { "index" : { "_index" : "test", "_id" : "1" } } { "field1" : "value1" } $ curl + -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary + "@requests"; echo {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]} + ``` **Optimistic concurrency control** Each `index` and `delete` action within + a bulk API call may include the `if_seq_no` and `if_primary_term` parameters + in their respective action and meta data lines. The `if_seq_no` and `if_primary_term` + parameters control how operations are run, based on the last modification to + existing documents. See Optimistic concurrency control for more details. **Versioning** + Each bulk item can include the version value using the `version` field. It automatically + follows the behavior of the index or delete operation based on the `_version` + mapping. It also supports the `version_type`. **Routing** Each bulk item can include + the routing value using the `routing` field. It automatically follows the behavior + of the index or delete operation based on the `_routing` mapping. NOTE: Data + streams do not support custom routing unless they were created with the `allow_custom_routing` + setting enabled in the template. **Wait for active shards** When making bulk + calls, you can set the `wait_for_active_shards` parameter to require a minimum + number of shard copies to be active before starting to process the bulk request. + **Refresh** Control when the changes made by this request are visible to search. + NOTE: Only the shards that receive the bulk request will be affected by refresh. + Imagine a `_bulk?refresh=wait_for` request with three documents in it that happen + to be routed to different shards in an index with five shards. The request will + only wait for those three shards to refresh. The other two shards that make up + the index do not participate in the `_bulk` request at all. ``_ :param operations: - :param index: Name of the data stream, index, or index alias to perform bulk + :param index: The name of the data stream, index, or index alias to perform bulk actions on.
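(Editorial aside, not part of the patch; the remaining parameter documentation continues below. The NDJSON framing described in the docstring above is produced by the Python client itself when you pass alternating action and source dictionaries. The index name and documents here are placeholders.)

```python
# Hypothetical bulk call: the client serializes this list of alternating
# action and source dictionaries into the NDJSON body described above.
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

resp = client.bulk(
    operations=[
        {"index": {"_index": "test", "_id": "1"}},
        {"field1": "value1"},
        {"delete": {"_index": "test", "_id": "2"}},
    ],
    refresh="wait_for",
)
# Per-item results, in the same shape as the cURL example in the docstring.
print(resp["errors"], len(resp["items"]))
```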
:param list_executed_pipelines: If `true`, the response will include the ingest - pipelines that were executed for each index or create. - :param pipeline: ID of the pipeline to use to preprocess incoming documents. - If the index has a default ingest pipeline specified, then setting the value - to `_none` disables the default ingest pipeline for this request. If a final - pipeline is configured it will always run, regardless of the value of this + pipelines that were run for each index or create. + :param pipeline: The pipeline identifier to use to preprocess incoming documents. + If the index has a default ingest pipeline specified, setting the value to + `_none` turns off the default ingest pipeline for this request. If a final + pipeline is configured, it will always run regardless of the value of this parameter. :param refresh: If `true`, Elasticsearch refreshes the affected shards to make - this operation visible to search, if `wait_for` then wait for a refresh to - make this operation visible to search, if `false` do nothing with refreshes. + this operation visible to search. If `wait_for`, wait for a refresh to make + this operation visible to search. If `false`, do nothing with refreshes. Valid values: `true`, `false`, `wait_for`. - :param require_alias: If `true`, the request’s actions must target an index alias. + :param require_alias: If `true`, the request's actions must target an index alias. :param require_data_stream: If `true`, the request's actions must target a data - stream (existing or to-be-created). - :param routing: Custom value used to route operations to a specific shard. - :param source: `true` or `false` to return the `_source` field or not, or a list - of fields to return. + stream (existing or to be created). + :param routing: A custom value that is used to route operations to a specific + shard. + :param source: Indicates whether to return the `_source` field (`true` or `false`) + or contains a list of fields to return. :param source_excludes: A comma-separated list of source fields to exclude from - the response. + the response. You can also use this parameter to exclude fields from the + subset specified in `_source_includes` query parameter. If the `_source` + parameter is `false`, this parameter is ignored. :param source_includes: A comma-separated list of source fields to include in - the response. - :param timeout: Period each action waits for the following operations: automatic - index creation, dynamic mapping updates, waiting for active shards. + the response. If this parameter is specified, only these source fields are + returned. You can exclude fields from this subset using the `_source_excludes` + query parameter. If the `_source` parameter is `false`, this parameter is + ignored. + :param timeout: The period each action waits for the following operations: automatic + index creation, dynamic mapping updates, and waiting for active shards. The + default is `1m` (one minute), which guarantees Elasticsearch waits for at + least the timeout before failing. The actual wait time could be longer, particularly + when multiple waits occur. :param wait_for_active_shards: The number of shard copies that must be active - before proceeding with the operation. Set to all or any positive integer - up to the total number of shards in the index (`number_of_replicas+1`). + before proceeding with the operation. Set to `all` or any positive integer + up to the total number of shards in the index (`number_of_replicas+1`). 
The + default is `1`, which waits for each primary shard to be active. """ if operations is None and body is None: raise ValueError( @@ -760,7 +844,7 @@ async def clear_scroll( ``_ - :param scroll_id: Scroll IDs to clear. To clear all scroll IDs, use `_all`. + :param scroll_id: The scroll IDs to clear. To clear all scroll IDs, use `_all`. """ __path_parts: t.Dict[str, str] = {} __path = "/_search/scroll" @@ -884,46 +968,62 @@ async def count( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Count search results. Get the number of documents matching a query. + Count search results. Get the number of documents matching a query. The query + can either be provided using a simple query string as a parameter or using the + Query DSL defined within the request body. The latter must be nested in a `query` + key, which is the same as the search API. The count API supports multi-target + syntax. You can run a single count API search across multiple data streams and + indices. The operation is broadcast across all shards. For each shard ID group, + a replica is chosen and the search is run against it. This means that replicas + increase the scalability of the count. ``_ - :param index: Comma-separated list of data streams, indices, and aliases to search. - Supports wildcards (`*`). To search all data streams and indices, omit this - parameter or use `*` or `_all`. + :param index: A comma-separated list of data streams, indices, and aliases to + search. It supports wildcards (`*`). To search all data streams and indices, + omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. - This behavior applies even if the request targets other open indices. + This behavior applies even if the request targets other open indices. For + example, a request targeting `foo*,bar*` returns an error if an index starts + with `foo` but no index starts with `bar`. :param analyze_wildcard: If `true`, wildcard and prefix queries are analyzed. - This parameter can only be used when the `q` query string parameter is specified. - :param analyzer: Analyzer to use for the query string. This parameter can only - be used when the `q` query string parameter is specified. + This parameter can be used only when the `q` query string parameter is specified. + :param analyzer: The analyzer to use for the query string. This parameter can + be used only when the `q` query string parameter is specified. :param default_operator: The default operator for query string query: `AND` or - `OR`. This parameter can only be used when the `q` query string parameter + `OR`. This parameter can be used only when the `q` query string parameter is specified. - :param df: Field to use as default where no field prefix is given in the query - string. This parameter can only be used when the `q` query string parameter + :param df: The field to use as a default when no field prefix is given in the + query string. This parameter can be used only when the `q` query string parameter is specified. - :param expand_wildcards: Type of index that wildcard patterns can match. If the - request can target data streams, this argument determines whether wildcard - expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. 
- :param ignore_throttled: If `true`, concrete, expanded or aliased indices are + :param expand_wildcards: The type of index that wildcard patterns can match. + If the request can target data streams, this argument determines whether + wildcard expressions match hidden data streams. It supports comma-separated + values, such as `open,hidden`. + :param ignore_throttled: If `true`, concrete, expanded, or aliased indices are ignored when frozen. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param lenient: If `true`, format-based query failures (such as providing text - to a numeric field) in the query string will be ignored. - :param min_score: Sets the minimum `_score` value that documents must have to - be included in the result. - :param preference: Specifies the node or shard the operation should be performed - on. Random by default. - :param q: Query in the Lucene query string syntax. - :param query: Defines the search definition using the Query DSL. - :param routing: Custom value used to route operations to a specific shard. - :param terminate_after: Maximum number of documents to collect for each shard. + to a numeric field) in the query string will be ignored. This parameter can + be used only when the `q` query string parameter is specified. + :param min_score: The minimum `_score` value that documents must have to be included + in the result. + :param preference: The node or shard the operation should be performed on. By + default, it is random. + :param q: The query in Lucene query string syntax. + :param query: Defines the search definition using the Query DSL. The query is + optional, and when not provided, it will use `match_all` to count all the + docs. + :param routing: A custom value used to route operations to a specific shard. + :param terminate_after: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. - Elasticsearch collects documents before sorting. + Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. + Elasticsearch applies this parameter to each shard handling the request. + When possible, let Elasticsearch perform early termination automatically. + Avoid specifying this parameter for requests that target data streams with + backing indices across multiple data tiers. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: @@ -2491,9 +2591,9 @@ async def info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get cluster info. Returns basic information about the cluster. + Get cluster info. Get basic build, version, and cluster information. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/" @@ -4469,6 +4569,7 @@ async def search_shards( human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, local: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, routing: t.Optional[str] = None, @@ -4496,6 +4597,7 @@ async def search_shards( a missing or closed index. :param local: If `true`, the request retrieves information from the local node only. + :param master_timeout: Period to wait for a connection to the master node. :param preference: Specifies the node or shard the operation should be performed on. Random by default. :param routing: Custom value used to route operations to a specific shard. 
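As an illustration of the expanded `count` documentation above, a small sketch showing both the `q` query-string form and the Query DSL form; index and field names are illustrative, and when no query is given the API falls back to `match_all`:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

# Lucene query string form (`q`); analyzer, lenient, default_operator, etc.
# only apply in this mode.
resp = client.count(index="my-index", q="user.id:kimchy")
print(resp["count"])

# Query DSL form; omitting `query` entirely would count all documents.
resp = client.count(
    index="my-index",
    query={"term": {"status": "published"}},
    min_score=0.5,
)
print(resp["count"], resp["_shards"])
```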
@@ -4522,6 +4624,8 @@ async def search_shards( __query["ignore_unavailable"] = ignore_unavailable if local is not None: __query["local"] = local + if master_timeout is not None: + __query["master_timeout"] = master_timeout if preference is not None: __query["preference"] = preference if pretty is not None: diff --git a/elasticsearch/_async/client/ccr.py b/elasticsearch/_async/client/ccr.py index 5b2f5e533..b7d24b26d 100644 --- a/elasticsearch/_async/client/ccr.py +++ b/elasticsearch/_async/client/ccr.py @@ -33,6 +33,7 @@ async def delete_auto_follow_pattern( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -42,6 +43,7 @@ async def delete_auto_follow_pattern( ``_ :param name: The name of the auto follow pattern. + :param master_timeout: Period to wait for a connection to the master node. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -54,6 +56,8 @@ async def delete_auto_follow_pattern( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -94,6 +98,7 @@ async def follow( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, max_outstanding_read_requests: t.Optional[int] = None, max_outstanding_write_requests: t.Optional[int] = None, max_read_request_operation_count: t.Optional[int] = None, @@ -124,6 +129,7 @@ async def follow( :param remote_cluster: The remote cluster containing the leader index. :param data_stream_name: If the leader index is part of a data stream, the name to which the local data stream for the followed index should be renamed. + :param master_timeout: Period to wait for a connection to the master node. :param max_outstanding_read_requests: The maximum number of outstanding reads requests from the remote cluster. :param max_outstanding_write_requests: The maximum number of outstanding write @@ -174,6 +180,8 @@ async def follow( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if wait_for_active_shards is not None: @@ -232,6 +240,7 @@ async def follow_info( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -244,6 +253,7 @@ async def follow_info( :param index: A comma-separated list of index patterns; use `_all` to perform the operation on all indices + :param master_timeout: Period to wait for a connection to the master node. 
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -256,6 +266,8 @@ async def follow_info( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -277,6 +289,7 @@ async def follow_stats( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Get follower stats. Get cross-cluster replication follower stats. The API returns @@ -287,6 +300,8 @@ async def follow_stats( :param index: A comma-separated list of index patterns; use `_all` to perform the operation on all indices + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -301,6 +316,8 @@ async def follow_stats( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", @@ -331,6 +348,7 @@ async def forget_follower( human: t.Optional[bool] = None, leader_remote_cluster: t.Optional[str] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -360,6 +378,8 @@ async def forget_follower( :param follower_index: :param follower_index_uuid: :param leader_remote_cluster: + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -375,6 +395,8 @@ async def forget_follower( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if follower_cluster is not None: __body["follower_cluster"] = follower_cluster @@ -403,6 +425,7 @@ async def get_auto_follow_pattern( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -412,6 +435,7 @@ async def get_auto_follow_pattern( :param name: Specifies the auto-follow pattern collection that you want to retrieve. If you do not specify a name, the API returns information for all collections. + :param master_timeout: Period to wait for a connection to the master node. 
""" __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: @@ -427,6 +451,8 @@ async def get_auto_follow_pattern( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -447,6 +473,7 @@ async def pause_auto_follow_pattern( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -463,6 +490,7 @@ async def pause_auto_follow_pattern( :param name: The name of the auto follow pattern that should pause discovering new indices to follow. + :param master_timeout: Period to wait for a connection to the master node. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -475,6 +503,8 @@ async def pause_auto_follow_pattern( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -495,6 +525,7 @@ async def pause_follow( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -507,6 +538,7 @@ async def pause_follow( :param index: The name of the follower index that should pause following its leader index. + :param master_timeout: Period to wait for a connection to the master node. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -519,6 +551,8 @@ async def pause_follow( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -561,6 +595,7 @@ async def put_auto_follow_pattern( human: t.Optional[bool] = None, leader_index_exclusion_patterns: t.Optional[t.Sequence[str]] = None, leader_index_patterns: t.Optional[t.Sequence[str]] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, max_outstanding_read_requests: t.Optional[int] = None, max_outstanding_write_requests: t.Optional[int] = None, max_read_request_operation_count: t.Optional[int] = None, @@ -600,6 +635,7 @@ async def put_auto_follow_pattern( or more leader_index_exclusion_patterns won’t be followed. :param leader_index_patterns: An array of simple index patterns to match against indices in the remote cluster specified by the remote_cluster field. + :param master_timeout: Period to wait for a connection to the master node. :param max_outstanding_read_requests: The maximum number of outstanding reads requests from the remote cluster. 
:param max_outstanding_write_requests: The maximum number of outstanding reads @@ -644,6 +680,8 @@ async def put_auto_follow_pattern( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: @@ -704,6 +742,7 @@ async def resume_auto_follow_pattern( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -717,6 +756,7 @@ async def resume_auto_follow_pattern( :param name: The name of the auto follow pattern to resume discovering new indices to follow. + :param master_timeout: Period to wait for a connection to the master node. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -729,6 +769,8 @@ async def resume_auto_follow_pattern( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -762,6 +804,7 @@ async def resume_follow( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, max_outstanding_read_requests: t.Optional[int] = None, max_outstanding_write_requests: t.Optional[int] = None, max_read_request_operation_count: t.Optional[int] = None, @@ -785,6 +828,7 @@ async def resume_follow( ``_ :param index: The name of the follow index to resume following. + :param master_timeout: Period to wait for a connection to the master node. :param max_outstanding_read_requests: :param max_outstanding_write_requests: :param max_read_request_operation_count: @@ -808,6 +852,8 @@ async def resume_follow( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: @@ -859,13 +905,19 @@ async def stats( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Get cross-cluster replication stats. This API returns stats about auto-following and the same shard-level stats as the get follower stats API. ``_ + + :param master_timeout: Period to wait for a connection to the master node. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. 
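The CCR hunks in this file mainly add `master_timeout` (and `timeout` on the stats APIs) to the generated methods. A rough sketch of how the new parameters would be passed once this patch is applied, assuming a remote cluster alias named `leader` is already configured:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

# Create an auto-follow pattern, bounding how long to wait for the master node.
client.ccr.put_auto_follow_pattern(
    name="logs-pattern",                       # illustrative pattern name
    remote_cluster="leader",
    leader_index_patterns=["logs-*"],
    follow_index_pattern="{{leader_index}}-copy",
    master_timeout="30s",                      # parameter added by this patch
)

# Cross-cluster replication stats with the new connection/request timeouts.
stats = client.ccr.stats(master_timeout="30s", timeout="10s")
print(stats["auto_follow_stats"]["number_of_successful_follow_indices"])
```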
""" __path_parts: t.Dict[str, str] = {} __path = "/_ccr/stats" @@ -876,8 +928,12 @@ async def stats( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", @@ -896,6 +952,7 @@ async def unfollow( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -911,6 +968,7 @@ async def unfollow( :param index: The name of the follower index that should be turned into a regular index. + :param master_timeout: Period to wait for a connection to the master node. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -923,6 +981,8 @@ async def unfollow( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} diff --git a/elasticsearch/_async/client/cluster.py b/elasticsearch/_async/client/cluster.py index 03c17de2d..7722dd7fc 100644 --- a/elasticsearch/_async/client/cluster.py +++ b/elasticsearch/_async/client/cluster.py @@ -38,6 +38,7 @@ async def allocation_explain( include_disk_info: t.Optional[bool] = None, include_yes_decisions: t.Optional[bool] = None, index: t.Optional[str] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, primary: t.Optional[bool] = None, shard: t.Optional[int] = None, @@ -61,6 +62,7 @@ async def allocation_explain( :param include_yes_decisions: If true, returns YES decisions in explanation. :param index: Specifies the name of the index that you would like an explanation for. + :param master_timeout: Period to wait for a connection to the master node. :param primary: If true, returns explanation for the primary shard for the given shard ID. :param shard: Specifies the ID of the shard that you would like an explanation @@ -80,6 +82,8 @@ async def allocation_explain( __query["include_disk_info"] = include_disk_info if include_yes_decisions is not None: __query["include_yes_decisions"] = include_yes_decisions + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: @@ -119,9 +123,8 @@ async def delete_component_template( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete component templates. Deletes component templates. Component templates - are building blocks for constructing index templates that specify index mappings, - settings, and aliases. + Delete component templates. Component templates are building blocks for constructing + index templates that specify index mappings, settings, and aliases. 
``_ @@ -167,6 +170,7 @@ async def delete_voting_config_exclusions( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, wait_for_removal: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: @@ -176,6 +180,7 @@ async def delete_voting_config_exclusions( ``_ + :param master_timeout: Period to wait for a connection to the master node. :param wait_for_removal: Specifies whether to wait for all excluded nodes to be removed from the cluster before clearing the voting configuration exclusions list. Defaults to true, meaning that all excluded nodes must be removed from @@ -192,6 +197,8 @@ async def delete_voting_config_exclusions( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if wait_for_removal is not None: @@ -275,7 +282,7 @@ async def get_component_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get component templates. Retrieves information about component templates. + Get component templates. Get information about component templates. ``_ @@ -625,6 +632,7 @@ async def post_voting_config_exclusions( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, node_ids: t.Optional[t.Union[str, t.Sequence[str]]] = None, node_names: t.Optional[t.Union[str, t.Sequence[str]]] = None, pretty: t.Optional[bool] = None, @@ -661,6 +669,7 @@ async def post_voting_config_exclusions( ``_ + :param master_timeout: Period to wait for a connection to the master node. :param node_ids: A comma-separated list of the persistent ids of the nodes to exclude from the voting configuration. If specified, you may not also specify node_names. @@ -680,6 +689,8 @@ async def post_voting_config_exclusions( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if node_ids is not None: __query["node_ids"] = node_ids if node_names is not None: @@ -719,20 +730,21 @@ async def put_component_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a component template. Creates or updates a component template. - Component templates are building blocks for constructing index templates that - specify index mappings, settings, and aliases. An index template can be composed - of multiple component templates. To use a component template, specify it in an - index template’s `composed_of` list. Component templates are only applied to - new data streams and indices as part of a matching index template. Settings and - mappings specified directly in the index template or the create index request - override any settings or mappings specified in a component template. Component - templates are only used during index creation. For data streams, this includes - data stream creation and the creation of a stream’s backing indices. Changes - to component templates do not affect existing indices, including a stream’s backing - indices. You can use C-style `/* *\\/` block comments in component templates. + Create or update a component template. 
Component templates are building blocks + for constructing index templates that specify index mappings, settings, and aliases. + An index template can be composed of multiple component templates. To use a component + template, specify it in an index template’s `composed_of` list. Component templates + are only applied to new data streams and indices as part of a matching index + template. Settings and mappings specified directly in the index template or the + create index request override any settings or mappings specified in a component + template. Component templates are only used during index creation. For data streams, + this includes data stream creation and the creation of a stream’s backing indices. + Changes to component templates do not affect existing indices, including a stream’s + backing indices. You can use C-style `/* *\\/` block comments in component templates. You can include comments anywhere in the request body except before the opening - curly bracket. + curly bracket. **Applying component templates** You cannot directly apply a component + template to a data stream or index. To be applied, a component template must + be included in an index template's `composed_of` list. ``_ @@ -755,8 +767,8 @@ async def put_component_template( :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - :param meta: Optional user metadata about the component template. May have any - contents. This map is not automatically generated by Elasticsearch. This + :param meta: Optional user metadata about the component template. It may have + any contents. This map is not automatically generated by Elasticsearch. This information is stored in the cluster state, so keeping it short is preferable. To unset `_meta`, replace the template without specifying this information. :param version: Version number used to manage component templates externally. diff --git a/elasticsearch/_async/client/connector.py b/elasticsearch/_async/client/connector.py index 8a3f6b4b1..9cf131642 100644 --- a/elasticsearch/_async/client/connector.py +++ b/elasticsearch/_async/client/connector.py @@ -996,6 +996,106 @@ async def sync_job_post( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=( + "deleted_document_count", + "indexed_document_count", + "indexed_document_volume", + "last_seen", + "metadata", + "total_document_count", + ), + ) + @_stability_warning(Stability.EXPERIMENTAL) + async def sync_job_update_stats( + self, + *, + connector_sync_job_id: str, + deleted_document_count: t.Optional[int] = None, + indexed_document_count: t.Optional[int] = None, + indexed_document_volume: t.Optional[int] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + last_seen: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + metadata: t.Optional[t.Mapping[str, t.Any]] = None, + pretty: t.Optional[bool] = None, + total_document_count: t.Optional[int] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Set the connector sync job stats. Stats include: `deleted_document_count`, `indexed_document_count`, + `indexed_document_volume`, and `total_document_count`. You can also update `last_seen`. + This API is mainly used by the connector service for updating sync job information. 
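To ground the reworded `put_component_template` docstring, including its new note that a component template only takes effect through an index template's `composed_of` list, a minimal sketch with hypothetical template names:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

# The component template itself: reusable settings and mappings, applied only at index creation.
client.cluster.put_component_template(
    name="logs-mappings",
    template={
        "mappings": {"properties": {"@timestamp": {"type": "date"}}},
        "settings": {"number_of_shards": 1},
    },
    meta={"description": "shared mappings for log indices"},  # stored as `_meta`
    master_timeout="30s",
)

# Component templates do nothing on their own; reference them from an index template.
client.indices.put_index_template(
    name="logs-template",
    index_patterns=["logs-*"],
    composed_of=["logs-mappings"],
)
```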
+ To sync data using self-managed connectors, you need to deploy the Elastic connector + service on your own infrastructure. This service runs automatically on Elastic + Cloud for Elastic managed connectors. + + ``_ + + :param connector_sync_job_id: The unique identifier of the connector sync job. + :param deleted_document_count: The number of documents the sync job deleted. + :param indexed_document_count: The number of documents the sync job indexed. + :param indexed_document_volume: The total size of the data (in MiB) the sync + job indexed. + :param last_seen: The timestamp to use in the `last_seen` property for the connector + sync job. + :param metadata: The connector-specific metadata. + :param total_document_count: The total number of documents in the target index + after the sync job finished. + """ + if connector_sync_job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") + if deleted_document_count is None and body is None: + raise ValueError( + "Empty value passed for parameter 'deleted_document_count'" + ) + if indexed_document_count is None and body is None: + raise ValueError( + "Empty value passed for parameter 'indexed_document_count'" + ) + if indexed_document_volume is None and body is None: + raise ValueError( + "Empty value passed for parameter 'indexed_document_volume'" + ) + __path_parts: t.Dict[str, str] = { + "connector_sync_job_id": _quote(connector_sync_job_id) + } + __path = f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_stats' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if deleted_document_count is not None: + __body["deleted_document_count"] = deleted_document_count + if indexed_document_count is not None: + __body["indexed_document_count"] = indexed_document_count + if indexed_document_volume is not None: + __body["indexed_document_volume"] = indexed_document_volume + if last_seen is not None: + __body["last_seen"] = last_seen + if metadata is not None: + __body["metadata"] = metadata + if total_document_count is not None: + __body["total_document_count"] = total_document_count + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="connector.sync_job_update_stats", + path_parts=__path_parts, + ) + @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def update_active_filtering( diff --git a/elasticsearch/_async/client/dangling_indices.py b/elasticsearch/_async/client/dangling_indices.py index e5b23d720..59f5e3267 100644 --- a/elasticsearch/_async/client/dangling_indices.py +++ b/elasticsearch/_async/client/dangling_indices.py @@ -44,7 +44,7 @@ async def delete_dangling_index( For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. - ``_ + ``_ :param index_uuid: The UUID of the index to delete. Use the get dangling indices API to find the UUID. 
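For the new experimental `connector.sync_job_update_stats` endpoint added above, a sketch of the call shape; the sync job ID and counts are placeholders, and the three document-count arguments are required by the generated signature:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

# In practice this is issued by the self-managed connector service rather than by end users.
client.connector.sync_job_update_stats(
    connector_sync_job_id="my-sync-job-id",  # placeholder ID
    deleted_document_count=0,
    indexed_document_count=42,
    indexed_document_volume=13,              # size of indexed data, in MiB
    total_document_count=42,
)
```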
@@ -103,7 +103,7 @@ async def import_dangling_index( For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. - ``_ + ``_ :param index_uuid: The UUID of the index to import. Use the get dangling indices API to locate the UUID. @@ -162,7 +162,7 @@ async def list_dangling_indices( indices while an Elasticsearch node is offline. Use this API to list dangling indices, which you can then import or delete. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_dangling" diff --git a/elasticsearch/_async/client/enrich.py b/elasticsearch/_async/client/enrich.py index 643ef6b90..f34e874c8 100644 --- a/elasticsearch/_async/client/enrich.py +++ b/elasticsearch/_async/client/enrich.py @@ -33,6 +33,7 @@ async def delete_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -41,6 +42,7 @@ async def delete_policy( ``_ :param name: Enrich policy to delete. + :param master_timeout: Period to wait for a connection to the master node. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -53,6 +55,8 @@ async def delete_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -73,6 +77,7 @@ async def execute_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: @@ -82,6 +87,7 @@ async def execute_policy( ``_ :param name: Enrich policy to execute. + :param master_timeout: Period to wait for a connection to the master node. :param wait_for_completion: If `true`, the request blocks other enrich policy execution requests until complete. """ @@ -96,6 +102,8 @@ async def execute_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if wait_for_completion is not None: @@ -118,6 +126,7 @@ async def get_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -127,6 +136,7 @@ async def get_policy( :param name: Comma-separated list of enrich policy names used to limit the request. To return information for all enrich policies, omit this parameter. + :param master_timeout: Period to wait for a connection to the master node. 
""" __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: @@ -142,6 +152,8 @@ async def get_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -165,6 +177,7 @@ async def put_policy( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, geo_match: t.Optional[t.Mapping[str, t.Any]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, match: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, range: t.Optional[t.Mapping[str, t.Any]] = None, @@ -178,6 +191,7 @@ async def put_policy( :param name: Name of the enrich policy to create or update. :param geo_match: Matches enrich data to incoming documents based on a `geo_shape` query. + :param master_timeout: Period to wait for a connection to the master node. :param match: Matches enrich data to incoming documents based on a `term` query. :param range: Matches a number, date, or IP address in incoming documents to a range in the enrich index based on a `term` query. @@ -194,6 +208,8 @@ async def put_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: @@ -221,6 +237,7 @@ async def stats( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -228,6 +245,8 @@ async def stats( enrich policies that are currently executing. ``_ + + :param master_timeout: Period to wait for a connection to the master node. 
""" __path_parts: t.Dict[str, str] = {} __path = "/_enrich/_stats" @@ -238,6 +257,8 @@ async def stats( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} diff --git a/elasticsearch/_async/client/esql.py b/elasticsearch/_async/client/esql.py index b8a39d611..764f96658 100644 --- a/elasticsearch/_async/client/esql.py +++ b/elasticsearch/_async/client/esql.py @@ -20,11 +20,274 @@ from elastic_transport import ObjectApiResponse from ._base import NamespacedClient -from .utils import _rewrite_parameters +from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class EsqlClient(NamespacedClient): + @_rewrite_parameters( + body_fields=( + "query", + "columnar", + "filter", + "locale", + "params", + "profile", + "tables", + ), + ignore_deprecated_options={"params"}, + ) + async def async_query( + self, + *, + query: t.Optional[str] = None, + columnar: t.Optional[bool] = None, + delimiter: t.Optional[str] = None, + drop_null_columns: t.Optional[bool] = None, + error_trace: t.Optional[bool] = None, + filter: t.Optional[t.Mapping[str, t.Any]] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + format: t.Optional[ + t.Union[ + str, + t.Literal[ + "arrow", "cbor", "csv", "json", "smile", "tsv", "txt", "yaml" + ], + ] + ] = None, + human: t.Optional[bool] = None, + keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + keep_on_completion: t.Optional[bool] = None, + locale: t.Optional[str] = None, + params: t.Optional[ + t.Sequence[t.Union[None, bool, float, int, str, t.Any]] + ] = None, + pretty: t.Optional[bool] = None, + profile: t.Optional[bool] = None, + tables: t.Optional[ + t.Mapping[str, t.Mapping[str, t.Mapping[str, t.Any]]] + ] = None, + wait_for_completion_timeout: t.Optional[ + t.Union[str, t.Literal[-1], t.Literal[0]] + ] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Run an async ES|QL query. Asynchronously run an ES|QL (Elasticsearch query language) + query, monitor its progress, and retrieve results when they become available. + The API accepts the same parameters and request body as the synchronous query + API, along with additional async related properties. + + ``_ + + :param query: The ES|QL query API accepts an ES|QL query string in the query + parameter, runs it, and returns the results. + :param columnar: By default, ES|QL returns results as rows. For example, FROM + returns each individual document as one row. For the JSON, YAML, CBOR and + smile formats, ES|QL can return the results in a columnar fashion where one + row represents all the values of a certain column in the results. + :param delimiter: The character to use between values within a CSV row. It is + valid only for the CSV format. + :param drop_null_columns: Indicates whether columns that are entirely `null` + will be removed from the `columns` and `values` portion of the results. If + `true`, the response will include an extra section under the name `all_columns` + which has the name of all the columns. + :param filter: Specify a Query DSL query in the filter parameter to filter the + set of documents that an ES|QL query runs on. + :param format: A short version of the Accept header, for example `json` or `yaml`. + :param keep_alive: The period for which the query and its results are stored + in the cluster. 
The default period is five days. When this period expires, + the query and its results are deleted, even if the query is still ongoing. + If the `keep_on_completion` parameter is false, Elasticsearch only stores + async queries that do not complete within the period set by the `wait_for_completion_timeout` + parameter, regardless of this value. + :param keep_on_completion: Indicates whether the query and its results are stored + in the cluster. If false, the query and its results are stored in the cluster + only if the request does not complete during the period set by the `wait_for_completion_timeout` + parameter. + :param locale: + :param params: To avoid any attempts of hacking or code injection, extract the + values in a separate list of parameters. Use question mark placeholders (?) + in the query string for each of the parameters. + :param profile: If provided and `true` the response will include an extra `profile` + object with information on how the query was executed. This information is + for human debugging and its format can change at any time but it can give + some insight into the performance of each part of the query. + :param tables: Tables to use with the LOOKUP operation. The top level key is + the table name and the next level key is the column name. + :param wait_for_completion_timeout: The period to wait for the request to finish. + By default, the request waits for 1 second for the query results. If the + query completes during this period, results are returned Otherwise, a query + ID is returned that can later be used to retrieve the results. + """ + if query is None and body is None: + raise ValueError("Empty value passed for parameter 'query'") + __path_parts: t.Dict[str, str] = {} + __path = "/_query/async" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if delimiter is not None: + __query["delimiter"] = delimiter + if drop_null_columns is not None: + __query["drop_null_columns"] = drop_null_columns + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if format is not None: + __query["format"] = format + if human is not None: + __query["human"] = human + if keep_alive is not None: + __query["keep_alive"] = keep_alive + if keep_on_completion is not None: + __query["keep_on_completion"] = keep_on_completion + if pretty is not None: + __query["pretty"] = pretty + if wait_for_completion_timeout is not None: + __query["wait_for_completion_timeout"] = wait_for_completion_timeout + if not __body: + if query is not None: + __body["query"] = query + if columnar is not None: + __body["columnar"] = columnar + if filter is not None: + __body["filter"] = filter + if locale is not None: + __body["locale"] = locale + if params is not None: + __body["params"] = params + if profile is not None: + __body["profile"] = profile + if tables is not None: + __body["tables"] = tables + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="esql.async_query", + path_parts=__path_parts, + ) + + @_rewrite_parameters() + async def async_query_delete( + self, + *, + id: str, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + 
""" + Delete an async ES|QL query. If the query is still running, it is cancelled. + Otherwise, the stored results are deleted. If the Elasticsearch security features + are enabled, only the following users can use this API to delete a query: * The + authenticated user that submitted the original query request * Users with the + `cancel_task` cluster privilege + + ``_ + + :param id: The unique identifier of the query. A query ID is provided in the + ES|QL async query API response for a query that does not complete in the + designated time. A query ID is also provided when the request was submitted + with the `keep_on_completion` parameter set to `true`. + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_query/async/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "DELETE", + __path, + params=__query, + headers=__headers, + endpoint_id="esql.async_query_delete", + path_parts=__path_parts, + ) + + @_rewrite_parameters() + async def async_query_get( + self, + *, + id: str, + drop_null_columns: t.Optional[bool] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + wait_for_completion_timeout: t.Optional[ + t.Union[str, t.Literal[-1], t.Literal[0]] + ] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Get async ES|QL query results. Get the current status and available results or + stored results for an ES|QL asynchronous query. If the Elasticsearch security + features are enabled, only the user who first submitted the ES|QL query can retrieve + the results using this API. + + ``_ + + :param id: The unique identifier of the query. A query ID is provided in the + ES|QL async query API response for a query that does not complete in the + designated time. A query ID is also provided when the request was submitted + with the `keep_on_completion` parameter set to `true`. + :param drop_null_columns: Indicates whether columns that are entirely `null` + will be removed from the `columns` and `values` portion of the results. If + `true`, the response will include an extra section under the name `all_columns` + which has the name of all the columns. + :param keep_alive: The period for which the query and its results are stored + in the cluster. When this period expires, the query and its results are deleted, + even if the query is still ongoing. + :param wait_for_completion_timeout: The period to wait for the request to finish. + By default, the request waits for complete query results. If the request + completes during the period specified in this parameter, complete query results + are returned. Otherwise, the response returns an `is_running` value of `true` + and no results. 
+ """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_query/async/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + if drop_null_columns is not None: + __query["drop_null_columns"] = drop_null_columns + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if keep_alive is not None: + __query["keep_alive"] = keep_alive + if pretty is not None: + __query["pretty"] = pretty + if wait_for_completion_timeout is not None: + __query["wait_for_completion_timeout"] = wait_for_completion_timeout + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="esql.async_query_get", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=( "query", diff --git a/elasticsearch/_async/client/features.py b/elasticsearch/_async/client/features.py index ed85bbb4b..f1d79ec34 100644 --- a/elasticsearch/_async/client/features.py +++ b/elasticsearch/_async/client/features.py @@ -32,6 +32,7 @@ async def get_features( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -48,6 +49,8 @@ async def get_features( the plugin that defines that feature must be installed on the master node. ``_ + + :param master_timeout: Period to wait for a connection to the master node. """ __path_parts: t.Dict[str, str] = {} __path = "/_features" @@ -58,6 +61,8 @@ async def get_features( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -78,6 +83,7 @@ async def reset_features( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -97,6 +103,8 @@ async def reset_features( individual nodes. ``_ + + :param master_timeout: Period to wait for a connection to the master node. """ __path_parts: t.Dict[str, str] = {} __path = "/_features/_reset" @@ -107,6 +115,8 @@ async def reset_features( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} diff --git a/elasticsearch/_async/client/ilm.py b/elasticsearch/_async/client/ilm.py index 009706e41..53c1d959f 100644 --- a/elasticsearch/_async/client/ilm.py +++ b/elasticsearch/_async/client/ilm.py @@ -90,7 +90,6 @@ async def explain_lifecycle( only_errors: t.Optional[bool] = None, only_managed: t.Optional[bool] = None, pretty: t.Optional[bool] = None, - timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Explain the lifecycle state. 
Get the current lifecycle status for one or more @@ -112,8 +111,6 @@ async def explain_lifecycle( while executing the policy, or attempting to use a policy that does not exist. :param only_managed: Filters the returned indices to only indices that are managed by ILM. - :param timeout: Period to wait for a response. If no response is received before - the timeout expires, the request fails and returns an error. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -134,8 +131,6 @@ async def explain_lifecycle( __query["only_managed"] = only_managed if pretty is not None: __query["pretty"] = pretty - if timeout is not None: - __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", @@ -341,8 +336,8 @@ async def move_to_step( ``_ :param index: The name of the index whose lifecycle step is to change - :param current_step: - :param next_step: + :param current_step: The step that the index is expected to be in. + :param next_step: The step that you want to run. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -552,8 +547,11 @@ async def start( ``_ - :param master_timeout: - :param timeout: + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_ilm/start" @@ -601,8 +599,11 @@ async def stop( ``_ - :param master_timeout: - :param timeout: + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_ilm/stop" diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py index 11768a72a..1b0cb6332 100644 --- a/elasticsearch/_async/client/indices.py +++ b/elasticsearch/_async/client/indices.py @@ -143,8 +143,12 @@ async def analyze( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get tokens from text analysis. The analyze API performs [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) - on a text string and returns the resulting tokens. + Get tokens from text analysis. The analyze API performs analysis on a text string + and returns the resulting tokens. Generating excessive amount of tokens may cause + a node to run out of memory. The `index.analyze.max_token_count` setting enables + you to limit the number of tokens that can be produced. If more than this limit + of tokens gets generated, an error occurs. The `_analyze` endpoint without a + specified index will always use `10000` as its limit. ``_ @@ -246,7 +250,10 @@ async def clear_cache( ) -> ObjectApiResponse[t.Any]: """ Clear the cache. Clear the cache of one or more indices. For data streams, the - API clears the caches of the stream's backing indices. + API clears the caches of the stream's backing indices. By default, the clear + cache API clears all caches. To clear only specific caches, use the `fielddata`, + `query`, or `request` parameters. 
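Matching the expanded `clear_cache` description above (clear everything by default, or only specific caches), a small sketch with illustrative index and field names:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

# Clear every cache for the target indices (the default behavior).
client.indices.clear_cache(index="my-index")

# Clear only the query and request caches...
client.indices.clear_cache(index="my-index", query=True, request=True)

# ...or only the fielddata cache for specific fields.
client.indices.clear_cache(index="my-index", fielddata=True, fields="tags,user.id")
```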
To clear the cache only of specific fields, + use the `fields` parameter. ``_ @@ -347,10 +354,28 @@ async def clone( the new index, which is a much more time consuming process. * Finally, it recovers the target index as though it were a closed index which had just been re-opened. IMPORTANT: Indices can only be cloned if they meet the following requirements: + * The index must be marked as read-only and have a cluster health status of green. * The target index must not exist. * The source index must have the same number of primary shards as the target index. * The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing - index. + index. The current write index on a data stream cannot be cloned. In order to + clone the current write index, the data stream must first be rolled over so that + a new write index is created and then the previous write index can be cloned. + NOTE: Mappings cannot be specified in the `_clone` request. The mappings of the + source index will be used for the target index. **Monitor the cloning process** + The cloning process can be monitored with the cat recovery API or the cluster + health API can be used to wait until all primary shards have been allocated by + setting the `wait_for_status` parameter to `yellow`. The `_clone` API returns + as soon as the target index has been added to the cluster state, before any shards + have been allocated. At this point, all shards are in the state unassigned. If, + for any reason, the target index can't be allocated, its primary shard will remain + unassigned until it can be allocated on that node. Once the primary shard is + allocated, it moves to state initializing, and the clone process begins. When + the clone operation completes, the shard will become active. At that point, Elasticsearch + will try to allocate any replicas and may decide to relocate the primary shard + to another node. **Wait for active shards** Because the clone operation creates + a new index to clone the shards to, the wait for active shards setting on index + creation applies to the clone index action as well. ``_ @@ -536,7 +561,26 @@ async def create( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create an index. Creates a new index. + Create an index. You can use the create index API to add a new index to an Elasticsearch + cluster. When creating an index, you can specify the following: * Settings for + the index. * Mappings for fields in the index. * Index aliases **Wait for active + shards** By default, index creation will only return a response to the client + when the primary copies of each shard have been started, or the request times + out. The index creation response will indicate what happened. For example, `acknowledged` + indicates whether the index was successfully created in the cluster, `while shards_acknowledged` + indicates whether the requisite number of shard copies were started for each + shard in the index before timing out. Note that it is still possible for either + `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation + to be successful. These values simply indicate whether the operation completed + before the timeout. If `acknowledged` is false, the request timed out before + the cluster state was updated with the newly created index, but it probably will + be created sometime soon. 
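A sketch of the clone workflow the docstring above describes: mark the source index read-only, clone it, then use the cluster health API to wait until the target's primary shards are allocated. Index names are hypothetical; the client scaffolding from the first sketch is assumed.

```python
# 1. Put a write block on the source index so it is read-only.
await client.indices.add_block(index="my_source_index", block="write")
# 2. Clone it into a new target index.
await client.indices.clone(index="my_source_index", target="my_target_index")
# 3. The clone API returns before shards are allocated; wait via cluster health.
await client.cluster.health(index="my_target_index", wait_for_status="yellow")
```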
If `shards_acknowledged` is false, then the request + timed out before the requisite number of shards were started (by default just + the primaries), even if the cluster state was successfully updated to reflect + the newly created index (that is to say, `acknowledged` is `true`). You can change + the default of only waiting for the primary shards to start through the index + setting `index.write.wait_for_active_shards`. Note that changing this setting + will also affect the `wait_for_active_shards` value on all subsequent write operations. ``_ @@ -732,7 +776,11 @@ async def delete( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete indices. Deletes one or more indices. + Delete indices. Deleting an index deletes its documents, shards, and metadata. + It does not delete related Kibana components, such as data views, visualizations, + or dashboards. You cannot delete the current write index of a data stream. To + delete the index, you must roll over the data stream so a new write index is + created. You can then use the delete index API to delete the previous write index. ``_ @@ -804,7 +852,7 @@ async def delete_alias( """ Delete an alias. Removes a data stream or index from an alias. - ``_ + ``_ :param index: Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). @@ -1034,7 +1082,7 @@ async def delete_template( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a legacy index template. + Delete a legacy index template. ``_ @@ -1100,7 +1148,13 @@ async def disk_usage( Analyze the index disk usage. Analyze the disk usage of each field of an index or data stream. This API might not support indices created in previous Elasticsearch versions. The result of a small index can be inaccurate as some parts of an index - might not be analyzed by the API. + might not be analyzed by the API. NOTE: The total size of fields of the analyzed + shards of the index in the response is usually smaller than the index `store_size` + value because some small metadata files are ignored and some parts of data files + might not be scanned by the API. Since stored fields are stored together in a + compressed format, the sizes of stored fields are also estimates and can be inaccurate. + The stored size of the `_id` field is likely underestimated while the `_source` + field is overestimated. ``_ @@ -1249,8 +1303,7 @@ async def exists( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Check indices. Checks if one or more indices, index aliases, or data streams - exist. + Check indices. Check if one or more indices, index aliases, or data streams exist. ``_ @@ -1447,16 +1500,21 @@ async def exists_template( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Check existence of index templates. Returns information about whether a particular - index template exists. + Check existence of index templates. Get information about whether index templates + exist. Index templates define settings, mappings, and aliases that can be applied + automatically to new indices. IMPORTANT: This documentation is about legacy index + templates, which are deprecated and will be replaced by the composable templates + introduced in Elasticsearch 7.8. 
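A sketch of the create-index acknowledgement semantics described above. The index name, mapping, and settings are illustrative; the client scaffolding from the first sketch is assumed.

```python
resp = await client.indices.create(
    index="my-index",
    mappings={"properties": {"message": {"type": "text"}}},
    settings={"index": {"number_of_shards": 1}},
    wait_for_active_shards="all",  # wait for replicas too, not just the primaries
)
# Either flag can be false on a timeout even though the index may still be created later.
print(resp["acknowledged"], resp["shards_acknowledged"])
```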
``_ - :param name: The comma separated names of the index templates - :param flat_settings: Return settings in flat format (default: false) - :param local: Return local information, do not retrieve the state from master - node (default: false) - :param master_timeout: Explicit operation timeout for connection to master node + :param name: A comma-separated list of index template names used to limit the + request. Wildcard (`*`) expressions are supported. + :param flat_settings: Indicates whether to use a flat format for the response. + :param local: Indicates whether to get information from the local node only. + :param master_timeout: The period to wait for the master node. If the master + node is not available before the timeout expires, the request fails and returns + an error. To indicate that the request should never timeout, set it to `-1`. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -1559,9 +1617,7 @@ async def field_usage_stats( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, - timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, @@ -1570,7 +1626,10 @@ async def field_usage_stats( Get field usage stats. Get field usage information for each shard and field of an index. Field usage statistics are automatically captured when queries are running on a cluster. A shard-level search request that accesses a given field, - even if multiple times during that request, is counted as a single use. + even if multiple times during that request, is counted as a single use. The response + body reports the per-shard usage count of the data structures that back the fields + in the index. A given request will increment each count by a maximum value of + 1, even if the request accesses the same field multiple times. ``_ @@ -1589,11 +1648,6 @@ async def field_usage_stats( in the statistics. :param ignore_unavailable: If `true`, missing or closed indices are not included in the response. - :param master_timeout: Period to wait for a connection to the master node. If - no response is received before the timeout expires, the request fails and - returns an error. - :param timeout: Period to wait for a response. If no response is received before - the timeout expires, the request fails and returns an error. :param wait_for_active_shards: The number of shard copies that must be active before proceeding with the operation. Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). @@ -1617,12 +1671,8 @@ async def field_usage_stats( __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty - if timeout is not None: - __query["timeout"] = timeout if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards __headers = {"accept": "application/json"} @@ -1770,7 +1820,35 @@ async def forcemerge( merges. So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. 
If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since - the new documents can't be backed up incrementally. + the new documents can't be backed up incrementally. **Blocks during a force merge** + Calls to this API block until the merge is complete (unless request contains + `wait_for_completion=false`). If the client connection is lost before completion + then the force merge process will continue in the background. Any new requests + to force merge the same indices will also block until the ongoing force merge + is complete. **Running force merge asynchronously** If the request contains `wait_for_completion=false`, + Elasticsearch performs some preflight checks, launches the request, and returns + a task you can use to get the status of the task. However, you can not cancel + this task as the force merge task is not cancelable. Elasticsearch creates a + record of this task as a document at `_tasks/`. When you are done with + a task, you should delete the task document so Elasticsearch can reclaim the + space. **Force merging multiple indices** You can force merge multiple indices + with a single request by targeting: * One or more data streams that contain multiple + backing indices * Multiple indices * One or more aliases * All data streams and + indices in a cluster Each targeted shard is force-merged separately using the + force_merge threadpool. By default each node only has a single `force_merge` + thread which means that the shards on that node are force-merged one at a time. + If you expand the `force_merge` threadpool on a node then it will force merge + its shards in parallel Force merge makes the storage for the shard being merged + temporarily increase, as it may require free space up to triple its size in case + `max_num_segments parameter` is set to `1`, to rewrite all segments into a new + one. **Data streams and time-based indices** Force-merging is useful for managing + a data stream's older backing indices and other time-based indices, particularly + after a rollover. In these cases, each index only receives indexing traffic for + a certain period of time. Once an index receive no more writes, its shards can + be force-merged to a single segment. This can be a good idea because single-segment + shards can sometimes use simpler and more efficient data structures to perform + searches. For example: ``` POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1 + ``` ``_ @@ -1863,8 +1941,8 @@ async def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index information. Returns information about one or more indices. For data - streams, the API returns information about the stream’s backing indices. + Get index information. Get information about one or more indices. For data streams, + the API returns information about the stream’s backing indices. ``_ @@ -1955,7 +2033,7 @@ async def get_alias( """ Get aliases. Retrieves information for one or more data stream or index aliases. - ``_ + ``_ :param index: Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). 
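A sketch of the asynchronous force-merge flow described above, assuming the client scaffolding from the first sketch; the backing-index name mirrors the docstring's example and is illustrative.

```python
# Launch a force merge without blocking; Elasticsearch returns a task id instead.
resp = await client.indices.forcemerge(
    index=".ds-my-data-stream-2099.03.07-000001",
    max_num_segments=1,
    wait_for_completion=False,
)
task_id = resp["task"]
# Poll the task until it completes; delete the task document when you are done with it.
status = await client.tasks.get(task_id=task_id)
print(status["completed"])
```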
To target all data streams and indices, @@ -2080,6 +2158,42 @@ async def get_data_lifecycle( path_parts=__path_parts, ) + @_rewrite_parameters() + async def get_data_lifecycle_stats( + self, + *, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Get data stream lifecycle stats. Get statistics about the data streams that are + managed by a data stream lifecycle. + + ``_ + """ + __path_parts: t.Dict[str, str] = {} + __path = "/_lifecycle/stats" + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="indices.get_data_lifecycle_stats", + path_parts=__path_parts, + ) + @_rewrite_parameters() async def get_data_stream( self, @@ -2179,11 +2293,13 @@ async def get_field_mapping( """ Get mapping definitions. Retrieves mapping definitions for one or more fields. For data streams, the API retrieves field mappings for the stream’s backing indices. + This API is useful if you don't need a complete mapping or if an index mapping + contains a large number of fields. ``_ :param fields: Comma-separated list or wildcard expression of fields used to - limit returned information. + limit returned information. Supports wildcards (`*`). :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. @@ -2255,7 +2371,7 @@ async def get_index_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index templates. Returns information about one or more index templates. + Get index templates. Get information about one or more index templates. ``_ @@ -2328,8 +2444,8 @@ async def get_mapping( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get mapping definitions. Retrieves mapping definitions for one or more indices. - For data streams, the API retrieves mappings for the stream’s backing indices. + Get mapping definitions. For data streams, the API retrieves mappings for the + stream’s backing indices. ``_ @@ -2413,8 +2529,8 @@ async def get_settings( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index settings. Returns setting information for one or more indices. For - data streams, returns setting information for the stream’s backing indices. + Get index settings. Get setting information for one or more indices. For data + streams, it returns setting information for the stream's backing indices. ``_ @@ -2501,7 +2617,9 @@ async def get_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index templates. Retrieves information about one or more index templates. + Get index templates. Get information about one or more index templates. IMPORTANT: + This documentation is about legacy index templates, which are deprecated and + will be replaced by the composable templates introduced in Elasticsearch 7.8. 
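A usage sketch for the data stream lifecycle stats endpoint added above (same client scaffolding assumed; the endpoint takes no required parameters).

```python
# Returns statistics about data streams managed by a data stream lifecycle.
stats = await client.indices.get_data_lifecycle_stats()
print(stats)
```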
``_ @@ -2680,7 +2798,27 @@ async def open( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Opens a closed index. For data streams, the API opens any closed backing indices. + Open a closed index. For data streams, the API opens any closed backing indices. + A closed index is blocked for read/write operations and does not allow all operations + that opened indices allow. It is not possible to index documents or to search + for documents in a closed index. This allows closed indices to not have to maintain + internal data structures for indexing or searching documents, resulting in a + smaller overhead on the cluster. When opening or closing an index, the master + is responsible for restarting the index shards to reflect the new state of the + index. The shards will then go through the normal recovery process. The data + of opened or closed indices is automatically replicated by the cluster to ensure + that enough shard copies are safely kept around at all times. You can open and + close multiple indices. An error is thrown if the request explicitly refers to + a missing index. This behavior can be turned off by using the `ignore_unavailable=true` + parameter. By default, you must explicitly name the indices you are opening or + closing. To open or close indices with `_all`, `*`, or other wildcard expressions, + change the `action.destructive_requires_name` setting to `false`. This setting + can also be changed with the cluster update settings API. Closed indices consume + a significant amount of disk-space which can cause problems in managed environments. + Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` + to `false`. Because opening or closing an index allocates its shards, the `wait_for_active_shards` + setting on index creation applies to the `_open` and `_close` index actions as + well. ``_ @@ -3023,7 +3161,33 @@ async def put_index_template( ) -> ObjectApiResponse[t.Any]: """ Create or update an index template. Index templates define settings, mappings, - and aliases that can be applied automatically to new indices. + and aliases that can be applied automatically to new indices. Elasticsearch applies + templates to new indices based on an wildcard pattern that matches the index + name. Index templates are applied during data stream or index creation. For data + streams, these settings and mappings are applied when the stream's backing indices + are created. Settings and mappings specified in a create index API request override + any settings or mappings specified in an index template. Changes to index templates + do not affect existing indices, including the existing backing indices of a data + stream. You can use C-style `/* *\\/` block comments in index templates. You + can include comments anywhere in the request body, except before the opening + curly bracket. **Multiple matching templates** If multiple index templates match + the name of a new index or data stream, the template with the highest priority + is used. Multiple templates with overlapping index patterns at the same priority + are not allowed and an error will be thrown when attempting to create a template + matching an existing index template at identical priorities. **Composing aliases, + mappings, and settings** When multiple component templates are specified in the + `composed_of` field for an index template, they are merged in the order specified, + meaning that later component templates override earlier component templates. 
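A sketch of the priority and composition behaviour described above for composable index templates. The template and component names are illustrative, and the referenced component template is assumed to exist.

```python
# The highest-priority matching template wins when a new index or data stream is created.
await client.indices.put_index_template(
    name="my-logs-template",
    index_patterns=["logs-*"],
    priority=200,                   # must not collide with another template at the same priority
    composed_of=["base-settings"],  # later component templates override earlier ones
    template={"settings": {"index": {"number_of_shards": 1}}},
)
```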
+ Any mappings, settings, or aliases from the parent index template are merged + in next. Finally, any configuration on the index request itself is merged. Mapping + definitions are merged recursively, which means that later mapping components + can introduce new field mappings and update the mapping configuration. If a field + mapping is already contained in an earlier component, its definition will be + completely overwritten by the later one. This recursive merging strategy applies + not only to field mappings, but also root options like `dynamic_templates` and + `meta`. If an earlier component contains a `dynamic_templates` block, then by + default new `dynamic_templates` entries are appended onto the end. If an entry + already exists with the same key, then it is overwritten by the new definition. ``_ @@ -3053,8 +3217,11 @@ async def put_index_template( :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - :param meta: Optional user metadata about the index template. May have any contents. - This map is not automatically generated by Elasticsearch. + :param meta: Optional user metadata about the index template. It may have any + contents. It is not automatically generated or used by Elasticsearch. This + user-defined object is stored in the cluster state, so keeping it short is + preferable To unset the metadata, replace the template without specifying + it. :param priority: Priority to determine index template precedence when a new data stream or index is created. The index template with the highest priority is chosen. If no priority is specified the template is treated as though @@ -3063,7 +3230,9 @@ async def put_index_template( :param template: Template to be applied. It may optionally include an `aliases`, `mappings`, or `settings` configuration. :param version: Version number used to manage index templates externally. This - number is not automatically generated by Elasticsearch. + number is not automatically generated by Elasticsearch. External systems + can use these version numbers to simplify template management. To unset a + version, replace the template without specifying one. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -3182,9 +3351,27 @@ async def put_mapping( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update field mappings. Adds new fields to an existing data stream or index. You - can also use this API to change the search settings of existing fields. For data - streams, these changes are applied to all backing indices by default. + Update field mappings. Add new fields to an existing data stream or index. You + can also use this API to change the search settings of existing fields and add + new properties to existing object fields. For data streams, these changes are + applied to all backing indices by default. **Add multi-fields to an existing + field** Multi-fields let you index the same field in different ways. You can + use this API to update the fields mapping parameter and enable multi-fields for + an existing field. WARNING: If an index (or data stream) contains documents when + you add a multi-field, those documents will not have values for the new multi-field. + You can populate the new multi-field with the update by query API. 
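A sketch of the multi-field addition described above, followed by the update-by-query backfill the docstring recommends; field and index names are illustrative.

```python
# Add a keyword multi-field to an existing text field.
await client.indices.put_mapping(
    index="my-index",
    properties={
        "city": {
            "type": "text",
            "fields": {"raw": {"type": "keyword"}},
        }
    },
)
# Existing documents have no values for the new multi-field until they are re-processed.
await client.update_by_query(index="my-index", conflicts="proceed")
```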
**Change supported + mapping parameters for an existing field** The documentation for each mapping + parameter indicates whether you can update it for an existing field using this + API. For example, you can use the update mapping API to update the `ignore_above` + parameter. **Change the mapping of an existing field** Except for supported mapping + parameters, you can't change the mapping or field type of an existing field. + Changing an existing field could invalidate data that's already indexed. If you + need to change the mapping of a field in a data stream's backing indices, refer + to documentation about modifying data streams. If you need to change the mapping + of a field in other indices, create a new index with the correct mapping and + reindex your data into that index. **Rename a field** Renaming a field would + invalidate data already indexed under the old field name. Instead, add an alias + field to create an alternate field name. ``_ @@ -3315,6 +3502,19 @@ async def put_settings( """ Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. + To revert a setting to the default value, use a null value. The list of per-index + settings that can be updated dynamically on live indices can be found in index + module documentation. To preserve existing settings from being updated, set the + `preserve_existing` parameter to `true`. NOTE: You can only define new analyzers + on closed indices. To add an analyzer, you must close the index, define the analyzer, + and reopen the index. You cannot close the write index of a data stream. To update + the analyzer for a data stream's write index and future backing indices, update + the analyzer in the index template used by the stream. Then roll over the data + stream to apply the new analyzer to the stream's write index and future backing + indices. This affects searches and any new data added to the stream after the + rollover. However, it does not affect the data stream's backing indices or their + existing data. To change the analyzer for existing backing indices, you must + create a new data stream and reindex your data into it. ``_ @@ -3428,7 +3628,14 @@ async def put_template( according to their order. Index templates are only applied during index creation. Changes to index templates do not affect existing indices. Settings and mappings specified in create index API requests override any settings or mappings specified - in an index template. + in an index template. You can use C-style `/* *\\/` block comments in index templates. + You can include comments anywhere in the request body, except before the opening + curly bracket. **Indices matching multiple templates** Multiple index templates + can potentially match an index, in this case, both the settings and mappings + are merged into the final configuration of the index. The order of the merging + can be controlled using the order parameter, with lower order being applied first, + and higher orders overriding them. NOTE: Multiple matching templates with the + same order value will result in a non-deterministic merging order. ``_ @@ -3449,7 +3656,8 @@ async def put_template( with lower values. :param settings: Configuration options for the index. :param version: Version number used to manage index templates externally. This - number is not automatically generated by Elasticsearch. + number is not automatically generated by Elasticsearch. 
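Two sketches of the dynamic-settings behaviour described above: resetting a setting to its default with a null value, and defining a new analyzer on a closed index. Index and analyzer names are illustrative; the client scaffolding from the first sketch is assumed.

```python
# Reset a dynamic setting to its default by sending null.
await client.indices.put_settings(
    index="my-index",
    settings={"index": {"refresh_interval": None}},
)

# New analyzers can only be defined on closed indices.
await client.indices.close(index="my-index")
await client.indices.put_settings(
    index="my-index",
    settings={
        "analysis": {
            "analyzer": {"content": {"type": "custom", "tokenizer": "whitespace"}}
        }
    },
)
await client.indices.open(index="my-index")
```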
To unset a version, + replace the template without specifying one. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -3510,23 +3718,25 @@ async def recovery( """ Get index recovery information. Get information about ongoing and completed shard recoveries for one or more indices. For data streams, the API returns information - for the stream's backing indices. Shard recovery is the process of initializing - a shard copy, such as restoring a primary shard from a snapshot or creating a - replica shard from a primary shard. When a shard recovery completes, the recovered - shard is available for search and indexing. Recovery automatically occurs during - the following processes: * When creating an index for the first time. * When - a node rejoins the cluster and starts up any missing primary shard copies using - the data that it holds in its data path. * Creation of new replica shard copies - from the primary. * Relocation of a shard copy to a different node in the same - cluster. * A snapshot restore operation. * A clone, shrink, or split operation. - You can determine the cause of a shard recovery using the recovery or cat recovery - APIs. The index recovery API reports information about completed recoveries only - for shard copies that currently exist in the cluster. It only reports the last - recovery for each shard copy and does not report historical information about - earlier recoveries, nor does it report information about the recoveries of shard - copies that no longer exist. This means that if a shard copy completes a recovery - and then Elasticsearch relocates it onto a different node then the information - about the original recovery will not be shown in the recovery API. + for the stream's backing indices. All recoveries, whether ongoing or complete, + are kept in the cluster state and may be reported on at any time. Shard recovery + is the process of initializing a shard copy, such as restoring a primary shard + from a snapshot or creating a replica shard from a primary shard. When a shard + recovery completes, the recovered shard is available for search and indexing. + Recovery automatically occurs during the following processes: * When creating + an index for the first time. * When a node rejoins the cluster and starts up + any missing primary shard copies using the data that it holds in its data path. + * Creation of new replica shard copies from the primary. * Relocation of a shard + copy to a different node in the same cluster. * A snapshot restore operation. + * A clone, shrink, or split operation. You can determine the cause of a shard + recovery using the recovery or cat recovery APIs. The index recovery API reports + information about completed recoveries only for shard copies that currently exist + in the cluster. It only reports the last recovery for each shard copy and does + not report historical information about earlier recoveries, nor does it report + information about the recoveries of shard copies that no longer exist. This means + that if a shard copy completes a recovery and then Elasticsearch relocates it + onto a different node then the information about the original recovery will not + be shown in the recovery API. ``_ @@ -3590,7 +3800,17 @@ async def refresh( """ Refresh an index. A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation - on the stream’s backing indices. + on the stream’s backing indices. 
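A usage sketch for the recovery endpoint described above; the index name is illustrative and only the most recent recovery per existing shard copy is reported, as the docstring notes.

```python
# Report the last recovery for each shard copy of the index.
resp = await client.indices.recovery(index="my-index", active_only=False)
for index_name, info in resp.items():
    for shard in info["shards"]:
        print(index_name, shard["stage"])
```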
By default, Elasticsearch periodically refreshes + indices every second, but only on indices that have received one search request + or more in the last 30 seconds. You can change this default interval with the + `index.refresh_interval` setting. Refresh requests are synchronous and do not + return a response until the refresh operation completes. Refreshes are resource-intensive. + To ensure good cluster performance, it's recommended to wait for Elasticsearch's + periodic refresh rather than performing an explicit refresh when possible. If + your application workflow indexes documents and then runs a search to retrieve + the indexed document, it's recommended to use the index API's `refresh=wait_for` + query parameter option. This option ensures the indexing operation waits for + a periodic refresh before running the search. ``_ @@ -3752,6 +3972,24 @@ async def resolve_cluster( search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index). * Cluster version information, including the Elasticsearch server version. + For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information + about the local cluster and all remotely configured clusters that start with + the alias `cluster*`. Each cluster returns information about whether it has any + indices, aliases or data streams that match `my-index-*`. **Advantages of using + this endpoint before a cross-cluster search** You may want to exclude a cluster + or index from a search when: * A remote cluster is not currently connected and + is configured with `skip_unavailable=false`. Running a cross-cluster search under + those conditions will cause the entire search to fail. * A cluster has no matching + indices, aliases or data streams for the index expression (or your user does + not have permissions to search them). For example, suppose your index expression + is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data + streams that match `logs*`. In that case, that cluster will return no results + from that cluster if you include it in a cross-cluster search. * The index expression + (combined with any query parameters you specify) will likely cause an exception + to be thrown when you do the search. In these cases, the "error" field in the + `_resolve/cluster` response will be present. (This is also where security/permission + errors will be shown.) * A remote cluster is an older version that does not support + the feature you want to use in your search. ``_ @@ -3898,7 +4136,33 @@ async def rollover( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Roll over to a new index. Creates a new index for a data stream or index alias. + Roll over to a new index. TIP: It is recommended to use the index lifecycle rollover + action to automate rollovers. The rollover API creates a new index for a data + stream or index alias. The API behavior depends on the rollover target. **Roll + over a data stream** If you roll over a data stream, the API creates a new write + index for the stream. The stream's previous write index becomes a regular backing + index. A rollover also increments the data stream's generation. **Roll over an + index alias with a write index** TIP: Prior to Elasticsearch 7.9, you'd typically + use an index alias with a write index to manage time series data. 
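A sketch of the index-then-search pattern the refresh docstring above recommends instead of an explicit refresh; the index name and document are illustrative.

```python
# Let the indexing call wait for the next periodic refresh instead of forcing one.
await client.index(
    index="my-index",
    document={"message": "hello"},
    refresh="wait_for",
)
# The document is now visible to searches.
resp = await client.search(index="my-index", query={"match": {"message": "hello"}})
print(resp["hits"]["total"]["value"])
```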
Data streams + replace this functionality, require less maintenance, and automatically integrate + with data tiers. If an index alias points to multiple indices, one of the indices + must be a write index. The rollover API creates a new write index for the alias + with `is_write_index` set to `true`. The API also `sets is_write_index` to `false` + for the previous write index. **Roll over an index alias with one index** If + you roll over an index alias that points to only one index, the API creates a + new index for the alias and removes the original index from the alias. NOTE: + A rollover creates a new index and is subject to the `wait_for_active_shards` + setting. **Increment index names for an alias** When you roll over an index alias, + you can specify a name for the new index. If you don't specify a name and the + current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, + the new index name increments that number. For example, if you roll over an alias + with a current index of `my-index-000001`, the rollover creates a new index named + `my-index-000002`. This number is always six characters and zero-padded, regardless + of the previous index's name. If you use an index alias for time series data, + you can use date math in the index name to track the rollover date. For example, + you can create an alias that points to an index named ``. + If you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`. + If you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`. ``_ @@ -4269,8 +4533,8 @@ async def simulate_index_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Simulate an index. Returns the index configuration that would be applied to the - specified index from an existing index template. + Simulate an index. Get the index configuration that would be applied to the specified + index from an existing index template. ``_ @@ -4347,7 +4611,7 @@ async def simulate_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Simulate an index template. Returns the index configuration that would be applied + Simulate an index template. Get the index configuration that would be applied by a particular index template. ``_ @@ -4481,25 +4745,29 @@ async def split( """ Split an index. Split an index into a new index with more primary shards. * Before you can split an index: * The index must be read-only. * The cluster health status - must be green. The number of times the index can be split (and the number of - shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` - setting. The number of routing shards specifies the hashing space that is used - internally to distribute documents across shards with consistent hashing. For - instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x - 3) could be split by a factor of 2 or 3. A split operation: * Creates a new target - index with the same definition as the source index, but with a larger number - of primary shards. * Hard-links segments from the source index into the target - index. If the file system doesn't support hard-linking, all segments are copied - into the new index, which is a much more time consuming process. * Hashes all - documents again, after low level files are created, to delete documents that - belong to a different shard. 
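A sketch of an alias rollover as described above. The alias is assumed to already point at a write index such as `my-index-000001`; names are illustrative.

```python
# Creates my-index-000002 and moves the write alias to it.
resp = await client.indices.rollover(alias="my-alias")
print(resp["old_index"], "->", resp["new_index"])
```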
* Recovers the target index as though it were a - closed index which had just been re-opened. IMPORTANT: Indices can only be split - if they satisfy the following requirements: * The target index must not exist. - * The source index must have fewer primary shards than the target index. * The - number of primary shards in the target index must be a multiple of the number - of primary shards in the source index. * The node handling the split process - must have sufficient free disk space to accommodate a second copy of the existing - index. + must be green. You can do make an index read-only with the following request + using the add index block API: ``` PUT /my_source_index/_block/write ``` The + current write index on a data stream cannot be split. In order to split the current + write index, the data stream must first be rolled over so that a new write index + is created and then the previous write index can be split. The number of times + the index can be split (and the number of shards that each original shard can + be split into) is determined by the `index.number_of_routing_shards` setting. + The number of routing shards specifies the hashing space that is used internally + to distribute documents across shards with consistent hashing. For instance, + a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be + split by a factor of 2 or 3. A split operation: * Creates a new target index + with the same definition as the source index, but with a larger number of primary + shards. * Hard-links segments from the source index into the target index. If + the file system doesn't support hard-linking, all segments are copied into the + new index, which is a much more time consuming process. * Hashes all documents + again, after low level files are created, to delete documents that belong to + a different shard. * Recovers the target index as though it were a closed index + which had just been re-opened. IMPORTANT: Indices can only be split if they satisfy + the following requirements: * The target index must not exist. * The source index + must have fewer primary shards than the target index. * The number of primary + shards in the target index must be a multiple of the number of primary shards + in the source index. * The node handling the split process must have sufficient + free disk space to accommodate a second copy of the existing index. ``_ diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py index 60addf118..47bba65d2 100644 --- a/elasticsearch/_async/client/inference.py +++ b/elasticsearch/_async/client/inference.py @@ -317,3 +317,82 @@ async def put( endpoint_id="inference.put", path_parts=__path_parts, ) + + @_rewrite_parameters( + body_name="inference_config", + ) + async def update( + self, + *, + inference_id: str, + inference_config: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Mapping[str, t.Any]] = None, + task_type: t.Optional[ + t.Union[ + str, + t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"], + ] + ] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Update an inference endpoint. Modify `task_settings`, secrets (within `service_settings`), + or `num_allocations` for an inference endpoint, depending on the specific endpoint + service and `task_type`. 
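A usage sketch for the inference update endpoint added above. The body shape depends on the service backing the endpoint, so the `num_allocations` setting shown here is only an assumption for an Elasticsearch-hosted model, and the endpoint id is hypothetical.

```python
# Update an existing inference endpoint; only fields supported by its service
# and task type may be changed (task_settings, secrets, num_allocations).
await client.inference.update(
    inference_id="my-elser-endpoint",  # hypothetical endpoint id
    inference_config={"service_settings": {"num_allocations": 4}},
)
```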
IMPORTANT: The inference APIs enable you to use certain + services, such as built-in machine learning models (ELSER, E5), models uploaded + through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, + Watsonx.ai, or Hugging Face. For built-in models and models uploaded through + Eland, the inference APIs offer an alternative way to use and manage trained + models. However, if you do not plan to use the inference APIs to use these models + or if you want to use non-NLP models, use the machine learning trained model + APIs. + + ``_ + + :param inference_id: The unique identifier of the inference endpoint. + :param inference_config: + :param task_type: The type of inference task that the model performs. + """ + if inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'inference_id'") + if inference_config is None and body is None: + raise ValueError( + "Empty value passed for parameters 'inference_config' and 'body', one of them should be set." + ) + elif inference_config is not None and body is not None: + raise ValueError("Cannot set both 'inference_config' and 'body'") + __path_parts: t.Dict[str, str] + if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: + __path_parts = { + "task_type": _quote(task_type), + "inference_id": _quote(inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}/_update' + elif inference_id not in SKIP_IN_PATH: + __path_parts = {"inference_id": _quote(inference_id)} + __path = f'/_inference/{__path_parts["inference_id"]}/_update' + else: + raise ValueError("Couldn't find a path for the given parameters") + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __body = inference_config if inference_config is not None else body + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.update", + path_parts=__path_parts, + ) diff --git a/elasticsearch/_async/client/ingest.py b/elasticsearch/_async/client/ingest.py index c2585b037..92fbd8c93 100644 --- a/elasticsearch/_async/client/ingest.py +++ b/elasticsearch/_async/client/ingest.py @@ -226,7 +226,6 @@ async def get_geoip_database( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -238,9 +237,6 @@ async def get_geoip_database( :param id: Comma-separated list of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`. - :param master_timeout: Period to wait for a connection to the master node. If - no response is received before the timeout expires, the request fails and - returns an error. 
""" __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: @@ -256,8 +252,6 @@ async def get_geoip_database( __query["filter_path"] = filter_path if human is not None: __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} diff --git a/elasticsearch/_async/client/license.py b/elasticsearch/_async/client/license.py index 72f606865..41eeb0aa9 100644 --- a/elasticsearch/_async/client/license.py +++ b/elasticsearch/_async/client/license.py @@ -32,7 +32,9 @@ async def delete( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Delete the license. When the license expires, your subscription level reverts @@ -40,6 +42,10 @@ async def delete( can use this API. ``_ + + :param master_timeout: Period to wait for a connection to the master node. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_license" @@ -50,8 +56,12 @@ async def delete( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", @@ -196,7 +206,9 @@ async def post( human: t.Optional[bool] = None, license: t.Optional[t.Mapping[str, t.Any]] = None, licenses: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -215,6 +227,9 @@ async def post( :param license: :param licenses: A sequence of one or more JSON documents containing the license information. + :param master_timeout: Period to wait for a connection to the master node. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_license" @@ -228,8 +243,12 @@ async def post( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if license is not None: __body["license"] = license @@ -258,7 +277,9 @@ async def post_start_basic( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Start a basic license. 
Start an indefinite basic license, which gives access @@ -273,6 +294,9 @@ async def post_start_basic( :param acknowledge: whether the user has acknowledged acknowledge messages (default: false) + :param master_timeout: Period to wait for a connection to the master node. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_license/start_basic" @@ -285,8 +309,12 @@ async def post_start_basic( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", @@ -305,6 +333,7 @@ async def post_start_trial( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, type_query_string: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: @@ -320,6 +349,7 @@ async def post_start_trial( :param acknowledge: whether the user has acknowledged acknowledge messages (default: false) + :param master_timeout: Period to wait for a connection to the master node. :param type_query_string: """ __path_parts: t.Dict[str, str] = {} @@ -333,6 +363,8 @@ async def post_start_trial( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if type_query_string is not None: diff --git a/elasticsearch/_async/client/logstash.py b/elasticsearch/_async/client/logstash.py index c4651a39a..a98c85368 100644 --- a/elasticsearch/_async/client/logstash.py +++ b/elasticsearch/_async/client/logstash.py @@ -37,7 +37,8 @@ async def delete_pipeline( ) -> ObjectApiResponse[t.Any]: """ Delete a Logstash pipeline. Delete a pipeline that is used for Logstash Central - Management. + Management. If the request succeeds, you receive an empty response with an appropriate + status code. ``_ diff --git a/elasticsearch/_async/client/migration.py b/elasticsearch/_async/client/migration.py index e864e2ecf..c1eef1d4e 100644 --- a/elasticsearch/_async/client/migration.py +++ b/elasticsearch/_async/client/migration.py @@ -39,7 +39,7 @@ async def deprecations( Get deprecation information. Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version. TIP: This APIs is designed for indirect use by the - Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. + Upgrade Assistant. You are strongly recommended to use the Upgrade Assistant. ``_ @@ -86,9 +86,9 @@ async def get_feature_upgrade_status( to how features store configuration information and data in system indices. Check which features need to be migrated and the status of any migrations that are in progress. TIP: This API is designed for indirect use by the Upgrade Assistant. - We strongly recommend you use the Upgrade Assistant. + You are strongly recommended to use the Upgrade Assistant. 
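A sketch of the self-service license transitions described above, acknowledging the feature changes up front via the `acknowledge` parameter shown in the diff.

```python
# Start an indefinite basic license, acknowledging any feature downgrades.
await client.license.post_start_basic(acknowledge=True)

# Or start a trial license of the highest tier.
await client.license.post_start_trial(acknowledge=True)
```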
- ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_migration/system_features" @@ -127,7 +127,7 @@ async def post_feature_upgrade( unavailable during the migration process. TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_migration/system_features" diff --git a/elasticsearch/_async/client/ml.py b/elasticsearch/_async/client/ml.py index c2edb5858..e4b2ec65e 100644 --- a/elasticsearch/_async/client/ml.py +++ b/elasticsearch/_async/client/ml.py @@ -686,6 +686,7 @@ async def delete_trained_model( force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Delete an unreferenced trained model. The request deletes a trained inference @@ -696,6 +697,8 @@ async def delete_trained_model( :param model_id: The unique identifier of the trained model. :param force: Forcefully deletes a trained model that is referenced by ingest pipelines or has a started deployment. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ if model_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_id'") @@ -712,6 +715,8 @@ async def delete_trained_model( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", @@ -3205,7 +3210,11 @@ async def put_data_frame_analytics( """ Create a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination - index. + index. By default, the query used in the source configuration is `{"match_all": + {}}`. If the destination index does not exist, it is created automatically when + you start the job. If you supply only a subset of the regression or classification + parameters, hyperparameter optimization occurs. It determines a value for each + of the undefined parameters. ``_ @@ -3382,7 +3391,8 @@ async def put_datafeed( an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay') - at each interval. When Elasticsearch security features are enabled, your datafeed + at each interval. By default, the datafeed uses the following query: `{"match_all": + {"boost": 1}}`. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or @@ -3645,7 +3655,8 @@ async def put_job( ) -> ObjectApiResponse[t.Any]: """ Create an anomaly detection job. If you include a `datafeed_config`, you must - have read index privileges on the source index. + have read index privileges on the source index. If you include a `datafeed_config` + but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`. 
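A sketch of a datafeed created without an explicit query, in which case the default `{"match_all": {"boost": 1}}` described above applies. The job id and index are hypothetical, and the anomaly detection job is assumed to already exist.

```python
# No 'query' is supplied, so the datafeed falls back to match_all with boost 1.
await client.ml.put_datafeed(
    datafeed_id="datafeed-my-job",
    job_id="my-job",
    indices=["my-metrics-index"],
)
```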
``_ @@ -5451,7 +5462,7 @@ async def validate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Validates an anomaly detection job. + Validate an anomaly detection job. ``_ diff --git a/elasticsearch/_async/client/nodes.py b/elasticsearch/_async/client/nodes.py index 5aa8aa0be..02fce0788 100644 --- a/elasticsearch/_async/client/nodes.py +++ b/elasticsearch/_async/client/nodes.py @@ -50,9 +50,9 @@ async def clear_repositories_metering_archive( ``_ :param node_id: Comma-separated list of node IDs or names used to limit returned - information. All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). - :param max_archive_version: Specifies the maximum [archive_version](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-repositories-metering-api.html#get-repositories-metering-api-response-body) - to be cleared from the archive. + information. + :param max_archive_version: Specifies the maximum `archive_version` to be cleared + from the archive. """ if node_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'node_id'") @@ -138,7 +138,6 @@ async def hot_threads( human: t.Optional[bool] = None, ignore_idle_threads: t.Optional[bool] = None, interval: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, snapshots: t.Optional[int] = None, sort: t.Optional[ @@ -161,9 +160,6 @@ async def hot_threads( :param ignore_idle_threads: If true, known idle threads (e.g. waiting in a socket select, or to get a task from an empty queue) are filtered out. :param interval: The interval to do the second sampling of threads. - :param master_timeout: Period to wait for a connection to the master node. If - no response is received before the timeout expires, the request fails and - returns an error. :param snapshots: Number of samples of thread stacktrace. :param sort: The sort order for 'cpu' type (default: total) :param threads: Specifies the number of hot threads to provide information for. @@ -189,8 +185,6 @@ async def hot_threads( __query["ignore_idle_threads"] = ignore_idle_threads if interval is not None: __query["interval"] = interval - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if snapshots is not None: @@ -223,7 +217,6 @@ async def info( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: @@ -238,9 +231,6 @@ async def info( :param metric: Limits the information returned to the specific metrics. Supports a comma-separated list, such as http,ingest. :param flat_settings: If true, returns settings in flat format. - :param master_timeout: Period to wait for a connection to the master node. If - no response is received before the timeout expires, the request fails and - returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. 
""" @@ -266,8 +256,6 @@ async def info( __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: @@ -374,7 +362,6 @@ async def stats( level: t.Optional[ t.Union[str, t.Literal["cluster", "indices", "shards"]] ] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, types: t.Optional[t.Sequence[str]] = None, @@ -406,9 +393,6 @@ async def stats( from segments that are not loaded into memory. :param level: Indicates whether statistics are aggregated at the cluster, index, or shard level. - :param master_timeout: Period to wait for a connection to the master node. If - no response is received before the timeout expires, the request fails and - returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. :param types: A comma-separated list of document types for the indexing index @@ -467,8 +451,6 @@ async def stats( __query["include_unloaded_segments"] = include_unloaded_segments if level is not None: __query["level"] = level - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: diff --git a/elasticsearch/_async/client/query_rules.py b/elasticsearch/_async/client/query_rules.py index 02f97bac7..b98a1d762 100644 --- a/elasticsearch/_async/client/query_rules.py +++ b/elasticsearch/_async/client/query_rules.py @@ -37,7 +37,9 @@ async def delete_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a query rule. Delete a query rule within a query ruleset. + Delete a query rule. Delete a query rule within a query ruleset. This is a destructive + action that is only recoverable by re-adding the same rule with the create or + update query rule API. ``_ @@ -85,7 +87,8 @@ async def delete_ruleset( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a query ruleset. + Delete a query ruleset. Remove a query ruleset and its associated data. This + is a destructive action that is not recoverable. ``_ @@ -221,8 +224,8 @@ async def list_rulesets( ``_ - :param from_: Starting offset (default: 0) - :param size: specifies a max number of results to get + :param from_: The offset from the first result to fetch. + :param size: The maximum number of results to retrieve. """ __path_parts: t.Dict[str, str] = {} __path = "/_query_rules" @@ -271,16 +274,25 @@ async def put_rule( ) -> ObjectApiResponse[t.Any]: """ Create or update a query rule. Create or update a query rule within a query ruleset. + IMPORTANT: Due to limitations within pinned queries, you can only pin documents + using ids or docs, but cannot use both in single rule. It is advised to use one + or the other in query rulesets, to avoid errors. Additionally, pinned queries + have a maximum limit of 100 pinned hits. If multiple matching rules pin more + than 100 documents, only the first 100 documents are pinned in the order they + are specified in the ruleset. ``_ :param ruleset_id: The unique identifier of the query ruleset containing the - rule to be created or updated + rule to be created or updated. 
:param rule_id: The unique identifier of the query rule within the specified - ruleset to be created or updated - :param actions: - :param criteria: - :param type: + ruleset to be created or updated. + :param actions: The actions to take when the rule is matched. The format of this + action depends on the rule type. + :param criteria: The criteria that must be met for the rule to be applied. If + multiple criteria are specified for a rule, all criteria must be met for + the rule to be applied. + :param type: The type of rule. :param priority: """ if ruleset_id in SKIP_IN_PATH: @@ -345,12 +357,19 @@ async def put_ruleset( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a query ruleset. + Create or update a query ruleset. There is a limit of 100 rules per ruleset. + This limit can be increased by using the `xpack.applications.rules.max_rules_per_ruleset` + cluster setting. IMPORTANT: Due to limitations within pinned queries, you can + only select documents using `ids` or `docs`, but cannot use both in single rule. + It is advised to use one or the other in query rulesets, to avoid errors. Additionally, + pinned queries have a maximum limit of 100 pinned hits. If multiple matching + rules pin more than 100 documents, only the first 100 documents are pinned in + the order they are specified in the ruleset. ``_ :param ruleset_id: The unique identifier of the query ruleset to be created or - updated + updated. :param rules: """ if ruleset_id in SKIP_IN_PATH: @@ -405,7 +424,9 @@ async def test( :param ruleset_id: The unique identifier of the query ruleset to be created or updated - :param match_criteria: + :param match_criteria: The match criteria to apply to rules in the given query + ruleset. Match criteria should match the keys defined in the `criteria.metadata` + field of the rule. """ if ruleset_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'ruleset_id'") diff --git a/elasticsearch/_async/client/rollup.py b/elasticsearch/_async/client/rollup.py index 11b4c5cda..8fe54394f 100644 --- a/elasticsearch/_async/client/rollup.py +++ b/elasticsearch/_async/client/rollup.py @@ -397,14 +397,37 @@ async def rollup_search( rolled-up documents utilize a different document structure than the original data. It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given - the original query. + the original query. The request body supports a subset of features from the regular + search API. The following functionality is not available: `size`: Because rollups + work on pre-aggregated data, no search hits can be returned and so size must + be set to zero or omitted entirely. `highlighter`, `suggestors`, `post_filter`, + `profile`, `explain`: These are similarly disallowed. **Searching both historical + rollup and non-rollup data** The rollup search API has the capability to search + across both "live" non-rollup data and the aggregated rollup data. This is done + by simply adding the live indices to the URI. For example: ``` GET sensor-1,sensor_rollup/_rollup_search + { "size": 0, "aggregations": { "max_temperature": { "max": { "field": "temperature" + } } } } ``` The rollup search endpoint does two things when the search runs: + * The original request is sent to the non-rollup index unaltered. * A rewritten + version of the original request is sent to the rollup index. 
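As a rough illustration of the pinned-rule behaviour described above, a caller might create and then test a rule like this; the `criteria`/`actions` document shapes and all ids are assumptions for illustration, not taken from this patch:

```
import asyncio
from elasticsearch import AsyncElasticsearch

client = AsyncElasticsearch("http://localhost:9200")  # assumed test cluster

async def pin_docs() -> None:
    # Pin documents by id when the user's query contains "pugs"; remember that
    # a rule may use ids or docs, but not both, and at most 100 hits are pinned.
    await client.query_rules.put_rule(
        ruleset_id="my-ruleset",
        rule_id="my-rule",
        type="pinned",
        criteria=[{"type": "contains", "metadata": "user_query", "values": ["pugs"]}],
        actions={"ids": ["doc-1", "doc-2"]},
    )
    # Check which rules in the ruleset would match the given criteria values.
    await client.query_rules.test(
        ruleset_id="my-ruleset",
        match_criteria={"user_query": "pugs"},
    )
    await client.close()

asyncio.run(pin_docs())
```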
When the two responses + are received, the endpoint rewrites the rollup response and merges the two together. + During the merging process, if there is any overlap in buckets between the two + responses, the buckets from the non-rollup index are used. ``_ - :param index: Enables searching rolled-up data using the standard Query DSL. + :param index: A comma-separated list of data streams and indices used to limit + the request. This parameter has the following rules: * At least one data + stream, index, or wildcard expression must be specified. This target can + include a rollup or non-rollup index. For data streams, the stream's backing + indices can only serve as non-rollup indices. Omitting the parameter or using + `_all` are not permitted. * Multiple non-rollup indices may be specified. + * Only one rollup index may be specified. If more than one are supplied, + an exception occurs. * Wildcard expressions (`*`) may be used. If they match + more than one rollup index, an exception occurs. However, you can use an + expression to match multiple non-rollup indices or data streams. :param aggregations: Specifies aggregations. :param aggs: Specifies aggregations. - :param query: Specifies a DSL query. + :param query: Specifies a DSL query that is subject to some limitations. :param rest_total_hits_as_int: Indicates whether hits.total should be rendered as an integer or an object in the rest search response :param size: Must be zero if set, as rollups work on pre-aggregated data. @@ -506,14 +529,23 @@ async def stop_job( ) -> ObjectApiResponse[t.Any]: """ Stop rollup jobs. If you try to stop a job that does not exist, an exception - occurs. If you try to stop a job that is already stopped, nothing happens. + occurs. If you try to stop a job that is already stopped, nothing happens. Since + only a stopped job can be deleted, it can be useful to block the API until the + indexer has fully stopped. This is accomplished with the `wait_for_completion` + query parameter, and optionally a timeout. For example: ``` POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s + ``` The parameter blocks the API call from returning until either the job has + moved to STOPPED or the specified time has elapsed. If the specified time elapses + without the job moving to STOPPED, a timeout exception occurs. ``_ :param id: Identifier for the rollup job. :param timeout: If `wait_for_completion` is `true`, the API blocks for (at maximum) the specified duration while waiting for the job to stop. If more than `timeout` - time has passed, the API throws a timeout exception. + time has passed, the API throws a timeout exception. NOTE: Even if a timeout + occurs, the stop request is still processing and eventually moves the job + to STOPPED. The timeout simply means the API call itself timed out while + waiting for the status change. :param wait_for_completion: If set to `true`, causes the API to block until the indexer state completely stops. If set to `false`, the API returns immediately and the indexer is stopped asynchronously in the background. diff --git a/elasticsearch/_async/client/searchable_snapshots.py b/elasticsearch/_async/client/searchable_snapshots.py index dbef68c7b..7985c936b 100644 --- a/elasticsearch/_async/client/searchable_snapshots.py +++ b/elasticsearch/_async/client/searchable_snapshots.py @@ -47,11 +47,9 @@ async def cache_stats( Get cache statistics. Get statistics about the shared cache for partially mounted indices. 
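Translating the rollup examples given in the docstrings above into client calls, a sketch might look like the following (index and job names mirror the documentation samples and are otherwise hypothetical):

```
import asyncio
from elasticsearch import AsyncElasticsearch

client = AsyncElasticsearch("http://localhost:9200")  # assumed test cluster

async def rollup_examples() -> None:
    # Search live and rolled-up data together; size must be 0 (or omitted)
    # because rollups are pre-aggregated and cannot return search hits.
    await client.rollup.rollup_search(
        index="sensor-1,sensor_rollup",
        size=0,
        aggregations={"max_temperature": {"max": {"field": "temperature"}}},
    )
    # Block until the job reaches STOPPED, or raise a timeout after 10 seconds.
    await client.rollup.stop_job(id="sensor", wait_for_completion=True, timeout="10s")
    await client.close()

asyncio.run(rollup_examples())
```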
- ``_ + ``_ - :param node_id: A comma-separated list of node IDs or names to limit the returned - information; use `_local` to return information from the node you're connecting - to, leave empty to get information from all nodes + :param node_id: The names of the nodes in the cluster to target. :param master_timeout: """ __path_parts: t.Dict[str, str] @@ -107,9 +105,10 @@ async def clear_cache( Clear the cache. Clear indices and data streams from the shared cache for partially mounted indices. - ``_ + ``_ - :param index: A comma-separated list of index names + :param index: A comma-separated list of data streams, indices, and aliases to + clear from the cache. It supports wildcards (`*`). :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) @@ -184,17 +183,22 @@ async def mount( ``_ :param repository: The name of the repository containing the snapshot of the - index to mount - :param snapshot: The name of the snapshot of the index to mount - :param index: - :param ignore_index_settings: - :param index_settings: - :param master_timeout: Explicit operation timeout for connection to master node - :param renamed_index: - :param storage: Selects the kind of local storage used to accelerate searches. - Experimental, and defaults to `full_copy` - :param wait_for_completion: Should this request wait until the operation has - completed before returning + index to mount. + :param snapshot: The name of the snapshot of the index to mount. + :param index: The name of the index contained in the snapshot whose data is to + be mounted. If no `renamed_index` is specified, this name will also be used + to create the new index. + :param ignore_index_settings: The names of settings that should be removed from + the index when it is mounted. + :param index_settings: The settings that should be added to the index when it + is mounted. + :param master_timeout: The period to wait for the master node. If the master + node is not available before the timeout expires, the request fails and returns + an error. To indicate that the request should never timeout, set it to `-1`. + :param renamed_index: The name of the index that will be created. + :param storage: The mount option for the searchable snapshot index. + :param wait_for_completion: If true, the request blocks until the operation is + complete. """ if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'repository'") @@ -261,9 +265,10 @@ async def stats( """ Get searchable snapshot statistics. - ``_ + ``_ - :param index: A comma-separated list of index names + :param index: A comma-separated list of data streams and indices to retrieve + statistics for. :param level: Return stats aggregated at cluster, index or shard level """ __path_parts: t.Dict[str, str] diff --git a/elasticsearch/_async/client/security.py b/elasticsearch/_async/client/security.py index 816fba31e..c87daecfd 100644 --- a/elasticsearch/_async/client/security.py +++ b/elasticsearch/_async/client/security.py @@ -45,14 +45,33 @@ async def activate_user_profile( ) -> ObjectApiResponse[t.Any]: """ Activate a user profile. Create or update a user profile on behalf of another - user. + user. NOTE: The user profile feature is designed only for use by Kibana and Elastic's + Observability, Enterprise Search, and Elastic Security solutions. Individual + users and external applications should not call this API directly. 
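For the mount API documented above, a hedged sketch of mounting an index from a snapshot, assuming an `AsyncElasticsearch` client and hypothetical repository, snapshot, and index names:

```
import asyncio
from elasticsearch import AsyncElasticsearch

client = AsyncElasticsearch("http://localhost:9200")  # assumed test cluster

async def mount_snapshot() -> None:
    # Mount a snapshotted index as a searchable snapshot index.
    await client.searchable_snapshots.mount(
        repository="my_repository",
        snapshot="my_snapshot",
        index="my_index",
        renamed_index="my_index_mounted",            # name of the index to create
        index_settings={"index.number_of_replicas": 0},
        storage="full_copy",                          # mount option for the new index
        wait_for_completion=True,                     # block until the operation completes
    )
    await client.close()

asyncio.run(mount_snapshot())
```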
The calling + application must have either an `access_token` or a combination of `username` + and `password` for the user that the profile document is intended for. Elastic + reserves the right to change or remove this feature in future releases without + prior notice. This API creates or updates a profile document for end users with + information that is extracted from the user's authentication object including + `username`, `full_name,` `roles`, and the authentication realm. For example, + in the JWT `access_token` case, the profile user's `username` is extracted from + the JWT token claim pointed to by the `claims.principal` setting of the JWT realm + that authenticated the token. When updating a profile document, the API enables + the document if it was disabled. Any updates do not change existing content for + either the `labels` or `data` fields. ``_ - :param grant_type: - :param access_token: - :param password: - :param username: + :param grant_type: The type of grant. + :param access_token: The user's Elasticsearch access token or JWT. Both `access` + and `id` JWT token types are supported and they depend on the underlying + JWT realm configuration. If you specify the `access_token` grant type, this + parameter is required. It is not valid with other grant types. + :param password: The user's password. If you specify the `password` grant type, + this parameter is required. It is not valid with other grant types. + :param username: The username that identifies the user. If you specify the `password` + grant type, this parameter is required. It is not valid with other grant + types. """ if grant_type is None and body is None: raise ValueError("Empty value passed for parameter 'grant_type'") @@ -244,6 +263,94 @@ async def bulk_put_role( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("ids", "expiration", "metadata", "role_descriptors"), + ) + async def bulk_update_api_keys( + self, + *, + ids: t.Optional[t.Union[str, t.Sequence[str]]] = None, + error_trace: t.Optional[bool] = None, + expiration: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + metadata: t.Optional[t.Mapping[str, t.Any]] = None, + pretty: t.Optional[bool] = None, + role_descriptors: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Bulk update API keys. Update the attributes for multiple API keys. IMPORTANT: + It is not possible to use an API key as the authentication credential for this + API. To update API keys, the owner user's credentials are required. This API + is similar to the update API key API but enables you to apply the same update + to multiple API keys in one API call. This operation can greatly improve performance + over making individual updates. It is not possible to update expired or invalidated + API keys. This API supports updates to API key access scope, metadata and expiration. + The access scope of each API key is derived from the `role_descriptors` you specify + in the request and a snapshot of the owner user's permissions at the time of + the request. The snapshot of the owner's permissions is updated automatically + on every call. IMPORTANT: If you don't specify `role_descriptors` in the request, + a call to this API might still change an API key's access scope. 
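A minimal sketch of activating a profile with the `password` grant type described above; the username, password, and the `uid` response field shown are assumptions for illustration:

```
import asyncio
from elasticsearch import AsyncElasticsearch

client = AsyncElasticsearch("http://localhost:9200")  # assumed test cluster

async def activate_profile() -> None:
    # Create or update the profile document for the given end user.
    # With the "password" grant type, username and password are required.
    resp = await client.security.activate_user_profile(
        grant_type="password",
        username="jacknich",                 # hypothetical end user
        password="l0ng-r4nd0m-p@ssw0rd",     # hypothetical credential
    )
    print(resp)  # the response includes the profile's unique id
    await client.close()

asyncio.run(activate_profile())
```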
This change + can occur if the owner user's permissions have changed since the API key was + created or last modified. A successful request returns a JSON structure that + contains the IDs of all updated API keys, the IDs of API keys that already had + the requested changes and did not require an update, and error details for any + failed update. + + ``_ + + :param ids: The API key identifiers. + :param expiration: Expiration time for the API keys. By default, API keys never + expire. This property can be omitted to leave the value unchanged. + :param metadata: Arbitrary nested metadata to associate with the API keys. Within + the `metadata` object, top-level keys beginning with an underscore (`_`) + are reserved for system usage. Any information specified with this parameter + fully replaces metadata previously associated with the API key. + :param role_descriptors: The role descriptors to assign to the API keys. An API + key's effective permissions are an intersection of its assigned privileges + and the point-in-time snapshot of permissions of the owner user. You can + assign new privileges by specifying them in this parameter. To remove assigned + privileges, supply the `role_descriptors` parameter as an empty object `{}`. + If an API key has no assigned privileges, it inherits the owner user's full + permissions. The snapshot of the owner's permissions is always updated, whether + you supply the `role_descriptors` parameter. The structure of a role descriptor + is the same as the request for the create API keys API. + """ + if ids is None and body is None: + raise ValueError("Empty value passed for parameter 'ids'") + __path_parts: t.Dict[str, str] = {} + __path = "/_security/api_key/_bulk_update" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if ids is not None: + __body["ids"] = ids + if expiration is not None: + __body["expiration"] = expiration + if metadata is not None: + __body["metadata"] = metadata + if role_descriptors is not None: + __body["role_descriptors"] = role_descriptors + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="security.bulk_update_api_keys", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=("password", "password_hash"), ) @@ -773,6 +880,74 @@ async def create_service_token( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("x509_certificate_chain",), + ) + async def delegate_pki( + self, + *, + x509_certificate_chain: t.Optional[t.Sequence[str]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Delegate PKI authentication. This API implements the exchange of an X509Certificate + chain for an Elasticsearch access token. The certificate chain is validated, + according to RFC 5280, by sequentially considering the trust configuration of + every installed PKI realm that has `delegation.enabled` set to `true`. 
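A sketch of the new bulk update call under the semantics described above, assuming an `AsyncElasticsearch` client authenticated as the keys' owner and hypothetical key ids:

```
import asyncio
from elasticsearch import AsyncElasticsearch

client = AsyncElasticsearch("http://localhost:9200")  # assumed test cluster

async def bulk_update_keys() -> None:
    # Apply the same update to several API keys in one call. An empty
    # role_descriptors object removes previously assigned privileges, so the
    # keys fall back to the owner user's current permissions.
    await client.security.bulk_update_api_keys(
        ids=["api-key-id-1", "api-key-id-2"],  # hypothetical key ids
        role_descriptors={},
        metadata={"environment": "testing"},
        expiration="30d",
    )
    await client.close()

asyncio.run(bulk_update_keys())
```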
A successfully + trusted client certificate is also subject to the validation of the subject distinguished + name according to thw `username_pattern` of the respective realm. This API is + called by smart and trusted proxies, such as Kibana, which terminate the user's + TLS session but still want to authenticate the user by using a PKI realm—-​as + if the user connected directly to Elasticsearch. IMPORTANT: The association between + the subject public key in the target certificate and the corresponding private + key is not validated. This is part of the TLS authentication process and it is + delegated to the proxy that calls this API. The proxy is trusted to have performed + the TLS authentication and this API translates that authentication into an Elasticsearch + access token. + + ``_ + + :param x509_certificate_chain: The X509Certificate chain, which is represented + as an ordered string array. Each string in the array is a base64-encoded + (Section 4 of RFC4648 - not base64url-encoded) of the certificate's DER encoding. + The first element is the target certificate that contains the subject distinguished + name that is requesting access. This may be followed by additional certificates; + each subsequent certificate is used to certify the previous one. + """ + if x509_certificate_chain is None and body is None: + raise ValueError( + "Empty value passed for parameter 'x509_certificate_chain'" + ) + __path_parts: t.Dict[str, str] = {} + __path = "/_security/delegate_pki" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if x509_certificate_chain is not None: + __body["x509_certificate_chain"] = x509_certificate_chain + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="security.delegate_pki", + path_parts=__path_parts, + ) + @_rewrite_parameters() async def delete_privileges( self, @@ -1098,14 +1273,21 @@ async def disable_user_profile( ) -> ObjectApiResponse[t.Any]: """ Disable a user profile. Disable user profiles so that they are not visible in - user profile searches. + user profile searches. NOTE: The user profile feature is designed only for use + by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security + solutions. Individual users and external applications should not call this API + directly. Elastic reserves the right to change or remove this feature in future + releases without prior notice. When you activate a user profile, its automatically + enabled and visible in user profile searches. You can use the disable user profile + API to disable a user profile so it’s not visible in these searches. To re-enable + a disabled user profile, use the enable user profile API . ``_ :param uid: Unique identifier for the user profile. :param refresh: If 'true', Elasticsearch refreshes the affected shards to make - this operation visible to search, if 'wait_for' then wait for a refresh to - make this operation visible to search, if 'false' do nothing with refreshes. + this operation visible to search. If 'wait_for', it waits for a refresh to + make this operation visible to search. 
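A hedged sketch of the new `delegate_pki` call, assuming a PKI realm with `delegation.enabled: true` and using a placeholder certificate value; the `access_token` response field is an assumption based on the token-exchange behaviour described above:

```
import asyncio
from elasticsearch import AsyncElasticsearch

client = AsyncElasticsearch("http://localhost:9200")  # assumed test cluster

async def delegate() -> None:
    # Exchange a client certificate chain for an Elasticsearch access token.
    # Each element is the base64-encoded DER of one certificate; the first
    # entry is the end-entity certificate (the value here is a placeholder).
    resp = await client.security.delegate_pki(
        x509_certificate_chain=["MIIDeDCCAmCgAwIBAgI...placeholder..."],
    )
    print(resp["access_token"])
    await client.close()

asyncio.run(delegate())
```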
If 'false', it does nothing with refreshes. """ if uid in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'uid'") @@ -1195,14 +1377,20 @@ async def enable_user_profile( ) -> ObjectApiResponse[t.Any]: """ Enable a user profile. Enable user profiles to make them visible in user profile - searches. + searches. NOTE: The user profile feature is designed only for use by Kibana and + Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual + users and external applications should not call this API directly. Elastic reserves + the right to change or remove this feature in future releases without prior notice. + When you activate a user profile, it's automatically enabled and visible in user + profile searches. If you later disable the user profile, you can use the enable + user profile API to make the profile visible in these searches again. ``_ - :param uid: Unique identifier for the user profile. + :param uid: A unique identifier for the user profile. :param refresh: If 'true', Elasticsearch refreshes the affected shards to make - this operation visible to search, if 'wait_for' then wait for a refresh to - make this operation visible to search, if 'false' do nothing with refreshes. + this operation visible to search. If 'wait_for', it waits for a refresh to + make this operation visible to search. If 'false', nothing is done with refreshes. """ if uid in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'uid'") @@ -1667,6 +1855,49 @@ async def get_service_credentials( path_parts=__path_parts, ) + @_rewrite_parameters() + async def get_settings( + self, + *, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Get security index settings. Get the user-configurable settings for the security + internal index (`.security` and associated indices). + + ``_ + + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + """ + __path_parts: t.Dict[str, str] = {} + __path = "/_security/settings" + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="security.get_settings", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=( "grant_type", @@ -1860,15 +2091,19 @@ async def get_user_profile( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a user profile. Get a user's profile using the unique profile ID. + Get a user profile. Get a user's profile using the unique profile ID. NOTE: The + user profile feature is designed only for use by Kibana and Elastic's Observability, + Enterprise Search, and Elastic Security solutions. Individual users and external + applications should not call this API directly. 
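A small sketch of toggling profile visibility with the `refresh` behaviour described above, assuming a placeholder profile uid:

```
import asyncio
from elasticsearch import AsyncElasticsearch

client = AsyncElasticsearch("http://localhost:9200")  # assumed test cluster

async def toggle_profile_visibility(uid: str) -> None:
    # Hide a profile from profile searches, then make it visible again.
    # refresh="wait_for" waits until the change is searchable before returning.
    await client.security.disable_user_profile(uid=uid, refresh="wait_for")
    await client.security.enable_user_profile(uid=uid, refresh="wait_for")
    await client.close()

asyncio.run(toggle_profile_visibility("u_profile_uid_placeholder"))
```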
Elastic reserves the right to + change or remove this feature in future releases without prior notice. ``_ :param uid: A unique identifier for the user profile. - :param data: List of filters for the `data` field of the profile document. To - return all content use `data=*`. To return a subset of content use `data=` - to retrieve content nested under the specified ``. By default returns - no `data` content. + :param data: A comma-separated list of filters for the `data` field of the profile + document. To return all content use `data=*`. To return a subset of content + use `data=` to retrieve content nested under the specified ``. + By default returns no `data` content. """ if uid in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'uid'") @@ -2140,11 +2375,15 @@ async def has_privileges_user_profile( ) -> ObjectApiResponse[t.Any]: """ Check user profile privileges. Determine whether the users associated with the - specified user profile IDs have all the requested privileges. + specified user profile IDs have all the requested privileges. NOTE: The user + profile feature is designed only for use by Kibana and Elastic's Observability, + Enterprise Search, and Elastic Security solutions. Individual users and external + applications should not call this API directly. Elastic reserves the right to + change or remove this feature in future releases without prior notice. ``_ - :param privileges: + :param privileges: An object containing all the privileges to be checked. :param uids: A list of profile IDs. The privileges are checked for associated users of the profiles. """ @@ -3312,13 +3551,25 @@ async def saml_authenticate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Authenticate SAML. Submits a SAML response message to Elasticsearch for consumption. + Authenticate SAML. Submit a SAML response message to Elasticsearch for consumption. + NOTE: This API is intended for use by custom web applications other than Kibana. + If you are using Kibana, refer to the documentation for configuring SAML single-sign-on + on the Elastic Stack. The SAML message that is submitted can be: * A response + to a SAML authentication request that was previously created using the SAML prepare + authentication API. * An unsolicited SAML message in the case of an IdP-initiated + single sign-on (SSO) flow. In either case, the SAML message needs to be a base64 + encoded XML document with a root element of ``. After successful validation, + Elasticsearch responds with an Elasticsearch internal access token and refresh + token that can be subsequently used for authentication. This API endpoint essentially + exchanges SAML responses that indicate successful authentication in the IdP for + Elasticsearch access and refresh tokens, which can be used for authentication + against Elasticsearch. ``_ - :param content: The SAML response as it was sent by the user’s browser, usually + :param content: The SAML response as it was sent by the user's browser, usually a Base64 encoded XML document. - :param ids: A json array with all the valid SAML Request Ids that the caller + :param ids: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. :param realm: The name of the realm that should authenticate the SAML response. Useful in cases where many SAML realms are defined. @@ -3375,10 +3626,19 @@ async def saml_complete_logout( ) -> ObjectApiResponse[t.Any]: """ Logout of SAML completely. Verifies the logout response sent from the SAML IdP. 
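A sketch of fetching a profile with a `data` filter as documented above; the uid and the `app1` namespace are placeholders:

```
import asyncio
from elasticsearch import AsyncElasticsearch

client = AsyncElasticsearch("http://localhost:9200")  # assumed test cluster

async def fetch_profile() -> None:
    # Return only the "app1" namespace of the profile's non-searchable data;
    # by default no `data` content is returned at all.
    resp = await client.security.get_user_profile(
        uid="u_profile_uid_placeholder",
        data="app1",
    )
    print(resp)
    await client.close()

asyncio.run(fetch_profile())
```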
+ NOTE: This API is intended for use by custom web applications other than Kibana. + If you are using Kibana, refer to the documentation for configuring SAML single-sign-on + on the Elastic Stack. The SAML IdP may send a logout response back to the SP + after handling the SP-initiated SAML Single Logout. This API verifies the response + by ensuring the content is relevant and validating its signature. An empty response + is returned if the verification process is successful. The response can be sent + by the IdP with either the HTTP-Redirect or the HTTP-Post binding. The caller + of this API must prepare the request accordingly so that this API can handle + either of them. ``_ - :param ids: A json array with all the valid SAML Request Ids that the caller + :param ids: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. :param realm: The name of the SAML realm in Elasticsearch for which the configuration is used to verify the logout response. @@ -3440,25 +3700,33 @@ async def saml_invalidate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Invalidate SAML. Submits a SAML LogoutRequest message to Elasticsearch for consumption. + Invalidate SAML. Submit a SAML LogoutRequest message to Elasticsearch for consumption. + NOTE: This API is intended for use by custom web applications other than Kibana. + If you are using Kibana, refer to the documentation for configuring SAML single-sign-on + on the Elastic Stack. The logout request comes from the SAML IdP during an IdP + initiated Single Logout. The custom web application can use this API to have + Elasticsearch process the `LogoutRequest`. After successful validation of the + request, Elasticsearch invalidates the access token and refresh token that corresponds + to that specific SAML principal and provides a URL that contains a SAML LogoutResponse + message. Thus the user can be redirected back to their IdP. ``_ :param query_string: The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout. This query should include - a single parameter named SAMLRequest that contains a SAML logout request + a single parameter named `SAMLRequest` that contains a SAML logout request that is deflated and Base64 encoded. If the SAML IdP has signed the logout - request, the URL should include two extra parameters named SigAlg and Signature + request, the URL should include two extra parameters named `SigAlg` and `Signature` that contain the algorithm used for the signature and the signature value - itself. In order for Elasticsearch to be able to verify the IdP’s signature, - the value of the query_string field must be an exact match to the string + itself. In order for Elasticsearch to be able to verify the IdP's signature, + the value of the `query_string` field must be an exact match to the string provided by the browser. The client application must not attempt to parse or process the string in any way. :param acs: The Assertion Consumer Service URL that matches the one of the SAML realm in Elasticsearch that should be used. You must specify either this - parameter or the realm parameter. + parameter or the `realm` parameter. :param realm: The name of the SAML realm in Elasticsearch the configuration. - You must specify either this parameter or the acs parameter. + You must specify either this parameter or the `acs` parameter. 
""" if query_string is None and body is None: raise ValueError("Empty value passed for parameter 'query_string'") @@ -3508,12 +3776,19 @@ async def saml_logout( ) -> ObjectApiResponse[t.Any]: """ Logout of SAML. Submits a request to invalidate an access token and refresh token. + NOTE: This API is intended for use by custom web applications other than Kibana. + If you are using Kibana, refer to the documentation for configuring SAML single-sign-on + on the Elastic Stack. This API invalidates the tokens that were generated for + a user by the SAML authenticate API. If the SAML realm in Elasticsearch is configured + accordingly and the SAML IdP supports this, the Elasticsearch response contains + a URL to redirect the user to the IdP that contains a SAML logout request (starting + an SP-initiated SAML Single Logout). ``_ :param token: The access token that was returned as a response to calling the SAML authenticate API. Alternatively, the most recent token that was received - after refreshing the original one by using a refresh_token. + after refreshing the original one by using a `refresh_token`. :param refresh_token: The refresh token that was returned as a response to calling the SAML authenticate API. Alternatively, the most recent refresh token that was received after refreshing the original access token. @@ -3564,19 +3839,31 @@ async def saml_prepare_authentication( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Prepare SAML authentication. Creates a SAML authentication request (``) - as a URL string, based on the configuration of the respective SAML realm in Elasticsearch. + Prepare SAML authentication. Create a SAML authentication request (``) + as a URL string based on the configuration of the respective SAML realm in Elasticsearch. + NOTE: This API is intended for use by custom web applications other than Kibana. + If you are using Kibana, refer to the documentation for configuring SAML single-sign-on + on the Elastic Stack. This API returns a URL pointing to the SAML Identity Provider. + You can use the URL to redirect the browser of the user in order to continue + the authentication process. The URL includes a single parameter named `SAMLRequest`, + which contains a SAML Authentication request that is deflated and Base64 encoded. + If the configuration dictates that SAML authentication requests should be signed, + the URL has two extra parameters named `SigAlg` and `Signature`. These parameters + contain the algorithm used for the signature and the signature value itself. + It also returns a random string that uniquely identifies this SAML Authentication + request. The caller of this API needs to store this identifier as it needs to + be used in a following step of the authentication process. ``_ :param acs: The Assertion Consumer Service URL that matches the one of the SAML realms in Elasticsearch. The realm is used to generate the authentication - request. You must specify either this parameter or the realm parameter. + request. You must specify either this parameter or the `realm` parameter. :param realm: The name of the SAML realm in Elasticsearch for which the configuration is used to generate the authentication request. You must specify either this - parameter or the acs parameter. + parameter or the `acs` parameter. :param relay_state: A string that will be included in the redirect URL that this - API returns as the RelayState query parameter. If the Authentication Request + API returns as the `RelayState` query parameter. 
If the Authentication Request is signed, this value is used as part of the signature computation. """ __path_parts: t.Dict[str, str] = {} @@ -3621,7 +3908,10 @@ async def saml_service_provider_metadata( ) -> ObjectApiResponse[t.Any]: """ Create SAML service provider metadata. Generate SAML metadata for a SAML 2.0 - Service Provider. + Service Provider. The SAML 2.0 specification provides a mechanism for Service + Providers to describe their capabilities and configuration using a metadata file. + This API generates Service Provider metadata based on the configuration of a + SAML realm in Elasticsearch. ``_ @@ -3668,21 +3958,27 @@ async def suggest_user_profiles( ) -> ObjectApiResponse[t.Any]: """ Suggest a user profile. Get suggestions for user profiles that match specified - search criteria. + search criteria. NOTE: The user profile feature is designed only for use by Kibana + and Elastic's Observability, Enterprise Search, and Elastic Security solutions. + Individual users and external applications should not call this API directly. + Elastic reserves the right to change or remove this feature in future releases + without prior notice. ``_ - :param data: List of filters for the `data` field of the profile document. To - return all content use `data=*`. To return a subset of content use `data=` - to retrieve content nested under the specified ``. By default returns - no `data` content. + :param data: A comma-separated list of filters for the `data` field of the profile + document. To return all content use `data=*`. To return a subset of content, + use `data=` to retrieve content nested under the specified ``. + By default, the API returns no `data` content. It is an error to specify + `data` as both the query parameter and the request body field. :param hint: Extra search criteria to improve relevance of the suggestion result. Profiles matching the spcified hint are ranked higher in the response. Profiles - not matching the hint don't exclude the profile from the response as long - as the profile matches the `name` field query. - :param name: Query string used to match name-related fields in user profile documents. - Name-related fields are the user's `username`, `full_name`, and `email`. - :param size: Number of profiles to return. + not matching the hint aren't excluded from the response as long as the profile + matches the `name` field query. + :param name: A query string used to match name-related fields in user profile + documents. Name-related fields are the user's `username`, `full_name`, and + `email`. + :param size: The number of profiles to return. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/profile/_suggest" @@ -3824,7 +4120,18 @@ async def update_cross_cluster_api_key( ) -> ObjectApiResponse[t.Any]: """ Update a cross-cluster API key. Update the attributes of an existing cross-cluster - API key, which is used for API key based remote cluster access. + API key, which is used for API key based remote cluster access. To use this API, + you must have at least the `manage_security` cluster privilege. Users can only + update API keys that they created. To update another user's API key, use the + `run_as` feature to submit a request on behalf of another user. IMPORTANT: It's + not possible to use an API key as the authentication credential for this API. + To update an API key, the owner user's credentials are required. It's not possible + to update expired API keys, or API keys that have been invalidated by the invalidate + API key API. 
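Putting the SP-initiated flow described above together, a custom web application might drive it roughly as follows; the realm name is hypothetical and the response field names are assumptions based on the standard SAML prepare/authenticate exchange:

```
import asyncio
from elasticsearch import AsyncElasticsearch

client = AsyncElasticsearch("http://localhost:9200")  # assumed test cluster

async def saml_flow() -> None:
    # Step 1: build the IdP redirect URL and remember the request id.
    prepared = await client.security.saml_prepare_authentication(realm="saml1")
    redirect_url = prepared["redirect"]
    request_id = prepared["id"]

    # ... redirect the user's browser to redirect_url; the IdP eventually posts
    # a base64-encoded SAML response back to the application ...
    saml_response = "<base64-encoded SAML response from the browser>"

    # Step 2: exchange the SAML response for Elasticsearch tokens.
    tokens = await client.security.saml_authenticate(
        content=saml_response,
        ids=[request_id],
        realm="saml1",
    )
    print(tokens)
    await client.close()

asyncio.run(saml_flow())
```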
This API supports updates to an API key's access scope, metadata, + and expiration. The owner user's information, such as the `username` and `realm`, + is also updated automatically on every call. NOTE: This API cannot update REST + API keys, which should be updated by either the update API key or bulk update + API keys API. ``_ @@ -3833,8 +4140,8 @@ async def update_cross_cluster_api_key( of permissions for cross cluster search and cross cluster replication. At least one of them must be specified. When specified, the new access assignment fully replaces the previously assigned access. - :param expiration: Expiration time for the API key. By default, API keys never - expire. This property can be omitted to leave the value unchanged. + :param expiration: The expiration time for the API key. By default, API keys + never expire. This property can be omitted to leave the value unchanged. :param metadata: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. When specified, this information @@ -3874,6 +4181,81 @@ async def update_cross_cluster_api_key( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("security", "security_profile", "security_tokens"), + parameter_aliases={ + "security-profile": "security_profile", + "security-tokens": "security_tokens", + }, + ) + async def update_settings( + self, + *, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + security: t.Optional[t.Mapping[str, t.Any]] = None, + security_profile: t.Optional[t.Mapping[str, t.Any]] = None, + security_tokens: t.Optional[t.Mapping[str, t.Any]] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Update security index settings. Update the user-configurable settings for the + security internal index (`.security` and associated indices). Only a subset of + settings are allowed to be modified, for example `index.auto_expand_replicas` + and `index.number_of_replicas`. If a specific index is not in use on the system + and settings are provided for it, the request will be rejected. This API does + not yet support configuring the settings for indices before they are in use. + + ``_ + + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. + :param security: Settings for the index used for most security configuration, + including native realm users and roles configured with the API. + :param security_profile: Settings for the index used to store profile information. + :param security_tokens: Settings for the index used to store tokens. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. 
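As a rough illustration of updating a cross-cluster API key as described above; the key id is hypothetical and the shape of the `access` object (search entries with index names) is an assumption for illustration:

```
import asyncio
from elasticsearch import AsyncElasticsearch

client = AsyncElasticsearch("http://localhost:9200")  # assumed test cluster

async def update_cc_key() -> None:
    # Replace the key's access definition and metadata in one call; the new
    # access assignment fully replaces the previous one.
    await client.security.update_cross_cluster_api_key(
        id="cross-cluster-key-id-placeholder",
        access={"search": [{"names": ["logs-*"]}]},
        metadata={"environment": "testing"},
    )
    await client.close()

asyncio.run(update_cc_key())
```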
+ """ + __path_parts: t.Dict[str, str] = {} + __path = "/_security/settings" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + if not __body: + if security is not None: + __body["security"] = security + if security_profile is not None: + __body["security-profile"] = security_profile + if security_tokens is not None: + __body["security-tokens"] = security_tokens + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="security.update_settings", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=("data", "labels"), ) @@ -3896,22 +4278,37 @@ async def update_user_profile_data( ) -> ObjectApiResponse[t.Any]: """ Update user profile data. Update specific data for the user profile that is associated - with a unique ID. + with a unique ID. NOTE: The user profile feature is designed only for use by + Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. + Individual users and external applications should not call this API directly. + Elastic reserves the right to change or remove this feature in future releases + without prior notice. To use this API, you must have one of the following privileges: + * The `manage_user_profile` cluster privilege. * The `update_profile_data` global + privilege for the namespaces that are referenced in the request. This API updates + the `labels` and `data` fields of an existing user profile document with JSON + objects. New keys and their values are added to the profile document and conflicting + keys are replaced by data that's included in the request. For both labels and + data, content is namespaced by the top-level fields. The `update_profile_data` + global privilege grants privileges for updating only the allowed namespaces. ``_ :param uid: A unique identifier for the user profile. :param data: Non-searchable data that you want to associate with the user profile. - This field supports a nested data structure. + This field supports a nested data structure. Within the `data` object, top-level + keys cannot begin with an underscore (`_`) or contain a period (`.`). The + data object is not searchable, but can be retrieved with the get user profile + API. :param if_primary_term: Only perform the operation if the document has this primary term. :param if_seq_no: Only perform the operation if the document has this sequence number. :param labels: Searchable data that you want to associate with the user profile. - This field supports a nested data structure. + This field supports a nested data structure. Within the labels object, top-level + keys cannot begin with an underscore (`_`) or contain a period (`.`). :param refresh: If 'true', Elasticsearch refreshes the affected shards to make - this operation visible to search, if 'wait_for' then wait for a refresh to - make this operation visible to search, if 'false' do nothing with refreshes. + this operation visible to search. 
If 'wait_for', it waits for a refresh to + make this operation visible to search. If 'false', nothing is done with refreshes. """ if uid in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'uid'") diff --git a/elasticsearch/_async/client/shutdown.py b/elasticsearch/_async/client/shutdown.py index e4117bff8..df396a7a3 100644 --- a/elasticsearch/_async/client/shutdown.py +++ b/elasticsearch/_async/client/shutdown.py @@ -50,7 +50,7 @@ async def delete_node( and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator privileges feature is enabled, you must be an operator to use this API. - ``_ + ``_ :param node_id: The node id of node to be removed from the shutdown state :param master_timeout: Period to wait for a connection to the master node. If @@ -98,9 +98,6 @@ async def get_node( t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, pretty: t.Optional[bool] = None, - timeout: t.Optional[ - t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] - ] = None, ) -> ObjectApiResponse[t.Any]: """ Get the shutdown status. Get information about nodes that are ready to be shut @@ -111,14 +108,12 @@ async def get_node( the operator privileges feature is enabled, you must be an operator to use this API. - ``_ + ``_ :param node_id: Which node for which to retrieve the shutdown status :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - :param timeout: Period to wait for a response. If no response is received before - the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if node_id not in SKIP_IN_PATH: @@ -138,8 +133,6 @@ async def get_node( __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty - if timeout is not None: - __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", @@ -178,19 +171,23 @@ async def put_node( """ Prepare a node to be shut down. NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. - Direct use is not supported. If the operator privileges feature is enabled, you - must be an operator to use this API. The API migrates ongoing tasks and index - shards to other nodes as needed to prepare a node to be restarted or shut down - and removed from the cluster. This ensures that Elasticsearch can be stopped - safely with minimal disruption to the cluster. You must specify the type of shutdown: - `restart`, `remove`, or `replace`. If a node is already being prepared for shutdown, - you can use this API to change the shutdown type. IMPORTANT: This API does NOT - terminate the Elasticsearch process. Monitor the node shutdown status to determine - when it is safe to stop Elasticsearch. + Direct use is not supported. If you specify a node that is offline, it will be + prepared for shut down when it rejoins the cluster. If the operator privileges + feature is enabled, you must be an operator to use this API. The API migrates + ongoing tasks and index shards to other nodes as needed to prepare a node to + be restarted or shut down and removed from the cluster. This ensures that Elasticsearch + can be stopped safely with minimal disruption to the cluster. You must specify + the type of shutdown: `restart`, `remove`, or `replace`. 
If a node is already + being prepared for shutdown, you can use this API to change the shutdown type. + IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the + node shutdown status to determine when it is safe to stop Elasticsearch. - ``_ + ``_ - :param node_id: The node id of node to be shut down + :param node_id: The node identifier. This parameter is not validated against + the cluster's active nodes. This enables you to register a node for shut + down while it is offline. No error is thrown if you specify an invalid node + ID. :param reason: A human-readable reason that the node is being shut down. This field provides information for other cluster operators; it does not affect the shut down process. @@ -211,17 +208,17 @@ async def put_node( the index.unassigned.node_left.delayed_timeout setting. If you specify both a restart allocation delay and an index-level allocation delay, the longer of the two is used. - :param master_timeout: Period to wait for a connection to the master node. If - no response is received before the timeout expires, the request fails and - returns an error. + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. :param target_node_name: Only valid if type is replace. Specifies the name of the node that is replacing the node being shut down. Shards from the shut down node are only allowed to be allocated to the target node, and no other data will be allocated to the target node. During relocation of data certain allocation rules are ignored, such as disk watermarks or user attribute filtering rules. - :param timeout: Period to wait for a response. If no response is received before - the timeout expires, the request fails and returns an error. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. """ if node_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'node_id'") diff --git a/elasticsearch/_async/client/simulate.py b/elasticsearch/_async/client/simulate.py index c4beb9dda..4a2d871b2 100644 --- a/elasticsearch/_async/client/simulate.py +++ b/elasticsearch/_async/client/simulate.py @@ -87,7 +87,7 @@ async def ingest( This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request. - ``_ + ``_ :param docs: Sample documents to test in the pipeline. :param index: The index to simulate ingesting into. This value can be overridden diff --git a/elasticsearch/_async/client/slm.py b/elasticsearch/_async/client/slm.py index 180ff26e5..cc3380d77 100644 --- a/elasticsearch/_async/client/slm.py +++ b/elasticsearch/_async/client/slm.py @@ -33,7 +33,9 @@ async def delete_lifecycle( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Delete a policy. Delete a snapshot lifecycle policy definition. This operation @@ -43,6 +45,11 @@ async def delete_lifecycle( ``_ :param policy_id: The id of the snapshot lifecycle policy to remove + :param master_timeout: The period to wait for a connection to the master node. 
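A sketch of registering a restart-type shutdown and polling its status, per the behaviour described above; the node id and reason are hypothetical, and because the id is not validated an offline node can be registered ahead of time:

```
import asyncio
from elasticsearch import AsyncElasticsearch

client = AsyncElasticsearch("http://localhost:9200")  # assumed test cluster

async def prepare_restart() -> None:
    # Register the shutdown; shards stay allocated for the given delay while
    # the node restarts, then check status before stopping the process.
    await client.shutdown.put_node(
        node_id="my-node-id",
        type="restart",
        reason="routine hardware maintenance",
        allocation_delay="20m",
        master_timeout="30s",
    )
    status = await client.shutdown.get_node(node_id="my-node-id")
    print(status)
    await client.close()

asyncio.run(prepare_restart())
```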
+ If no response is received before the timeout expires, the request fails + and returns an error. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. """ if policy_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'policy_id'") @@ -55,8 +62,12 @@ async def delete_lifecycle( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", @@ -75,7 +86,9 @@ async def execute_lifecycle( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Run a policy. Immediately create a snapshot according to the snapshot lifecycle @@ -86,6 +99,11 @@ async def execute_lifecycle( ``_ :param policy_id: The id of the snapshot lifecycle policy to be executed + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. """ if policy_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'policy_id'") @@ -98,8 +116,12 @@ async def execute_lifecycle( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", @@ -117,7 +139,9 @@ async def execute_retention( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Run a retention policy. Manually apply the retention policy to force immediate @@ -125,6 +149,12 @@ async def execute_retention( retention rules. The retention policy is normally applied according to its schedule. ``_ + + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. 
""" __path_parts: t.Dict[str, str] = {} __path = "/_slm/_execute_retention" @@ -135,8 +165,12 @@ async def execute_retention( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", @@ -155,7 +189,9 @@ async def get_lifecycle( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Get policy information. Get snapshot lifecycle policy definitions and information @@ -164,6 +200,11 @@ async def get_lifecycle( ``_ :param policy_id: Comma-separated list of snapshot lifecycle policies to retrieve + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if policy_id not in SKIP_IN_PATH: @@ -179,8 +220,12 @@ async def get_lifecycle( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", @@ -198,13 +243,21 @@ async def get_stats( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Get snapshot lifecycle management statistics. Get global and policy-level statistics about actions taken by snapshot lifecycle management. ``_ + + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. 
""" __path_parts: t.Dict[str, str] = {} __path = "/_slm/stats" @@ -215,8 +268,12 @@ async def get_stats( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", @@ -234,12 +291,22 @@ async def get_status( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Get the snapshot lifecycle management status. ``_ + + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. To indicate that the request should never timeout, + set it to `-1`. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. To indicate + that the request should never timeout, set it to `-1`. """ __path_parts: t.Dict[str, str] = {} __path = "/_slm/status" @@ -250,8 +317,12 @@ async def get_status( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", @@ -292,9 +363,10 @@ async def put_lifecycle( :param policy_id: The identifier for the snapshot lifecycle policy you want to create or update. :param config: Configuration for each snapshot created by the policy. - :param master_timeout: Period to wait for a connection to the master node. If - no response is received before the timeout expires, the request fails and - returns an error. + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. To indicate that the request should never timeout, + set it to `-1`. :param name: Name automatically assigned to each snapshot created by the policy. Date math is supported. To prevent conflicting snapshot names, a UUID is automatically appended to each snapshot name. @@ -305,8 +377,9 @@ async def put_lifecycle( by the policy. :param schedule: Periodic or absolute schedule at which the policy creates snapshots. SLM applies schedule changes immediately. - :param timeout: Period to wait for a response. If no response is received before - the timeout expires, the request fails and returns an error. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. To indicate + that the request should never timeout, set it to `-1`. 
""" if policy_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'policy_id'") @@ -359,7 +432,9 @@ async def start( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Start snapshot lifecycle management. Snapshot lifecycle management (SLM) starts @@ -367,6 +442,14 @@ async def start( if it has been stopped using the stop SLM API. ``_ + + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. To indicate that the request should never timeout, + set it to `-1`. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. To indicate + that the request should never timeout, set it to `-1`. """ __path_parts: t.Dict[str, str] = {} __path = "/_slm/start" @@ -377,8 +460,12 @@ async def start( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", @@ -396,7 +483,9 @@ async def stop( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Stop snapshot lifecycle management. Stop all snapshot lifecycle management (SLM) @@ -410,6 +499,14 @@ async def stop( status API to see if SLM is running. ``_ + + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. To indicate that the request should never timeout, + set it to `-1`. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. To indicate + that the request should never timeout, set it to `-1`. 
""" __path_parts: t.Dict[str, str] = {} __path = "/_slm/stop" @@ -420,8 +517,12 @@ async def stop( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", diff --git a/elasticsearch/_async/client/snapshot.py b/elasticsearch/_async/client/snapshot.py index 15e443077..f2c9448b9 100644 --- a/elasticsearch/_async/client/snapshot.py +++ b/elasticsearch/_async/client/snapshot.py @@ -95,21 +95,19 @@ async def clone( human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, - timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ Clone a snapshot. Clone part of all of a snapshot into another snapshot in the same repository. - ``_ + ``_ :param repository: A repository name :param snapshot: The name of the snapshot to clone from :param target_snapshot: The name of the cloned snapshot to create :param indices: :param master_timeout: Explicit operation timeout for connection to master node - :param timeout: """ if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'repository'") @@ -137,8 +135,6 @@ async def clone( __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty - if timeout is not None: - __query["timeout"] = timeout if not __body: if indices is not None: __body["indices"] = indices @@ -185,7 +181,7 @@ async def create( """ Create a snapshot. Take a snapshot of a cluster or of data streams and indices. - ``_ + ``_ :param repository: Repository for the snapshot. :param snapshot: Name of the snapshot. Must be unique in the repository. @@ -353,7 +349,7 @@ async def delete( """ Delete snapshots. - ``_ + ``_ :param repository: A repository name :param snapshot: A comma-separated list of snapshot names @@ -406,7 +402,7 @@ async def delete_repository( removes only the reference to the location where the repository is storing the snapshots. The snapshots themselves are left untouched and in place. - ``_ + ``_ :param name: Name of the snapshot repository to unregister. Wildcard (`*`) patterns are supported. @@ -480,7 +476,7 @@ async def get( """ Get snapshot information. - ``_ + ``_ :param repository: Comma-separated list of snapshot repository names used to limit the request. Wildcard (*) expressions are supported. @@ -592,7 +588,7 @@ async def get_repository( """ Get snapshot repository information. 
- ``_ + ``_ :param name: A comma-separated list of repository names :param local: Return local information, do not retrieve the state from master @@ -629,6 +625,225 @@ async def get_repository( path_parts=__path_parts, ) + @_rewrite_parameters() + async def repository_analyze( + self, + *, + name: str, + blob_count: t.Optional[int] = None, + concurrency: t.Optional[int] = None, + detailed: t.Optional[bool] = None, + early_read_node_count: t.Optional[int] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + max_blob_size: t.Optional[t.Union[int, str]] = None, + max_total_data_size: t.Optional[t.Union[int, str]] = None, + pretty: t.Optional[bool] = None, + rare_action_probability: t.Optional[float] = None, + rarely_abort_writes: t.Optional[bool] = None, + read_node_count: t.Optional[int] = None, + register_operation_count: t.Optional[int] = None, + seed: t.Optional[int] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Analyze a snapshot repository. Analyze the performance characteristics and any + incorrect behaviour found in a repository. The response exposes implementation + details of the analysis which may change from version to version. The response + body format is therefore not considered stable and may be different in newer + versions. There are a large number of third-party storage systems available, + not all of which are suitable for use as a snapshot repository by Elasticsearch. + Some storage systems behave incorrectly, or perform poorly, especially when accessed + concurrently by multiple clients as the nodes of an Elasticsearch cluster do. + This API performs a collection of read and write operations on your repository + which are designed to detect incorrect behaviour and to measure the performance + characteristics of your storage system. The default values for the parameters + are deliberately low to reduce the impact of running an analysis inadvertently + and to provide a sensible starting point for your investigations. Run your first + analysis with the default parameter values to check for simple problems. If successful, + run a sequence of increasingly large analyses until you encounter a failure or + you reach a `blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, + a `max_total_data_size` of at least `1tb`, and a `register_operation_count` of + at least `100`. Always specify a generous timeout, possibly `1h` or longer, to + allow time for each analysis to run to completion. Perform the analyses using + a multi-node cluster of a similar size to your production cluster so that it + can detect any problems that only arise when the repository is accessed by many + nodes at once. If the analysis fails, Elasticsearch detected that your repository + behaved unexpectedly. This usually means you are using a third-party storage + system with an incorrect or incompatible implementation of the API it claims + to support. If so, this storage system is not suitable for use as a snapshot + repository. You will need to work with the supplier of your storage system to + address the incompatibilities that Elasticsearch detects. If the analysis is + successful, the API returns details of the testing process, optionally including + how long each operation took. You can use this information to determine the performance + of your storage system. 
If any operation fails or returns an incorrect result, + the API returns an error. If the API returns an error, it may not have removed + all the data it wrote to the repository. The error will indicate the location + of any leftover data and this path is also recorded in the Elasticsearch logs. + You should verify that this location has been cleaned up correctly. If there + is still leftover data at the specified location, you should manually remove + it. If the connection from your client to Elasticsearch is closed while the client + is waiting for the result of the analysis, the test is cancelled. Some clients + are configured to close their connection if no response is received within a + certain timeout. An analysis takes a long time to complete so you might need + to relax any such client-side timeouts. On cancellation the analysis attempts + to clean up the data it was writing, but it may not be able to remove it all. + The path to the leftover data is recorded in the Elasticsearch logs. You should + verify that this location has been cleaned up correctly. If there is still leftover + data at the specified location, you should manually remove it. If the analysis + is successful then it detected no incorrect behaviour, but this does not mean + that correct behaviour is guaranteed. The analysis attempts to detect common + bugs but it does not offer 100% coverage. Additionally, it does not test the + following: * Your repository must perform durable writes. Once a blob has been + written it must remain in place until it is deleted, even after a power loss + or similar disaster. * Your repository must not suffer from silent data corruption. + Once a blob has been written, its contents must remain unchanged until it is + deliberately modified or deleted. * Your repository must behave correctly even + if connectivity from the cluster is disrupted. Reads and writes may fail in this + case, but they must not return incorrect results. IMPORTANT: An analysis writes + a substantial amount of data to your repository and then reads it back again. + This consumes bandwidth on the network between the cluster and the repository, + and storage space and I/O bandwidth on the repository itself. You must ensure + this load does not affect other users of these systems. Analyses respect the + repository settings `max_snapshot_bytes_per_sec` and `max_restore_bytes_per_sec` + if available and the cluster setting `indices.recovery.max_bytes_per_sec` which + you can use to limit the bandwidth they consume. NOTE: This API is intended for + exploratory use by humans. You should expect the request parameters and the response + format to vary in future versions. NOTE: Different versions of Elasticsearch + may perform different checks for repository compatibility, with newer versions + typically being stricter than older ones. A storage system that passes repository + analysis with one version of Elasticsearch may fail with a different version. + This indicates it behaves incorrectly in ways that the former version did not + detect. You must work with the supplier of your storage system to address the + incompatibilities detected by the repository analysis API in any version of Elasticsearch. + NOTE: This API may not work correctly in a mixed-version cluster. *Implementation + details* NOTE: This section of documentation describes how the repository analysis + API works in this version of Elasticsearch, but you should expect the implementation + to vary between versions. 
The request parameters and response format depend on
+    details of the implementation so may also be different in newer versions. The
+    analysis comprises a number of blob-level tasks, as set by the `blob_count` parameter
+    and a number of compare-and-exchange operations on linearizable registers, as
+    set by the `register_operation_count` parameter. These tasks are distributed
+    over the data and master-eligible nodes in the cluster for execution. For most
+    blob-level tasks, the executing node first writes a blob to the repository and
+    then instructs some of the other nodes in the cluster to attempt to read the
+    data it just wrote. The size of the blob is chosen randomly, according to the
+    `max_blob_size` and `max_total_data_size` parameters. If any of these reads fails
+    then the repository does not implement the necessary read-after-write semantics
+    that Elasticsearch requires. For some blob-level tasks, the executing node will
+    instruct some of its peers to attempt to read the data before the writing process
+    completes. These reads are permitted to fail, but must not return partial data.
+    If any read returns partial data then the repository does not implement the necessary
+    atomicity semantics that Elasticsearch requires. For some blob-level tasks, the
+    executing node will overwrite the blob while its peers are reading it. In this
+    case the data read may come from either the original or the overwritten blob,
+    but the read operation must not return partial data or a mix of data from the
+    two blobs. If any of these reads returns partial data or a mix of the two blobs
+    then the repository does not implement the necessary atomicity semantics that
+    Elasticsearch requires for overwrites. The executing node will use a variety
+    of different methods to write the blob. For instance, where applicable, it will
+    use both single-part and multi-part uploads. Similarly, the reading nodes will
+    use a variety of different methods to read the data back again. For instance
+    they may read the entire blob from start to end or may read only a subset of
+    the data. For some blob-level tasks, the executing node will cancel the write
+    before it is complete. In this case, it still instructs some of the other nodes
+    in the cluster to attempt to read the blob but all of these reads must fail to
+    find the blob. Linearizable registers are special blobs that Elasticsearch manipulates
+    using an atomic compare-and-exchange operation. This operation ensures correct
+    and strongly-consistent behavior even when the blob is accessed by multiple nodes
+    at the same time. The detailed implementation of the compare-and-exchange operation
+    on linearizable registers varies by repository type. Repository analysis verifies
+    that uncontended compare-and-exchange operations on a linearizable register
+    blob always succeed. Repository analysis also verifies that contended operations
+    either succeed or report the contention but do not return incorrect results.
+    If an operation fails due to contention, Elasticsearch retries the operation
+    until it succeeds. Most of the compare-and-exchange operations performed by repository
+    analysis atomically increment a counter which is represented as an 8-byte blob.
+    Some operations also verify the behavior on small blobs with sizes other than
+    8 bytes.
+
+    ``_
+
+    :param name: The name of the repository.
+    :param blob_count: The total number of blobs to write to the repository during
+    the test. For realistic experiments, you should set it to at least `2000`. 
+ :param concurrency: The number of operations to run concurrently during the test. + :param detailed: Indicates whether to return detailed results, including timing + information for every operation performed during the analysis. If false, + it returns only a summary of the analysis. + :param early_read_node_count: The number of nodes on which to perform an early + read operation while writing each blob. Early read operations are only rarely + performed. + :param max_blob_size: The maximum size of a blob to be written during the test. + For realistic experiments, you should set it to at least `2gb`. + :param max_total_data_size: An upper limit on the total size of all the blobs + written during the test. For realistic experiments, you should set it to + at least `1tb`. + :param rare_action_probability: The probability of performing a rare action such + as an early read, an overwrite, or an aborted write on each blob. + :param rarely_abort_writes: Indicates whether to rarely cancel writes before + they complete. + :param read_node_count: The number of nodes on which to read a blob after writing. + :param register_operation_count: The minimum number of linearizable register + operations to perform in total. For realistic experiments, you should set + it to at least `100`. + :param seed: The seed for the pseudo-random number generator used to generate + the list of operations performed during the test. To repeat the same set + of operations in multiple experiments, use the same seed in each experiment. + Note that the operations are performed concurrently so might not always happen + in the same order on each run. + :param timeout: The period of time to wait for the test to complete. If no response + is received before the timeout expires, the test is cancelled and returns + an error. 
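A usage sketch for the new `repository_analyze` helper, following the escalation the docstring recommends (defaults first, then a larger run); the repository name, sizes, and timeouts are illustrative assumptions, and `es` is the client from the earlier sketches.

async def analyze_repository(es) -> None:
    # First pass with the deliberately low defaults to catch simple problems.
    quick = await es.snapshot.repository_analyze(name="my_repository", timeout="1h")
    print(quick.body)

    # A more realistic follow-up run, sized along the lines the docs suggest.
    thorough = await es.snapshot.repository_analyze(
        name="my_repository",
        blob_count=2000,
        max_blob_size="2gb",
        max_total_data_size="1tb",
        register_operation_count=100,
        detailed=True,  # include per-operation timings in the response
        timeout="2h",
    )
    print(thorough.body)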
+ """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'name'") + __path_parts: t.Dict[str, str] = {"repository": _quote(name)} + __path = f'/_snapshot/{__path_parts["repository"]}/_analyze' + __query: t.Dict[str, t.Any] = {} + if blob_count is not None: + __query["blob_count"] = blob_count + if concurrency is not None: + __query["concurrency"] = concurrency + if detailed is not None: + __query["detailed"] = detailed + if early_read_node_count is not None: + __query["early_read_node_count"] = early_read_node_count + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if max_blob_size is not None: + __query["max_blob_size"] = max_blob_size + if max_total_data_size is not None: + __query["max_total_data_size"] = max_total_data_size + if pretty is not None: + __query["pretty"] = pretty + if rare_action_probability is not None: + __query["rare_action_probability"] = rare_action_probability + if rarely_abort_writes is not None: + __query["rarely_abort_writes"] = rarely_abort_writes + if read_node_count is not None: + __query["read_node_count"] = read_node_count + if register_operation_count is not None: + __query["register_operation_count"] = register_operation_count + if seed is not None: + __query["seed"] = seed + if timeout is not None: + __query["timeout"] = timeout + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + endpoint_id="snapshot.repository_analyze", + path_parts=__path_parts, + ) + @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def repository_verify_integrity( @@ -684,7 +899,7 @@ async def repository_verify_integrity( in future versions. NOTE: This API may not work correctly in a mixed-version cluster. - ``_ + ``_ :param name: A repository name :param blob_thread_pool_concurrency: Number of threads to use for reading blob @@ -794,7 +1009,7 @@ async def restore( or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot. - ``_ + ``_ :param repository: A repository name :param snapshot: A snapshot name @@ -898,7 +1113,7 @@ async def status( These requests can also tax machine resources and, when using cloud storage, incur high processing costs. - ``_ + ``_ :param repository: A repository name :param snapshot: A comma-separated list of snapshot names @@ -958,7 +1173,7 @@ async def verify_repository( Verify a snapshot repository. Check for common misconfigurations in a snapshot repository. - ``_ + ``_ :param name: A repository name :param master_timeout: Explicit operation timeout for connection to master node diff --git a/elasticsearch/_async/client/sql.py b/elasticsearch/_async/client/sql.py index 06e8f98a3..ca927d765 100644 --- a/elasticsearch/_async/client/sql.py +++ b/elasticsearch/_async/client/sql.py @@ -85,11 +85,14 @@ async def delete_async( ) -> ObjectApiResponse[t.Any]: """ Delete an async SQL search. Delete an async SQL search or a stored synchronous - SQL search. If the search is still running, the API cancels it. + SQL search. If the search is still running, the API cancels it. If the Elasticsearch + security features are enabled, only the following users can use this API to delete + a search: * Users with the `cancel_task` cluster privilege. * The user who first + submitted the search. 
``_ - :param id: Identifier for the search. + :param id: The identifier for the search. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -132,20 +135,23 @@ async def get_async( ) -> ObjectApiResponse[t.Any]: """ Get async SQL search results. Get the current status and available results for - an async SQL search or stored synchronous SQL search. + an async SQL search or stored synchronous SQL search. If the Elasticsearch security + features are enabled, only the user who first submitted the SQL search can retrieve + the search using this API. ``_ - :param id: Identifier for the search. - :param delimiter: Separator for CSV results. The API only supports this parameter - for CSV responses. - :param format: Format for the response. You must specify a format using this - parameter or the Accept HTTP header. If you specify both, the API uses this - parameter. - :param keep_alive: Retention period for the search and its results. Defaults + :param id: The identifier for the search. + :param delimiter: The separator for CSV results. The API supports this parameter + only for CSV responses. + :param format: The format for the response. You must specify a format using this + parameter or the `Accept` HTTP header. If you specify both, the API uses + this parameter. + :param keep_alive: The retention period for the search and its results. It defaults to the `keep_alive` period for the original SQL search. - :param wait_for_completion_timeout: Period to wait for complete results. Defaults - to no timeout, meaning the request waits for complete search results. + :param wait_for_completion_timeout: The period to wait for complete results. + It defaults to no timeout, meaning the request waits for complete search + results. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -194,7 +200,7 @@ async def get_async_status( ``_ - :param id: Identifier for the search. + :param id: The identifier for the search. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -221,6 +227,7 @@ async def get_async_status( @_rewrite_parameters( body_fields=( + "allow_partial_search_results", "catalog", "columnar", "cursor", @@ -243,6 +250,7 @@ async def get_async_status( async def query( self, *, + allow_partial_search_results: t.Optional[bool] = None, catalog: t.Optional[str] = None, columnar: t.Optional[bool] = None, cursor: t.Optional[str] = None, @@ -277,36 +285,45 @@ async def query( ``_ - :param catalog: Default catalog (cluster) for queries. If unspecified, the queries - execute on the data in the local cluster only. - :param columnar: If true, the results in a columnar fashion: one row represents - all the values of a certain column from the current page of results. - :param cursor: Cursor used to retrieve a set of paginated results. If you specify - a cursor, the API only uses the `columnar` and `time_zone` request body parameters. - It ignores other request body parameters. - :param fetch_size: The maximum number of rows (or entries) to return in one response - :param field_multi_value_leniency: Throw an exception when encountering multiple - values for a field (default) or be lenient and return the first value from - the list (without any guarantees of what that will be - typically the first - in natural ascending order). - :param filter: Elasticsearch query DSL for additional filtering. - :param format: Format for the response. - :param index_using_frozen: If true, the search can run on frozen indices. 
Defaults - to false. - :param keep_alive: Retention period for an async or saved synchronous search. - :param keep_on_completion: If true, Elasticsearch stores synchronous searches - if you also specify the wait_for_completion_timeout parameter. If false, - Elasticsearch only stores async searches that don’t finish before the wait_for_completion_timeout. - :param page_timeout: The timeout before a pagination request fails. - :param params: Values for parameters in the query. - :param query: SQL query to run. + :param allow_partial_search_results: If `true`, the response has partial results + when there are shard request timeouts or shard failures. If `false`, the + API returns an error with no partial results. + :param catalog: The default catalog (cluster) for queries. If unspecified, the + queries execute on the data in the local cluster only. + :param columnar: If `true`, the results are in a columnar fashion: one row represents + all the values of a certain column from the current page of results. The + API supports this parameter only for CBOR, JSON, SMILE, and YAML responses. + :param cursor: The cursor used to retrieve a set of paginated results. If you + specify a cursor, the API only uses the `columnar` and `time_zone` request + body parameters. It ignores other request body parameters. + :param fetch_size: The maximum number of rows (or entries) to return in one response. + :param field_multi_value_leniency: If `false`, the API returns an exception when + encountering multiple values for a field. If `true`, the API is lenient and + returns the first value from the array with no guarantee of consistent results. + :param filter: The Elasticsearch query DSL for additional filtering. + :param format: The format for the response. You can also specify a format using + the `Accept` HTTP header. If you specify both this parameter and the `Accept` + HTTP header, this parameter takes precedence. + :param index_using_frozen: If `true`, the search can run on frozen indices. + :param keep_alive: The retention period for an async or saved synchronous search. + :param keep_on_completion: If `true`, Elasticsearch stores synchronous searches + if you also specify the `wait_for_completion_timeout` parameter. If `false`, + Elasticsearch only stores async searches that don't finish before the `wait_for_completion_timeout`. + :param page_timeout: The minimum retention period for the scroll cursor. After + this time period, a pagination request might fail because the scroll cursor + is no longer available. Subsequent scroll requests prolong the lifetime of + the scroll cursor by the duration of `page_timeout` in the scroll request. + :param params: The values for parameters in the query. + :param query: The SQL query to run. :param request_timeout: The timeout before the request fails. - :param runtime_mappings: Defines one or more runtime fields in the search request. - These fields take precedence over mapped fields with the same name. - :param time_zone: ISO-8601 time zone ID for the search. - :param wait_for_completion_timeout: Period to wait for complete results. Defaults - to no timeout, meaning the request waits for complete search results. If - the search doesn’t finish within this period, the search becomes async. + :param runtime_mappings: One or more runtime fields for the search request. These + fields take precedence over mapped fields with the same name. + :param time_zone: The ISO-8601 time zone ID for the search. 
+ :param wait_for_completion_timeout: The period to wait for complete results. + It defaults to no timeout, meaning the request waits for complete search + results. If the search doesn't finish within this period, the search becomes + async. To save a synchronous search, you must specify this parameter and + the `keep_on_completion` parameter. """ __path_parts: t.Dict[str, str] = {} __path = "/_sql" @@ -323,6 +340,8 @@ async def query( if pretty is not None: __query["pretty"] = pretty if not __body: + if allow_partial_search_results is not None: + __body["allow_partial_search_results"] = allow_partial_search_results if catalog is not None: __body["catalog"] = catalog if columnar is not None: @@ -384,14 +403,15 @@ async def translate( ) -> ObjectApiResponse[t.Any]: """ Translate SQL into Elasticsearch queries. Translate an SQL search into a search - API request containing Query DSL. + API request containing Query DSL. It accepts the same request body parameters + as the SQL search API, excluding `cursor`. ``_ - :param query: SQL query to run. + :param query: The SQL query to run. :param fetch_size: The maximum number of rows (or entries) to return in one response. - :param filter: Elasticsearch query DSL for additional filtering. - :param time_zone: ISO-8601 time zone ID for the search. + :param filter: The Elasticsearch query DSL for additional filtering. + :param time_zone: The ISO-8601 time zone ID for the search. """ if query is None and body is None: raise ValueError("Empty value passed for parameter 'query'") diff --git a/elasticsearch/_async/client/synonyms.py b/elasticsearch/_async/client/synonyms.py index c86b2c584..e4e79a9e9 100644 --- a/elasticsearch/_async/client/synonyms.py +++ b/elasticsearch/_async/client/synonyms.py @@ -36,11 +36,25 @@ async def delete_synonym( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a synonym set. + Delete a synonym set. You can only delete a synonyms set that is not in use by + any index analyzer. Synonyms sets can be used in synonym graph token filters + and synonym token filters. These synonym filters can be used as part of search + analyzers. Analyzers need to be loaded when an index is restored (such as when + a node starts, or the index becomes open). Even if the analyzer is not used on + any field mapping, it still needs to be loaded on the index recovery phase. If + any analyzers cannot be loaded, the index becomes unavailable and the cluster + status becomes red or yellow as index shards are not available. To prevent that, + synonyms sets that are used in analyzers can't be deleted. A delete request in + this case will return a 400 response code. To remove a synonyms set, you must + first remove all indices that contain analyzers using it. You can migrate an + index by creating a new index that does not contain the token filter with the + synonyms set, and use the reindex API in order to copy over the index data. Once + finished, you can delete the index. When the synonyms set is not used in analyzers, + you will be able to delete it. ``_ - :param id: The id of the synonyms set to be deleted + :param id: The synonyms set identifier to delete. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -81,8 +95,8 @@ async def delete_synonym_rule( ``_ - :param set_id: The id of the synonym set to be updated - :param rule_id: The id of the synonym rule to be deleted + :param set_id: The ID of the synonym set to update. + :param rule_id: The ID of the synonym rule to delete. 
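A sketch of the deletion flow described above: attempt to delete a synonyms set and handle the 400 response returned while an index analyzer still references it. The set id is a placeholder, the error handling uses the client's generic `ApiError`, and `es` is the client assumed in the earlier sketches.

from elasticsearch import ApiError


async def drop_synonyms_set(es) -> None:
    try:
        await es.synonyms.delete_synonym(id="my-synonyms-set")
    except ApiError as err:
        # A 400 here means the set is still used by an index analyzer;
        # reindex or delete those indices first, then retry the call.
        print(f"could not delete synonyms set yet: {err}")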
""" if set_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'set_id'") @@ -131,9 +145,9 @@ async def get_synonym( ``_ - :param id: "The id of the synonyms set to be retrieved - :param from_: Starting offset for query rules to be retrieved - :param size: specifies a max number of query rules to retrieve + :param id: The synonyms set identifier to retrieve. + :param from_: The starting offset for query rules to retrieve. + :param size: The max number of query rules to retrieve. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -178,8 +192,8 @@ async def get_synonym_rule( ``_ - :param set_id: The id of the synonym set to retrieve the synonym rule from - :param rule_id: The id of the synonym rule to retrieve + :param set_id: The ID of the synonym set to retrieve the synonym rule from. + :param rule_id: The ID of the synonym rule to retrieve. """ if set_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'set_id'") @@ -225,10 +239,10 @@ async def get_synonyms_sets( """ Get all synonym sets. Get a summary of all defined synonym sets. - ``_ + ``_ - :param from_: Starting offset - :param size: specifies a max number of results to get + :param from_: The starting offset for synonyms sets to retrieve. + :param size: The maximum number of synonyms sets to retrieve. """ __path_parts: t.Dict[str, str] = {} __path = "/_synonyms" @@ -274,12 +288,15 @@ async def put_synonym( """ Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 synonym rules per set. If you need to manage more synonym rules, you can create - multiple synonym sets. + multiple synonym sets. When an existing synonyms set is updated, the search analyzers + that use the synonyms set are reloaded automatically for all indices. This is + equivalent to invoking the reload search analyzers API for all indices that use + the synonyms set. ``_ - :param id: The id of the synonyms set to be created or updated - :param synonyms_set: The synonym set information to update + :param id: The ID of the synonyms set to be created or updated. + :param synonyms_set: The synonym rules definitions for the synonyms set. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -328,13 +345,16 @@ async def put_synonym_rule( ) -> ObjectApiResponse[t.Any]: """ Create or update a synonym rule. Create or update a synonym rule in a synonym - set. + set. If any of the synonym rules included is invalid, the API returns an error. + When you update a synonym rule, all analyzers using the synonyms set will be + reloaded automatically to reflect the new rule. ``_ - :param set_id: The id of the synonym set to be updated with the synonym rule - :param rule_id: The id of the synonym rule to be updated or created - :param synonyms: + :param set_id: The ID of the synonym set. + :param rule_id: The ID of the synonym rule to be updated or created. + :param synonyms: The synonym rule information definition, which must be in Solr + format. """ if set_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'set_id'") diff --git a/elasticsearch/_async/client/tasks.py b/elasticsearch/_async/client/tasks.py index 1c32896b4..474ffa23e 100644 --- a/elasticsearch/_async/client/tasks.py +++ b/elasticsearch/_async/client/tasks.py @@ -47,27 +47,30 @@ async def cancel( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Cancel a task. 
A task may continue to run for some time after it has been cancelled - because it may not be able to safely stop its current activity straight away. - It is also possible that Elasticsearch must complete its work on other tasks - before it can process the cancellation. The get task information API will continue - to list these cancelled tasks until they complete. The cancelled flag in the - response indicates that the cancellation command has been processed and the task - will stop as soon as possible. To troubleshoot why a cancelled task does not - complete promptly, use the get task information API with the `?detailed` parameter - to identify the other tasks the system is running. You can also use the node - hot threads API to obtain detailed information about the work the system is doing + Cancel a task. WARNING: The task management API is new and should still be considered + a beta feature. The API may change in ways that are not backwards compatible. + A task may continue to run for some time after it has been cancelled because + it may not be able to safely stop its current activity straight away. It is also + possible that Elasticsearch must complete its work on other tasks before it can + process the cancellation. The get task information API will continue to list + these cancelled tasks until they complete. The cancelled flag in the response + indicates that the cancellation command has been processed and the task will + stop as soon as possible. To troubleshoot why a cancelled task does not complete + promptly, use the get task information API with the `?detailed` parameter to + identify the other tasks the system is running. You can also use the node hot + threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task. ``_ - :param task_id: ID of the task. - :param actions: Comma-separated list or wildcard expression of actions used to - limit the request. - :param nodes: Comma-separated list of node IDs or names used to limit the request. - :param parent_task_id: Parent task ID used to limit the tasks. - :param wait_for_completion: Should the request block until the cancellation of - the task and its descendant tasks is completed. Defaults to false + :param task_id: The task identifier. + :param actions: A comma-separated list or wildcard expression of actions that + is used to limit the request. + :param nodes: A comma-separated list of node IDs or names that is used to limit + the request. + :param parent_task_id: A parent task ID that is used to limit the tasks. + :param wait_for_completion: If true, the request blocks until all found tasks + are complete. """ __path_parts: t.Dict[str, str] if task_id not in SKIP_IN_PATH: @@ -118,12 +121,16 @@ async def get( ) -> ObjectApiResponse[t.Any]: """ Get task information. Get information about a task currently running in the cluster. + WARNING: The task management API is new and should still be considered a beta + feature. The API may change in ways that are not backwards compatible. If the + task identifier is not found, a 404 response code indicates that there are no + resources that match the request. ``_ - :param task_id: ID of the task. - :param timeout: Period to wait for a response. If no response is received before - the timeout expires, the request fails and returns an error. + :param task_id: The task identifier. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. 
:param wait_for_completion: If `true`, the request blocks until the task has
        completed.
        """
@@ -167,7 +174,6 @@ async def list(
            t.Union[str, t.Literal["nodes", "none", "parents"]]
        ] = None,
        human: t.Optional[bool] = None,
-        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
        nodes: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        parent_task_id: t.Optional[str] = None,
        pretty: t.Optional[bool] = None,
@@ -176,25 +182,45 @@ async def list(
    ) -> ObjectApiResponse[t.Any]:
        """
        Get all tasks. Get information about the tasks currently running on one or more
-        nodes in the cluster.
+        nodes in the cluster. WARNING: The task management API is new and should still
+        be considered a beta feature. The API may change in ways that are not backwards
+        compatible. **Identifying running tasks** The `X-Opaque-Id header`, when provided
+        on the HTTP request header, is going to be returned as a header in the response
+        as well as in the headers field of the task information. This enables you
+        to track certain calls or associate certain tasks with the client that started
+        them. For example: ``` curl -i -H "X-Opaque-Id: 123456" "http://localhost:9200/_tasks?group_by=parents"
+        ``` The API returns the following result: ``` HTTP/1.1 200 OK X-Opaque-Id: 123456
+        content-type: application/json; charset=UTF-8 content-length: 831 { "tasks" :
+        { "u5lcZHqcQhu-rUoFaqDphA:45" : { "node" : "u5lcZHqcQhu-rUoFaqDphA", "id" : 45,
+        "type" : "transport", "action" : "cluster:monitor/tasks/lists", "start_time_in_millis"
+        : 1513823752749, "running_time_in_nanos" : 293139, "cancellable" : false, "headers"
+        : { "X-Opaque-Id" : "123456" }, "children" : [ { "node" : "u5lcZHqcQhu-rUoFaqDphA",
+        "id" : 46, "type" : "direct", "action" : "cluster:monitor/tasks/lists[n]", "start_time_in_millis"
+        : 1513823752750, "running_time_in_nanos" : 92133, "cancellable" : false, "parent_task_id"
+        : "u5lcZHqcQhu-rUoFaqDphA:45", "headers" : { "X-Opaque-Id" : "123456" } } ] }
+        } } ``` In this example, `X-Opaque-Id: 123456` is the ID as a part of the response
+        header. The `X-Opaque-Id` in the task `headers` is the ID for the task that was
+        initiated by the REST request. The `X-Opaque-Id` in the children `headers` is
+        the child task of the task that was initiated by the REST request.
        ``_
-        :param actions: Comma-separated list or wildcard expression of actions used to
-            limit the request.
+        :param actions: A comma-separated list or wildcard expression of actions used
+            to limit the request. For example, you can use `cluster:*` to retrieve all
+            cluster-related tasks.
        :param detailed: If `true`, the response includes detailed information about
-            shard recoveries. This information is useful to distinguish tasks from each
+            the running tasks. This information is useful to distinguish tasks from each
            other but is more costly to run.
-        :param group_by: Key used to group tasks in the response.
-        :param master_timeout: Period to wait for a connection to the master node. If
-            no response is received before the timeout expires, the request fails and
-            returns an error.
-        :param nodes: Comma-separated list of node IDs or names used to limit returned
-            information.
-        :param parent_task_id: Parent task ID used to limit returned information. To
-            return all tasks, omit this parameter or use a value of `-1`.
-        :param timeout: Period to wait for a response. If no response is received before
-            the timeout expires, the request fails and returns an error.
+        :param group_by: A key that is used to group tasks in the response. 
The task + lists can be grouped either by nodes or by parent tasks. + :param nodes: A comma-separated list of node IDs or names that is used to limit + the returned information. + :param parent_task_id: A parent task identifier that is used to limit returned + information. To return all tasks, omit this parameter or use a value of `-1`. + If the parent task is not found, the API does not return a 404 response code. + :param timeout: The period to wait for each node to respond. If a node does not + respond before its timeout expires, the response does not include its information. + However, timed out nodes are included in the `node_failures` property. :param wait_for_completion: If `true`, the request blocks until the operation is complete. """ @@ -213,8 +239,6 @@ async def list( __query["group_by"] = group_by if human is not None: __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout if nodes is not None: __query["nodes"] = nodes if parent_task_id is not None: diff --git a/elasticsearch/_async/client/text_structure.py b/elasticsearch/_async/client/text_structure.py index 3e537da41..4c4779cba 100644 --- a/elasticsearch/_async/client/text_structure.py +++ b/elasticsearch/_async/client/text_structure.py @@ -54,7 +54,21 @@ async def find_field_structure( ) -> ObjectApiResponse[t.Any]: """ Find the structure of a text field. Find the structure of a text field in an - Elasticsearch index. + Elasticsearch index. This API provides a starting point for extracting further + information from log messages already ingested into Elasticsearch. For example, + if you have ingested data into a very simple index that has just `@timestamp` + and message fields, you can use this API to see what common structure exists + in the message field. The response from the API contains: * Sample messages. + * Statistics that reveal the most common values for all fields detected within + the text and basic numeric statistics for numeric fields. * Information about + the structure of the text, which is useful when you write ingest configurations + to index it or similarly formatted text. * Appropriate mappings for an Elasticsearch + index, which you could use to ingest the text. All this information can be calculated + by the structure finder with no guidance. However, you can optionally override + some of the decisions about the text structure by specifying one or more query + parameters. If the structure finder produces unexpected results, specify the + `explain` query parameter and an explanation will appear in the response. It + helps determine why the returned structure was chosen. ``_ @@ -84,9 +98,9 @@ async def find_field_structure( `field1`, and `field2` are used in the `grok_pattern` output. The intention in that situation is that a user who knows the meanings will rename the fields before using them. - :param explain: If true, the response includes a field named `explanation`, which - is an array of strings that indicate how the structure finder produced its - result. + :param explain: If `true`, the response includes a field named `explanation`, + which is an array of strings that indicate how the structure finder produced + its result. :param format: The high level structure of the text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. 
If the format is set to @@ -107,7 +121,7 @@ async def find_field_structure( :param should_trim_fields: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value - is true. Otherwise, the default value is false. + is true. Otherwise, the default value is `false`. :param timeout: The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires, it will be stopped. :param timestamp_field: The name of the field that contains the primary timestamp @@ -236,7 +250,10 @@ async def find_message_structure( Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about - the text structure by specifying one or more query parameters. + the text structure by specifying one or more query parameters. If the structure + finder produces unexpected results, specify the `explain` query parameter and + an explanation will appear in the response. It helps determine why the returned + structure was chosen. ``_ @@ -284,7 +301,7 @@ async def find_message_structure( :param should_trim_fields: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value - is true. Otherwise, the default value is false. + is true. Otherwise, the default value is `false`. :param timeout: The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires, it will be stopped. :param timestamp_field: The name of the field that contains the primary timestamp @@ -413,43 +430,51 @@ async def find_structure( ``_ :param text_files: - :param charset: The text’s character set. It must be a character set that is - supported by the JVM that Elasticsearch uses. For example, UTF-8, UTF-16LE, - windows-1252, or EUC-JP. If this parameter is not specified, the structure + :param charset: The text's character set. It must be a character set that is + supported by the JVM that Elasticsearch uses. For example, `UTF-8`, `UTF-16LE`, + `windows-1252`, or `EUC-JP`. If this parameter is not specified, the structure finder chooses an appropriate character set. - :param column_names: If you have set format to delimited, you can specify the + :param column_names: If you have set format to `delimited`, you can specify the column names in a comma-separated list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header role, columns are named "column1", "column2", "column3", for example. - :param delimiter: If you have set format to delimited, you can specify the character - used to delimit the values in each row. Only a single character is supported; - the delimiter cannot have multiple characters. By default, the API considers - the following possibilities: comma, tab, semi-colon, and pipe (|). In this - default scenario, all rows must have the same number of fields for the delimited - format to be detected. If you specify a delimiter, up to 10% of the rows - can have a different number of columns than the first row. 
- :param ecs_compatibility: The mode of compatibility with ECS compliant Grok patterns - (disabled or v1, default: disabled). - :param explain: If this parameter is set to true, the response includes a field + :param delimiter: If you have set `format` to `delimited`, you can specify the + character used to delimit the values in each row. Only a single character + is supported; the delimiter cannot have multiple characters. By default, + the API considers the following possibilities: comma, tab, semi-colon, and + pipe (`|`). In this default scenario, all rows must have the same number + of fields for the delimited format to be detected. If you specify a delimiter, + up to 10% of the rows can have a different number of columns than the first + row. + :param ecs_compatibility: The mode of compatibility with ECS compliant Grok patterns. + Use this parameter to specify whether to use ECS Grok patterns instead of + legacy ones when the structure finder creates a Grok pattern. Valid values + are `disabled` and `v1`. This setting primarily has an impact when a whole + message Grok pattern such as `%{CATALINALOG}` matches the input. If the structure + finder identifies a common structure but has no idea of meaning then generic + field names such as `path`, `ipaddress`, `field1`, and `field2` are used + in the `grok_pattern` output, with the intention that a user who knows the + meanings rename these fields before using it. + :param explain: If this parameter is set to `true`, the response includes a field named explanation, which is an array of strings that indicate how the structure finder produced its result. If the structure finder produces unexpected results for some text, use this query parameter to help you determine why the returned structure was chosen. - :param format: The high level structure of the text. Valid values are ndjson, - xml, delimited, and semi_structured_text. By default, the API chooses the - format. In this default scenario, all rows must have the same number of fields - for a delimited format to be detected. If the format is set to delimited - and the delimiter is not set, however, the API tolerates up to 5% of rows - that have a different number of columns than the first row. - :param grok_pattern: If you have set format to semi_structured_text, you can - specify a Grok pattern that is used to extract fields from every message + :param format: The high level structure of the text. Valid values are `ndjson`, + `xml`, `delimited`, and `semi_structured_text`. By default, the API chooses + the format. In this default scenario, all rows must have the same number + of fields for a delimited format to be detected. If the format is set to + `delimited` and the delimiter is not set, however, the API tolerates up to + 5% of rows that have a different number of columns than the first row. + :param grok_pattern: If you have set `format` to `semi_structured_text`, you + can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match - what is specified in the timestamp_field parameter. If that parameter is + what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match - "timestamp". If grok_pattern is not specified, the structure finder creates + "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern. 
- :param has_header_row: If you have set format to delimited, you can use this + :param has_header_row: If you have set `format` to `delimited`, you can use this parameter to indicate whether the column names are in the first row of the text. If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows. @@ -459,26 +484,58 @@ async def find_structure( that this may lead to very long processing times if the way to group lines into messages is misdetected. :param lines_to_sample: The number of lines to include in the structural analysis, - starting from the beginning of the text. The minimum is 2; If the value of + starting from the beginning of the text. The minimum is 2. If the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of - the lines. - :param quote: If you have set format to delimited, you can specify the character + the lines. NOTE: The number of lines and the variation of the lines affects + the speed of the analysis. For example, if you upload text where the first + 1000 lines are all variations on the same message, the analysis will find + more commonality than would be seen with a bigger sample. If possible, however, + it is more efficient to upload sample text with more variety in the first + 1000 lines than to request analysis of 100000 lines to achieve some variety. + :param quote: If you have set `format` to `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not - specified, the default value is a double quote ("). If your delimited text + specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. - :param should_trim_fields: If you have set format to delimited, you can specify + :param should_trim_fields: If you have set `format` to `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. - If this parameter is not specified and the delimiter is pipe (|), the default - value is true. Otherwise, the default value is false. - :param timeout: Sets the maximum amount of time that the structure analysis can - take. If the analysis is still running when the timeout expires then it will - be stopped. - :param timestamp_field: Optional parameter to specify the timestamp field in - the file + If this parameter is not specified and the delimiter is pipe (`|`), the default + value is `true`. Otherwise, the default value is `false`. + :param timeout: The maximum amount of time that the structure analysis can take. + If the analysis is still running when the timeout expires then it will be + stopped. + :param timestamp_field: The name of the field that contains the primary timestamp + of each record in the text. In particular, if the text were ingested into + an index, this is the field that would be used to populate the `@timestamp` + field. If the `format` is `semi_structured_text`, this field must match the + name of the appropriate extraction in the `grok_pattern`. Therefore, for + semi-structured text, it is best not to specify this parameter unless `grok_pattern` + is also specified. For structured text, if you specify this parameter, the + field must exist within the text. 
If this parameter is not specified, the + structure finder makes a decision about which field (if any) is the primary + timestamp field. For structured text, it is not compulsory to have a timestamp + in the text. :param timestamp_format: The Java time format of the timestamp field in the text. + Only a subset of Java time format letter groups are supported: * `a` * `d` + * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM` + * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz` Additionally `S` letter + groups (fractional seconds) of length one to nine are supported providing + they occur after `ss` and separated from the `ss` by a `.`, `,` or `:`. Spacing + and punctuation is also permitted with the exception of `?`, newline and + carriage return, together with literal text enclosed in single quotes. For + example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. One + valuable use case for this parameter is when the format is semi-structured + text, there are multiple timestamp formats in the text, and you know which + format corresponds to the primary timestamp, but you do not want to specify + the full `grok_pattern`. Another is when the timestamp format is one that + the structure finder does not consider by default. If this parameter is not + specified, the structure finder chooses the best format from a built-in set. + If the special value `null` is specified the structure finder will not look + for a primary timestamp in the text. When the format is semi-structured text + this will result in the structure finder treating the text as single-line + messages. """ if text_files is None and body is None: raise ValueError( @@ -556,10 +613,12 @@ async def test_grok_pattern( ``_ - :param grok_pattern: Grok pattern to run on the text. - :param text: Lines of text to run the Grok pattern on. - :param ecs_compatibility: The mode of compatibility with ECS compliant Grok patterns - (disabled or v1, default: disabled). + :param grok_pattern: The Grok pattern to run on the text. + :param text: The lines of text to run the Grok pattern on. + :param ecs_compatibility: The mode of compatibility with ECS compliant Grok patterns. + Use this parameter to specify whether to use ECS Grok patterns instead of + legacy ones when the structure finder creates a Grok pattern. Valid values + are `disabled` and `v1`. """ if grok_pattern is None and body is None: raise ValueError("Empty value passed for parameter 'grok_pattern'") diff --git a/elasticsearch/_async/client/transform.py b/elasticsearch/_async/client/transform.py index 5482ad4c1..1d8f55a3b 100644 --- a/elasticsearch/_async/client/transform.py +++ b/elasticsearch/_async/client/transform.py @@ -489,6 +489,7 @@ async def reset_transform( force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Reset a transform. Resets a transform. Before you can reset it, you must stop @@ -503,6 +504,8 @@ async def reset_transform( :param force: If this value is `true`, the transform is reset regardless of its current state. If it's `false`, the transform must be stopped before it can be reset. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. 
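To make the structure-finder options above concrete, here is a minimal sketch of driving them through the async Python client. The endpoint URL, sample documents, and Grok pattern are placeholders, and passing the NDJSON documents as a plain list through `text_files` is an assumption about how the helper serializes the body.

```python
import asyncio

from elasticsearch import AsyncElasticsearch

es = AsyncElasticsearch("http://localhost:9200")  # placeholder endpoint


async def inspect_structure() -> None:
    # Ask the structure finder to analyze a couple of NDJSON documents
    # (assumes `text_files` accepts the documents as a plain list).
    structure = await es.text_structure.find_structure(
        text_files=[
            {"message": "db timeout", "level": "ERROR", "@timestamp": "2024-11-09T10:06:17Z"},
            {"message": "request served", "level": "INFO", "@timestamp": "2024-11-09T10:06:18Z"},
        ],
        format="ndjson",
        ecs_compatibility="v1",
        explain=True,  # include the explanation array in the response
    )
    print(structure["mappings"])

    # Dry-run a Grok pattern against a sample line before relying on it.
    result = await es.text_structure.test_grok_pattern(
        grok_pattern="%{TIMESTAMP_ISO8601:timestamp} %{LOGLEVEL:level} %{GREEDYDATA:message}",
        text=["2024-11-09T10:06:17Z ERROR db timeout"],
        ecs_compatibility="v1",
    )
    print(result["matches"])

    await es.close()


asyncio.run(inspect_structure())
```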
""" if transform_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'transform_id'") @@ -519,6 +522,8 @@ async def reset_transform( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", diff --git a/elasticsearch/_async/client/watcher.py b/elasticsearch/_async/client/watcher.py index fa92b9f68..a4fcb27dd 100644 --- a/elasticsearch/_async/client/watcher.py +++ b/elasticsearch/_async/client/watcher.py @@ -42,11 +42,15 @@ async def ack_watch( in the `status.actions..ack.state` structure. IMPORTANT: If the specified watch is currently being executed, this API will return an error The reason for this behavior is to prevent overwriting the watch status from a watch execution. + Acknowledging an action throttles further executions of that action until its + `ack.state` is reset to `awaits_successful_execution`. This happens when the + condition of the watch is not met (the condition evaluates to false). ``_ - :param watch_id: Watch ID - :param action_id: A comma-separated list of the action ids to be acked + :param watch_id: The watch identifier. + :param action_id: A comma-separated list of the action identifiers to acknowledge. + If you omit this parameter, all of the actions of the watch are acknowledged. """ if watch_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'watch_id'") @@ -96,7 +100,7 @@ async def activate_watch( ``_ - :param watch_id: Watch ID + :param watch_id: The watch identifier. """ if watch_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'watch_id'") @@ -136,7 +140,7 @@ async def deactivate_watch( ``_ - :param watch_id: Watch ID + :param watch_id: The watch identifier. """ if watch_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'watch_id'") @@ -182,7 +186,7 @@ async def delete_watch( ``_ - :param id: Watch ID + :param id: The watch identifier. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -255,11 +259,17 @@ async def execute_watch( and control whether a watch record would be written to the watch history after it runs. You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline. This serves as great tool for testing - and debugging your watches prior to adding them to Watcher. + and debugging your watches prior to adding them to Watcher. When Elasticsearch + security features are enabled on your cluster, watches are run with the privileges + of the user that stored the watches. If your user is allowed to read index `a`, + but not index `b`, then the exact same set of rules will apply during execution + of a watch. When using the run watch API, the authorization data of the user + that called the API will be used as a base, instead of the information who stored + the watch. ``_ - :param id: Identifier for the watch. + :param id: The watch identifier. :param action_modes: Determines how to handle the watch actions as part of the watch execution. :param alternative_input: When present, the watch uses this object as a payload @@ -270,12 +280,12 @@ async def execute_watch( :param record_execution: When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. 
In addition, the status of the watch is updated, possibly throttling - subsequent executions. This can also be specified as an HTTP parameter. + subsequent runs. This can also be specified as an HTTP parameter. :param simulated_actions: :param trigger_data: This structure is parsed as the data of the trigger event - that will be used during the watch execution + that will be used during the watch execution. :param watch: When present, this watch is used instead of the one specified in - the request. This watch is not persisted to the index and record_execution + the request. This watch is not persisted to the index and `record_execution` cannot be set. """ __path_parts: t.Dict[str, str] @@ -327,6 +337,50 @@ async def execute_watch( path_parts=__path_parts, ) + @_rewrite_parameters() + async def get_settings( + self, + *, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Get Watcher index settings. Get settings for the Watcher internal index (`.watches`). + Only a subset of settings are shown, for example `index.auto_expand_replicas` + and `index.number_of_replicas`. + + ``_ + + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. + """ + __path_parts: t.Dict[str, str] = {} + __path = "/_watcher/settings" + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="watcher.get_settings", + path_parts=__path_parts, + ) + @_rewrite_parameters() async def get_watch( self, @@ -342,7 +396,7 @@ async def get_watch( ``_ - :param id: Watch ID + :param id: The watch identifier. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -374,6 +428,7 @@ async def get_watch( "input", "metadata", "throttle_period", + "throttle_period_in_millis", "transform", "trigger", ), @@ -393,7 +448,8 @@ async def put_watch( input: t.Optional[t.Mapping[str, t.Any]] = None, metadata: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, - throttle_period: t.Optional[str] = None, + throttle_period: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + throttle_period_in_millis: t.Optional[t.Any] = None, transform: t.Optional[t.Mapping[str, t.Any]] = None, trigger: t.Optional[t.Mapping[str, t.Any]] = None, version: t.Optional[int] = None, @@ -414,19 +470,28 @@ async def put_watch( ``_ - :param id: Watch ID - :param actions: - :param active: Specify whether the watch is in/active by default - :param condition: + :param id: The identifier for the watch. + :param actions: The list of actions that will be run if the condition matches. + :param active: The initial state of the watch. The default value is `true`, which + means the watch is active by default. 
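The execute-then-acknowledge flow described above might look like this with the async client; the watch id and action id are placeholders carried over from a hypothetical watch definition:

```python
import asyncio

from elasticsearch import AsyncElasticsearch

es = AsyncElasticsearch("http://localhost:9200")  # placeholder endpoint


async def run_and_ack() -> None:
    # Run the watch inline and persist the result to .watcher-history,
    # which also updates the watch status and may throttle later runs.
    record = await es.watcher.execute_watch(
        id="cluster_health_watch",  # placeholder watch id
        record_execution=True,
    )
    print(record["watch_record"]["state"])

    # Acknowledge a single action; it stays throttled until the watch
    # condition evaluates to false and ack.state resets.
    await es.watcher.ack_watch(
        watch_id="cluster_health_watch",
        action_id="log_error",  # placeholder action id
    )
    await es.close()


asyncio.run(run_and_ack())
```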
+ :param condition: The condition that defines if the actions should be run. :param if_primary_term: only update the watch if the last operation that has changed the watch has the specified primary term :param if_seq_no: only update the watch if the last operation that has changed the watch has the specified sequence number - :param input: - :param metadata: - :param throttle_period: - :param transform: - :param trigger: + :param input: The input that defines the input that loads the data for the watch. + :param metadata: Metadata JSON that will be copied into the history entries. + :param throttle_period: The minimum time between actions being run. The default + is 5 seconds. This default can be changed in the config file with the setting + `xpack.watcher.throttle.period.default_period`. If both this value and the + `throttle_period_in_millis` parameter are specified, Watcher uses the last + parameter included in the request. + :param throttle_period_in_millis: Minimum time in milliseconds between actions + being run. Defaults to 5000. If both this value and the throttle_period parameter + are specified, Watcher uses the last parameter included in the request. + :param transform: The transform that processes the watch payload to prepare it + for the watch actions. + :param trigger: The trigger that defines when the watch should run. :param version: Explicit version number for concurrency control """ if id in SKIP_IN_PATH: @@ -462,6 +527,8 @@ async def put_watch( __body["metadata"] = metadata if throttle_period is not None: __body["throttle_period"] = throttle_period + if throttle_period_in_millis is not None: + __body["throttle_period_in_millis"] = throttle_period_in_millis if transform is not None: __body["transform"] = transform if trigger is not None: @@ -508,16 +575,17 @@ async def query_watches( ) -> ObjectApiResponse[t.Any]: """ Query watches. Get all registered watches in a paginated manner and optionally - filter watches by a query. + filter watches by a query. Note that only the `_id` and `metadata.*` fields are + queryable or sortable. ``_ - :param from_: The offset from the first result to fetch. Needs to be non-negative. - :param query: Optional, query filter watches to be returned. - :param search_after: Optional search After to do pagination using last hit’s - sort values. - :param size: The number of hits to return. Needs to be non-negative. - :param sort: Optional sort definition. + :param from_: The offset from the first result to fetch. It must be non-negative. + :param query: A query that filters the watches to be returned. + :param search_after: Retrieve the next page of hits using a set of sort values + from the previous page. + :param size: The number of hits to return. It must be non-negative. + :param sort: One or more fields used to sort the search results. """ __path_parts: t.Dict[str, str] = {} __path = "/_watcher/_query/watches" @@ -575,12 +643,15 @@ async def start( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ Start the watch service. Start the Watcher service if it is not already running. ``_ + + :param master_timeout: Period to wait for a connection to the master node. 
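As a rough sketch of the `put_watch` fields listed above (trigger, input, condition, actions, and the throttle settings), with placeholder names and a simple cluster-health watch body:

```python
import asyncio

from elasticsearch import AsyncElasticsearch

es = AsyncElasticsearch("http://localhost:9200")  # placeholder endpoint


async def register_watch() -> None:
    await es.watcher.put_watch(
        id="cluster_health_watch",  # placeholder id
        active=True,
        trigger={"schedule": {"interval": "10s"}},
        input={"http": {"request": {"host": "localhost", "port": 9200, "path": "/_cluster/health"}}},
        condition={"compare": {"ctx.payload.status": {"eq": "red"}}},
        actions={"log_error": {"logging": {"text": "Cluster health is RED"}}},
        throttle_period="10m",  # or throttle_period_in_millis=600000; the last one sent wins
    )
    # Only _id and metadata.* are queryable, so search by id here.
    found = await es.watcher.query_watches(
        query={"term": {"_id": "cluster_health_watch"}},
        size=10,
    )
    print(found["count"])
    await es.close()


asyncio.run(register_watch())
```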
""" __path_parts: t.Dict[str, str] = {} __path = "/_watcher/_start" @@ -591,6 +662,8 @@ async def start( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -635,7 +708,8 @@ async def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get Watcher statistics. + Get Watcher statistics. This API always returns basic metrics. You retrieve more + metrics by using the metric parameter. ``_ @@ -678,12 +752,17 @@ async def stop( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ Stop the watch service. Stop the Watcher service if it is running. ``_ + + :param master_timeout: The period to wait for the master node. If the master + node is not available before the timeout expires, the request fails and returns + an error. To indicate that the request should never timeout, set it to `-1`. """ __path_parts: t.Dict[str, str] = {} __path = "/_watcher/_stop" @@ -694,6 +773,8 @@ async def stop( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -705,3 +786,70 @@ async def stop( endpoint_id="watcher.stop", path_parts=__path_parts, ) + + @_rewrite_parameters( + body_fields=("index_auto_expand_replicas", "index_number_of_replicas"), + parameter_aliases={ + "index.auto_expand_replicas": "index_auto_expand_replicas", + "index.number_of_replicas": "index_number_of_replicas", + }, + ) + async def update_settings( + self, + *, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + index_auto_expand_replicas: t.Optional[str] = None, + index_number_of_replicas: t.Optional[int] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Update Watcher index settings. Update settings for the Watcher internal index + (`.watches`). Only a subset of settings can be modified. This includes `index.auto_expand_replicas` + and `index.number_of_replicas`. + + ``_ + + :param index_auto_expand_replicas: + :param index_number_of_replicas: + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. 
+ """ + __path_parts: t.Dict[str, str] = {} + __path = "/_watcher/settings" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + if not __body: + if index_auto_expand_replicas is not None: + __body["index.auto_expand_replicas"] = index_auto_expand_replicas + if index_number_of_replicas is not None: + __body["index.number_of_replicas"] = index_number_of_replicas + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="watcher.update_settings", + path_parts=__path_parts, + ) diff --git a/elasticsearch/_async/client/xpack.py b/elasticsearch/_async/client/xpack.py index b7de2e83f..f02ad837d 100644 --- a/elasticsearch/_async/client/xpack.py +++ b/elasticsearch/_async/client/xpack.py @@ -96,9 +96,10 @@ async def usage( ``_ - :param master_timeout: Period to wait for a connection to the master node. If - no response is received before the timeout expires, the request fails and - returns an error. + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. To indicate that the request should never timeout, + set it to `-1`. """ __path_parts: t.Dict[str, str] = {} __path = "/_xpack/usage" diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py index 047404552..3292a7454 100644 --- a/elasticsearch/_sync/client/__init__.py +++ b/elasticsearch/_sync/client/__init__.py @@ -644,41 +644,125 @@ def bulk( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Bulk index or delete documents. Performs multiple indexing or delete operations - in a single API call. This reduces overhead and can greatly increase indexing - speed. + Bulk index or delete documents. Perform multiple `index`, `create`, `delete`, + and `update` actions in a single request. This reduces overhead and can greatly + increase indexing speed. If the Elasticsearch security features are enabled, + you must have the following index privileges for the target data stream, index, + or index alias: * To use the `create` action, you must have the `create_doc`, + `create`, `index`, or `write` index privilege. Data streams support only the + `create` action. * To use the `index` action, you must have the `create`, `index`, + or `write` index privilege. * To use the `delete` action, you must have the `delete` + or `write` index privilege. * To use the `update` action, you must have the `index` + or `write` index privilege. * To automatically create a data stream or index + with a bulk API request, you must have the `auto_configure`, `create_index`, + or `manage` index privilege. * To make the result of a bulk operation visible + to search using the `refresh` parameter, you must have the `maintenance` or `manage` + index privilege. Automatic data stream creation requires a matching index template + with data stream enabled. 
The actions are specified in the request body using + a newline delimited JSON (NDJSON) structure: ``` action_and_meta_data\\n optional_source\\n + action_and_meta_data\\n optional_source\\n .... action_and_meta_data\\n optional_source\\n + ``` The `index` and `create` actions expect a source on the next line and have + the same semantics as the `op_type` parameter in the standard index API. A `create` + action fails if a document with the same ID already exists in the target. An `index` + action adds or replaces a document as necessary. NOTE: Data streams support only + the `create` action. To update or delete a document in a data stream, you must + target the backing index containing the document. An `update` action expects + that the partial doc, upsert, and script and its options are specified on the + next line. A `delete` action does not expect a source on the next line and has + the same semantics as the standard delete API. NOTE: The final line of data must + end with a newline character (`\\n`). Each newline character may be preceded + by a carriage return (`\\r`). When sending NDJSON data to the `_bulk` endpoint, + use a `Content-Type` header of `application/json` or `application/x-ndjson`. + Because this format uses literal newline characters (`\\n`) as delimiters, make + sure that the JSON actions and sources are not pretty printed. If you provide + a target in the request path, it is used for any actions that don't explicitly + specify an `_index` argument. A note on the format: the idea here is to make + processing as fast as possible. As some of the actions are redirected to other + shards on other nodes, only `action_meta_data` is parsed on the receiving node + side. Client libraries using this protocol should try and strive to do something + similar on the client side, and reduce buffering as much as possible. There is + no "correct" number of actions to perform in a single bulk request. Experiment + with different settings to find the optimal size for your particular workload. + Note that Elasticsearch limits the maximum size of an HTTP request to 100mb by + default so clients must ensure that no request exceeds this size. It is not possible + to index a single document that exceeds the size limit, so you must pre-process + any such documents into smaller pieces before sending them to Elasticsearch. + For instance, split documents into pages or chapters before indexing them, or + store raw binary data in a system outside Elasticsearch and replace the raw data + with a link to the external system in the documents that you send to Elasticsearch. + **Client support for bulk requests** Some of the officially supported clients + provide helpers to assist with bulk requests and reindexing: * Go: Check out + `esutil.BulkIndexer` * Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` + and `Search::Elasticsearch::Client::5_0::Scroll` * Python: Check out `elasticsearch.helpers.*` + * JavaScript: Check out `client.helpers.*` * .NET: Check out `BulkAllObservable` + * PHP: Check out bulk indexing. **Submitting bulk requests with cURL** If you're + providing text file input to `curl`, you must use the `--data-binary` flag instead + of plain `-d`. The latter doesn't preserve newlines.
For example: ``` $ cat requests + { "index" : { "_index" : "test", "_id" : "1" } } { "field1" : "value1" } $ curl + -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary + "@requests"; echo {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]} + ``` **Optimistic concurrency control** Each `index` and `delete` action within + a bulk API call may include the `if_seq_no` and `if_primary_term` parameters + in their respective action and meta data lines. The `if_seq_no` and `if_primary_term` + parameters control how operations are run, based on the last modification to + existing documents. See Optimistic concurrency control for more details. **Versioning** + Each bulk item can include the version value using the `version` field. It automatically + follows the behavior of the index or delete operation based on the `_version` + mapping. It also support the `version_type`. **Routing** Each bulk item can include + the routing value using the `routing` field. It automatically follows the behavior + of the index or delete operation based on the `_routing` mapping. NOTE: Data + streams do not support custom routing unless they were created with the `allow_custom_routing` + setting enabled in the template. **Wait for active shards** When making bulk + calls, you can set the `wait_for_active_shards` parameter to require a minimum + number of shard copies to be active before starting to process the bulk request. + **Refresh** Control when the changes made by this request are visible to search. + NOTE: Only the shards that receive the bulk request will be affected by refresh. + Imagine a `_bulk?refresh=wait_for` request with three documents in it that happen + to be routed to different shards in an index with five shards. The request will + only wait for those three shards to refresh. The other two shards that make up + the index do not participate in the `_bulk` request at all. ``_ :param operations: - :param index: Name of the data stream, index, or index alias to perform bulk + :param index: The name of the data stream, index, or index alias to perform bulk actions on. :param list_executed_pipelines: If `true`, the response will include the ingest - pipelines that were executed for each index or create. - :param pipeline: ID of the pipeline to use to preprocess incoming documents. - If the index has a default ingest pipeline specified, then setting the value - to `_none` disables the default ingest pipeline for this request. If a final - pipeline is configured it will always run, regardless of the value of this + pipelines that were run for each index or create. + :param pipeline: The pipeline identifier to use to preprocess incoming documents. + If the index has a default ingest pipeline specified, setting the value to + `_none` turns off the default ingest pipeline for this request. If a final + pipeline is configured, it will always run regardless of the value of this parameter. :param refresh: If `true`, Elasticsearch refreshes the affected shards to make - this operation visible to search, if `wait_for` then wait for a refresh to - make this operation visible to search, if `false` do nothing with refreshes. + this operation visible to search. If `wait_for`, wait for a refresh to make + this operation visible to search. If `false`, do nothing with refreshes. Valid values: `true`, `false`, `wait_for`. - :param require_alias: If `true`, the request’s actions must target an index alias. 
+ :param require_alias: If `true`, the request's actions must target an index alias. :param require_data_stream: If `true`, the request's actions must target a data - stream (existing or to-be-created). - :param routing: Custom value used to route operations to a specific shard. - :param source: `true` or `false` to return the `_source` field or not, or a list - of fields to return. + stream (existing or to be created). + :param routing: A custom value that is used to route operations to a specific + shard. + :param source: Indicates whether to return the `_source` field (`true` or `false`) + or contains a list of fields to return. :param source_excludes: A comma-separated list of source fields to exclude from - the response. + the response. You can also use this parameter to exclude fields from the + subset specified in `_source_includes` query parameter. If the `_source` + parameter is `false`, this parameter is ignored. :param source_includes: A comma-separated list of source fields to include in - the response. - :param timeout: Period each action waits for the following operations: automatic - index creation, dynamic mapping updates, waiting for active shards. + the response. If this parameter is specified, only these source fields are + returned. You can exclude fields from this subset using the `_source_excludes` + query parameter. If the `_source` parameter is `false`, this parameter is + ignored. + :param timeout: The period each action waits for the following operations: automatic + index creation, dynamic mapping updates, and waiting for active shards. The + default is `1m` (one minute), which guarantees Elasticsearch waits for at + least the timeout before failing. The actual wait time could be longer, particularly + when multiple waits occur. :param wait_for_active_shards: The number of shard copies that must be active - before proceeding with the operation. Set to all or any positive integer - up to the total number of shards in the index (`number_of_replicas+1`). + before proceeding with the operation. Set to `all` or any positive integer + up to the total number of shards in the index (`number_of_replicas+1`). The + default is `1`, which waits for each primary shard to be active. """ if operations is None and body is None: raise ValueError( @@ -758,7 +842,7 @@ def clear_scroll( ``_ - :param scroll_id: Scroll IDs to clear. To clear all scroll IDs, use `_all`. + :param scroll_id: The scroll IDs to clear. To clear all scroll IDs, use `_all`. """ __path_parts: t.Dict[str, str] = {} __path = "/_search/scroll" @@ -882,46 +966,62 @@ def count( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Count search results. Get the number of documents matching a query. + Count search results. Get the number of documents matching a query. The query + can either be provided using a simple query string as a parameter or using the + Query DSL defined within the request body. The latter must be nested in a `query` + key, which is the same as the search API. The count API supports multi-target + syntax. You can run a single count API search across multiple data streams and + indices. The operation is broadcast across all shards. For each shard ID group, + a replica is chosen and the search is run against it. This means that replicas + increase the scalability of the count. ``_ - :param index: Comma-separated list of data streams, indices, and aliases to search. - Supports wildcards (`*`). 
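A hedged sketch of what the NDJSON contract above looks like from the Python client: alternating action and source dictionaries are passed in `operations` (the index name and documents are placeholders), and the `elasticsearch.helpers` module mentioned earlier wraps the same endpoint for larger workloads:

```python
from elasticsearch import Elasticsearch, helpers

client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

# Alternating action and source dictionaries; the client serializes them to
# NDJSON and sets an appropriate Content-Type for the _bulk endpoint.
resp = client.bulk(
    index="test",  # default target for actions without an explicit _index
    operations=[
        {"index": {"_id": "1"}},
        {"field1": "value1"},
        {"create": {"_id": "2"}},
        {"field1": "value2"},
        {"update": {"_id": "1"}},
        {"doc": {"field2": "value2"}},
        {"delete": {"_id": "2"}},
    ],
    refresh="wait_for",  # only the shards that received these actions wait for a refresh
)
print(resp["took"], resp["errors"])

# The helpers chunk large iterables into successive bulk requests.
docs = ({"_index": "test", "_id": str(i), "field1": f"value{i}"} for i in range(1_000))
helpers.bulk(client, docs)
```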
To search all data streams and indices, omit this - parameter or use `*` or `_all`. + :param index: A comma-separated list of data streams, indices, and aliases to + search. It supports wildcards (`*`). To search all data streams and indices, + omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. - This behavior applies even if the request targets other open indices. + This behavior applies even if the request targets other open indices. For + example, a request targeting `foo*,bar*` returns an error if an index starts + with `foo` but no index starts with `bar`. :param analyze_wildcard: If `true`, wildcard and prefix queries are analyzed. - This parameter can only be used when the `q` query string parameter is specified. - :param analyzer: Analyzer to use for the query string. This parameter can only - be used when the `q` query string parameter is specified. + This parameter can be used only when the `q` query string parameter is specified. + :param analyzer: The analyzer to use for the query string. This parameter can + be used only when the `q` query string parameter is specified. :param default_operator: The default operator for query string query: `AND` or - `OR`. This parameter can only be used when the `q` query string parameter + `OR`. This parameter can be used only when the `q` query string parameter is specified. - :param df: Field to use as default where no field prefix is given in the query - string. This parameter can only be used when the `q` query string parameter + :param df: The field to use as a default when no field prefix is given in the + query string. This parameter can be used only when the `q` query string parameter is specified. - :param expand_wildcards: Type of index that wildcard patterns can match. If the - request can target data streams, this argument determines whether wildcard - expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. - :param ignore_throttled: If `true`, concrete, expanded or aliased indices are + :param expand_wildcards: The type of index that wildcard patterns can match. + If the request can target data streams, this argument determines whether + wildcard expressions match hidden data streams. It supports comma-separated + values, such as `open,hidden`. + :param ignore_throttled: If `true`, concrete, expanded, or aliased indices are ignored when frozen. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param lenient: If `true`, format-based query failures (such as providing text - to a numeric field) in the query string will be ignored. - :param min_score: Sets the minimum `_score` value that documents must have to - be included in the result. - :param preference: Specifies the node or shard the operation should be performed - on. Random by default. - :param q: Query in the Lucene query string syntax. - :param query: Defines the search definition using the Query DSL. - :param routing: Custom value used to route operations to a specific shard. - :param terminate_after: Maximum number of documents to collect for each shard. + to a numeric field) in the query string will be ignored. This parameter can + be used only when the `q` query string parameter is specified. + :param min_score: The minimum `_score` value that documents must have to be included + in the result. 
+ :param preference: The node or shard the operation should be performed on. By + default, it is random. + :param q: The query in Lucene query string syntax. + :param query: Defines the search definition using the Query DSL. The query is + optional, and when not provided, it will use `match_all` to count all the + docs. + :param routing: A custom value used to route operations to a specific shard. + :param terminate_after: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. - Elasticsearch collects documents before sorting. + Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. + Elasticsearch applies this parameter to each shard handling the request. + When possible, let Elasticsearch perform early termination automatically. + Avoid specifying this parameter for requests that target data streams with + backing indices across multiple data tiers. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: @@ -2489,9 +2589,9 @@ def info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get cluster info. Returns basic information about the cluster. + Get cluster info. Get basic build, version, and cluster information. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/" @@ -4467,6 +4567,7 @@ def search_shards( human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, local: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, routing: t.Optional[str] = None, @@ -4494,6 +4595,7 @@ def search_shards( a missing or closed index. :param local: If `true`, the request retrieves information from the local node only. + :param master_timeout: Period to wait for a connection to the master node. :param preference: Specifies the node or shard the operation should be performed on. Random by default. :param routing: Custom value used to route operations to a specific shard. @@ -4520,6 +4622,8 @@ def search_shards( __query["ignore_unavailable"] = ignore_unavailable if local is not None: __query["local"] = local + if master_timeout is not None: + __query["master_timeout"] = master_timeout if preference is not None: __query["preference"] = preference if pretty is not None: diff --git a/elasticsearch/_sync/client/ccr.py b/elasticsearch/_sync/client/ccr.py index f51ad4d0b..bd6eb0b13 100644 --- a/elasticsearch/_sync/client/ccr.py +++ b/elasticsearch/_sync/client/ccr.py @@ -33,6 +33,7 @@ def delete_auto_follow_pattern( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -42,6 +43,7 @@ def delete_auto_follow_pattern( ``_ :param name: The name of the auto follow pattern. + :param master_timeout: Period to wait for a connection to the master node. 
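For instance, the two ways of expressing a count described above (a Query DSL body versus the `q` query string) could look like this, with a placeholder index and field:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

# Count with a Query DSL body; leaving `query` out counts every document.
resp = client.count(
    index="my-index-000001",  # placeholder index
    query={"term": {"user.id": "kimchy"}},
)
print(resp["count"])

# The same count expressed with the Lucene query string syntax.
print(client.count(index="my-index-000001", q="user.id:kimchy")["count"])
```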
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -54,6 +56,8 @@ def delete_auto_follow_pattern( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -94,6 +98,7 @@ def follow( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, max_outstanding_read_requests: t.Optional[int] = None, max_outstanding_write_requests: t.Optional[int] = None, max_read_request_operation_count: t.Optional[int] = None, @@ -124,6 +129,7 @@ def follow( :param remote_cluster: The remote cluster containing the leader index. :param data_stream_name: If the leader index is part of a data stream, the name to which the local data stream for the followed index should be renamed. + :param master_timeout: Period to wait for a connection to the master node. :param max_outstanding_read_requests: The maximum number of outstanding reads requests from the remote cluster. :param max_outstanding_write_requests: The maximum number of outstanding write @@ -174,6 +180,8 @@ def follow( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if wait_for_active_shards is not None: @@ -232,6 +240,7 @@ def follow_info( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -244,6 +253,7 @@ def follow_info( :param index: A comma-separated list of index patterns; use `_all` to perform the operation on all indices + :param master_timeout: Period to wait for a connection to the master node. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -256,6 +266,8 @@ def follow_info( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -277,6 +289,7 @@ def follow_stats( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Get follower stats. Get cross-cluster replication follower stats. The API returns @@ -287,6 +300,8 @@ def follow_stats( :param index: A comma-separated list of index patterns; use `_all` to perform the operation on all indices + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. 
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -301,6 +316,8 @@ def follow_stats( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", @@ -331,6 +348,7 @@ def forget_follower( human: t.Optional[bool] = None, leader_remote_cluster: t.Optional[str] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -360,6 +378,8 @@ def forget_follower( :param follower_index: :param follower_index_uuid: :param leader_remote_cluster: + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -375,6 +395,8 @@ def forget_follower( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if follower_cluster is not None: __body["follower_cluster"] = follower_cluster @@ -403,6 +425,7 @@ def get_auto_follow_pattern( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -412,6 +435,7 @@ def get_auto_follow_pattern( :param name: Specifies the auto-follow pattern collection that you want to retrieve. If you do not specify a name, the API returns information for all collections. + :param master_timeout: Period to wait for a connection to the master node. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: @@ -427,6 +451,8 @@ def get_auto_follow_pattern( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -447,6 +473,7 @@ def pause_auto_follow_pattern( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -463,6 +490,7 @@ def pause_auto_follow_pattern( :param name: The name of the auto follow pattern that should pause discovering new indices to follow. + :param master_timeout: Period to wait for a connection to the master node. 
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -475,6 +503,8 @@ def pause_auto_follow_pattern( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -495,6 +525,7 @@ def pause_follow( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -507,6 +538,7 @@ def pause_follow( :param index: The name of the follower index that should pause following its leader index. + :param master_timeout: Period to wait for a connection to the master node. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -519,6 +551,8 @@ def pause_follow( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -561,6 +595,7 @@ def put_auto_follow_pattern( human: t.Optional[bool] = None, leader_index_exclusion_patterns: t.Optional[t.Sequence[str]] = None, leader_index_patterns: t.Optional[t.Sequence[str]] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, max_outstanding_read_requests: t.Optional[int] = None, max_outstanding_write_requests: t.Optional[int] = None, max_read_request_operation_count: t.Optional[int] = None, @@ -600,6 +635,7 @@ def put_auto_follow_pattern( or more leader_index_exclusion_patterns won’t be followed. :param leader_index_patterns: An array of simple index patterns to match against indices in the remote cluster specified by the remote_cluster field. + :param master_timeout: Period to wait for a connection to the master node. :param max_outstanding_read_requests: The maximum number of outstanding reads requests from the remote cluster. :param max_outstanding_write_requests: The maximum number of outstanding reads @@ -644,6 +680,8 @@ def put_auto_follow_pattern( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: @@ -704,6 +742,7 @@ def resume_auto_follow_pattern( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -717,6 +756,7 @@ def resume_auto_follow_pattern( :param name: The name of the auto follow pattern to resume discovering new indices to follow. + :param master_timeout: Period to wait for a connection to the master node. 
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -729,6 +769,8 @@ def resume_auto_follow_pattern( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -762,6 +804,7 @@ def resume_follow( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, max_outstanding_read_requests: t.Optional[int] = None, max_outstanding_write_requests: t.Optional[int] = None, max_read_request_operation_count: t.Optional[int] = None, @@ -785,6 +828,7 @@ def resume_follow( ``_ :param index: The name of the follow index to resume following. + :param master_timeout: Period to wait for a connection to the master node. :param max_outstanding_read_requests: :param max_outstanding_write_requests: :param max_read_request_operation_count: @@ -808,6 +852,8 @@ def resume_follow( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: @@ -859,13 +905,19 @@ def stats( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Get cross-cluster replication stats. This API returns stats about auto-following and the same shard-level stats as the get follower stats API. ``_ + + :param master_timeout: Period to wait for a connection to the master node. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_ccr/stats" @@ -876,8 +928,12 @@ def stats( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", @@ -896,6 +952,7 @@ def unfollow( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -911,6 +968,7 @@ def unfollow( :param index: The name of the follower index that should be turned into a regular index. + :param master_timeout: Period to wait for a connection to the master node. 
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -923,6 +981,8 @@ def unfollow( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} diff --git a/elasticsearch/_sync/client/cluster.py b/elasticsearch/_sync/client/cluster.py index 6c1afa6c7..f5b45aa37 100644 --- a/elasticsearch/_sync/client/cluster.py +++ b/elasticsearch/_sync/client/cluster.py @@ -38,6 +38,7 @@ def allocation_explain( include_disk_info: t.Optional[bool] = None, include_yes_decisions: t.Optional[bool] = None, index: t.Optional[str] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, primary: t.Optional[bool] = None, shard: t.Optional[int] = None, @@ -61,6 +62,7 @@ def allocation_explain( :param include_yes_decisions: If true, returns YES decisions in explanation. :param index: Specifies the name of the index that you would like an explanation for. + :param master_timeout: Period to wait for a connection to the master node. :param primary: If true, returns explanation for the primary shard for the given shard ID. :param shard: Specifies the ID of the shard that you would like an explanation @@ -80,6 +82,8 @@ def allocation_explain( __query["include_disk_info"] = include_disk_info if include_yes_decisions is not None: __query["include_yes_decisions"] = include_yes_decisions + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: @@ -119,9 +123,8 @@ def delete_component_template( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete component templates. Deletes component templates. Component templates - are building blocks for constructing index templates that specify index mappings, - settings, and aliases. + Delete component templates. Component templates are building blocks for constructing + index templates that specify index mappings, settings, and aliases. ``_ @@ -167,6 +170,7 @@ def delete_voting_config_exclusions( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, wait_for_removal: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: @@ -176,6 +180,7 @@ def delete_voting_config_exclusions( ``_ + :param master_timeout: Period to wait for a connection to the master node. :param wait_for_removal: Specifies whether to wait for all excluded nodes to be removed from the cluster before clearing the voting configuration exclusions list. Defaults to true, meaning that all excluded nodes must be removed from @@ -192,6 +197,8 @@ def delete_voting_config_exclusions( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if wait_for_removal is not None: @@ -275,7 +282,7 @@ def get_component_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get component templates. Retrieves information about component templates. + Get component templates. Get information about component templates. 
``_ @@ -625,6 +632,7 @@ def post_voting_config_exclusions( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, node_ids: t.Optional[t.Union[str, t.Sequence[str]]] = None, node_names: t.Optional[t.Union[str, t.Sequence[str]]] = None, pretty: t.Optional[bool] = None, @@ -661,6 +669,7 @@ def post_voting_config_exclusions( ``_ + :param master_timeout: Period to wait for a connection to the master node. :param node_ids: A comma-separated list of the persistent ids of the nodes to exclude from the voting configuration. If specified, you may not also specify node_names. @@ -680,6 +689,8 @@ def post_voting_config_exclusions( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if node_ids is not None: __query["node_ids"] = node_ids if node_names is not None: @@ -719,20 +730,21 @@ def put_component_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a component template. Creates or updates a component template. - Component templates are building blocks for constructing index templates that - specify index mappings, settings, and aliases. An index template can be composed - of multiple component templates. To use a component template, specify it in an - index template’s `composed_of` list. Component templates are only applied to - new data streams and indices as part of a matching index template. Settings and - mappings specified directly in the index template or the create index request - override any settings or mappings specified in a component template. Component - templates are only used during index creation. For data streams, this includes - data stream creation and the creation of a stream’s backing indices. Changes - to component templates do not affect existing indices, including a stream’s backing - indices. You can use C-style `/* *\\/` block comments in component templates. + Create or update a component template. Component templates are building blocks + for constructing index templates that specify index mappings, settings, and aliases. + An index template can be composed of multiple component templates. To use a component + template, specify it in an index template’s `composed_of` list. Component templates + are only applied to new data streams and indices as part of a matching index + template. Settings and mappings specified directly in the index template or the + create index request override any settings or mappings specified in a component + template. Component templates are only used during index creation. For data streams, + this includes data stream creation and the creation of a stream’s backing indices. + Changes to component templates do not affect existing indices, including a stream’s + backing indices. You can use C-style `/* *\\/` block comments in component templates. You can include comments anywhere in the request body except before the opening - curly bracket. + curly bracket. **Applying component templates** You cannot directly apply a component + template to a data stream or index. To be applied, a component template must + be included in an index template's `composed_of` list. ``_ @@ -755,8 +767,8 @@ def put_component_template( :param master_timeout: Period to wait for a connection to the master node. 
If no response is received before the timeout expires, the request fails and returns an error. - :param meta: Optional user metadata about the component template. May have any - contents. This map is not automatically generated by Elasticsearch. This + :param meta: Optional user metadata about the component template. It may have + any contents. This map is not automatically generated by Elasticsearch. This information is stored in the cluster state, so keeping it short is preferable. To unset `_meta`, replace the template without specifying this information. :param version: Version number used to manage component templates externally. diff --git a/elasticsearch/_sync/client/connector.py b/elasticsearch/_sync/client/connector.py index 6df64b55f..aeffc0d39 100644 --- a/elasticsearch/_sync/client/connector.py +++ b/elasticsearch/_sync/client/connector.py @@ -996,6 +996,106 @@ def sync_job_post( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=( + "deleted_document_count", + "indexed_document_count", + "indexed_document_volume", + "last_seen", + "metadata", + "total_document_count", + ), + ) + @_stability_warning(Stability.EXPERIMENTAL) + def sync_job_update_stats( + self, + *, + connector_sync_job_id: str, + deleted_document_count: t.Optional[int] = None, + indexed_document_count: t.Optional[int] = None, + indexed_document_volume: t.Optional[int] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + last_seen: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + metadata: t.Optional[t.Mapping[str, t.Any]] = None, + pretty: t.Optional[bool] = None, + total_document_count: t.Optional[int] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Set the connector sync job stats. Stats include: `deleted_document_count`, `indexed_document_count`, + `indexed_document_volume`, and `total_document_count`. You can also update `last_seen`. + This API is mainly used by the connector service for updating sync job information. + To sync data using self-managed connectors, you need to deploy the Elastic connector + service on your own infrastructure. This service runs automatically on Elastic + Cloud for Elastic managed connectors. + + ``_ + + :param connector_sync_job_id: The unique identifier of the connector sync job. + :param deleted_document_count: The number of documents the sync job deleted. + :param indexed_document_count: The number of documents the sync job indexed. + :param indexed_document_volume: The total size of the data (in MiB) the sync + job indexed. + :param last_seen: The timestamp to use in the `last_seen` property for the connector + sync job. + :param metadata: The connector-specific metadata. + :param total_document_count: The total number of documents in the target index + after the sync job finished. 
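A minimal sketch of calling the new experimental `connector.sync_job_update_stats` endpoint added above; the sync job ID and document counts are hypothetical values.

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

# Report progress for a connector sync job; the three counts below are required.
client.connector.sync_job_update_stats(
    connector_sync_job_id="my-sync-job-id",  # hypothetical ID
    deleted_document_count=0,
    indexed_document_count=1250,
    indexed_document_volume=42,              # total indexed size in MiB
    total_document_count=1250,               # optional: docs in the target index
)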
+ """ + if connector_sync_job_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") + if deleted_document_count is None and body is None: + raise ValueError( + "Empty value passed for parameter 'deleted_document_count'" + ) + if indexed_document_count is None and body is None: + raise ValueError( + "Empty value passed for parameter 'indexed_document_count'" + ) + if indexed_document_volume is None and body is None: + raise ValueError( + "Empty value passed for parameter 'indexed_document_volume'" + ) + __path_parts: t.Dict[str, str] = { + "connector_sync_job_id": _quote(connector_sync_job_id) + } + __path = f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_stats' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if deleted_document_count is not None: + __body["deleted_document_count"] = deleted_document_count + if indexed_document_count is not None: + __body["indexed_document_count"] = indexed_document_count + if indexed_document_volume is not None: + __body["indexed_document_volume"] = indexed_document_volume + if last_seen is not None: + __body["last_seen"] = last_seen + if metadata is not None: + __body["metadata"] = metadata + if total_document_count is not None: + __body["total_document_count"] = total_document_count + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="connector.sync_job_update_stats", + path_parts=__path_parts, + ) + @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def update_active_filtering( diff --git a/elasticsearch/_sync/client/dangling_indices.py b/elasticsearch/_sync/client/dangling_indices.py index d5d869a65..63bebd50c 100644 --- a/elasticsearch/_sync/client/dangling_indices.py +++ b/elasticsearch/_sync/client/dangling_indices.py @@ -44,7 +44,7 @@ def delete_dangling_index( For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. - ``_ + ``_ :param index_uuid: The UUID of the index to delete. Use the get dangling indices API to find the UUID. @@ -103,7 +103,7 @@ def import_dangling_index( For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. - ``_ + ``_ :param index_uuid: The UUID of the index to import. Use the get dangling indices API to locate the UUID. @@ -162,7 +162,7 @@ def list_dangling_indices( indices while an Elasticsearch node is offline. Use this API to list dangling indices, which you can then import or delete. 
- ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_dangling" diff --git a/elasticsearch/_sync/client/enrich.py b/elasticsearch/_sync/client/enrich.py index b33eff34d..47b85cbaf 100644 --- a/elasticsearch/_sync/client/enrich.py +++ b/elasticsearch/_sync/client/enrich.py @@ -33,6 +33,7 @@ def delete_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -41,6 +42,7 @@ def delete_policy( ``_ :param name: Enrich policy to delete. + :param master_timeout: Period to wait for a connection to the master node. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -53,6 +55,8 @@ def delete_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -73,6 +77,7 @@ def execute_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: @@ -82,6 +87,7 @@ def execute_policy( ``_ :param name: Enrich policy to execute. + :param master_timeout: Period to wait for a connection to the master node. :param wait_for_completion: If `true`, the request blocks other enrich policy execution requests until complete. """ @@ -96,6 +102,8 @@ def execute_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if wait_for_completion is not None: @@ -118,6 +126,7 @@ def get_policy( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -127,6 +136,7 @@ def get_policy( :param name: Comma-separated list of enrich policy names used to limit the request. To return information for all enrich policies, omit this parameter. + :param master_timeout: Period to wait for a connection to the master node. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: @@ -142,6 +152,8 @@ def get_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -165,6 +177,7 @@ def put_policy( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, geo_match: t.Optional[t.Mapping[str, t.Any]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, match: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, range: t.Optional[t.Mapping[str, t.Any]] = None, @@ -178,6 +191,7 @@ def put_policy( :param name: Name of the enrich policy to create or update. 
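Since several enrich endpoints above now accept `master_timeout`, here is a small hypothetical sketch of creating and executing a match enrich policy with that parameter; the policy, index, and field names are placeholders, and the `match` body follows the standard enrich policy structure rather than anything defined in this patch.

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

# Create a match-type enrich policy, waiting up to 30s for the master node.
client.enrich.put_policy(
    name="users-policy",
    match={
        "indices": "users",
        "match_field": "email",
        "enrich_fields": ["first_name", "last_name"],
    },
    master_timeout="30s",
)

# Build the enrich index for the policy and block until it is ready.
client.enrich.execute_policy(name="users-policy", wait_for_completion=True)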
:param geo_match: Matches enrich data to incoming documents based on a `geo_shape` query. + :param master_timeout: Period to wait for a connection to the master node. :param match: Matches enrich data to incoming documents based on a `term` query. :param range: Matches a number, date, or IP address in incoming documents to a range in the enrich index based on a `term` query. @@ -194,6 +208,8 @@ def put_policy( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: @@ -221,6 +237,7 @@ def stats( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -228,6 +245,8 @@ def stats( enrich policies that are currently executing. ``_ + + :param master_timeout: Period to wait for a connection to the master node. """ __path_parts: t.Dict[str, str] = {} __path = "/_enrich/_stats" @@ -238,6 +257,8 @@ def stats( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} diff --git a/elasticsearch/_sync/client/esql.py b/elasticsearch/_sync/client/esql.py index 8863d8e84..7c35bb652 100644 --- a/elasticsearch/_sync/client/esql.py +++ b/elasticsearch/_sync/client/esql.py @@ -20,11 +20,274 @@ from elastic_transport import ObjectApiResponse from ._base import NamespacedClient -from .utils import _rewrite_parameters +from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class EsqlClient(NamespacedClient): + @_rewrite_parameters( + body_fields=( + "query", + "columnar", + "filter", + "locale", + "params", + "profile", + "tables", + ), + ignore_deprecated_options={"params"}, + ) + def async_query( + self, + *, + query: t.Optional[str] = None, + columnar: t.Optional[bool] = None, + delimiter: t.Optional[str] = None, + drop_null_columns: t.Optional[bool] = None, + error_trace: t.Optional[bool] = None, + filter: t.Optional[t.Mapping[str, t.Any]] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + format: t.Optional[ + t.Union[ + str, + t.Literal[ + "arrow", "cbor", "csv", "json", "smile", "tsv", "txt", "yaml" + ], + ] + ] = None, + human: t.Optional[bool] = None, + keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + keep_on_completion: t.Optional[bool] = None, + locale: t.Optional[str] = None, + params: t.Optional[ + t.Sequence[t.Union[None, bool, float, int, str, t.Any]] + ] = None, + pretty: t.Optional[bool] = None, + profile: t.Optional[bool] = None, + tables: t.Optional[ + t.Mapping[str, t.Mapping[str, t.Mapping[str, t.Any]]] + ] = None, + wait_for_completion_timeout: t.Optional[ + t.Union[str, t.Literal[-1], t.Literal[0]] + ] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Run an async ES|QL query. Asynchronously run an ES|QL (Elasticsearch query language) + query, monitor its progress, and retrieve results when they become available. + The API accepts the same parameters and request body as the synchronous query + API, along with additional async related properties. 
+ + ``_ + + :param query: The ES|QL query API accepts an ES|QL query string in the query + parameter, runs it, and returns the results. + :param columnar: By default, ES|QL returns results as rows. For example, FROM + returns each individual document as one row. For the JSON, YAML, CBOR and + smile formats, ES|QL can return the results in a columnar fashion where one + row represents all the values of a certain column in the results. + :param delimiter: The character to use between values within a CSV row. It is + valid only for the CSV format. + :param drop_null_columns: Indicates whether columns that are entirely `null` + will be removed from the `columns` and `values` portion of the results. If + `true`, the response will include an extra section under the name `all_columns` + which has the name of all the columns. + :param filter: Specify a Query DSL query in the filter parameter to filter the + set of documents that an ES|QL query runs on. + :param format: A short version of the Accept header, for example `json` or `yaml`. + :param keep_alive: The period for which the query and its results are stored + in the cluster. The default period is five days. When this period expires, + the query and its results are deleted, even if the query is still ongoing. + If the `keep_on_completion` parameter is false, Elasticsearch only stores + async queries that do not complete within the period set by the `wait_for_completion_timeout` + parameter, regardless of this value. + :param keep_on_completion: Indicates whether the query and its results are stored + in the cluster. If false, the query and its results are stored in the cluster + only if the request does not complete during the period set by the `wait_for_completion_timeout` + parameter. + :param locale: + :param params: To avoid any attempts of hacking or code injection, extract the + values in a separate list of parameters. Use question mark placeholders (?) + in the query string for each of the parameters. + :param profile: If provided and `true` the response will include an extra `profile` + object with information on how the query was executed. This information is + for human debugging and its format can change at any time but it can give + some insight into the performance of each part of the query. + :param tables: Tables to use with the LOOKUP operation. The top level key is + the table name and the next level key is the column name. + :param wait_for_completion_timeout: The period to wait for the request to finish. + By default, the request waits for 1 second for the query results. If the + query completes during this period, results are returned Otherwise, a query + ID is returned that can later be used to retrieve the results. 
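To make the new `esql.async_query` parameters above concrete, a minimal sketch follows; the ES|QL query string and index name are illustrative only.

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

# Submit an ES|QL query asynchronously; wait up to 2 seconds for it to finish,
# and keep the results around even if it completes within that window.
response = client.esql.async_query(
    query="FROM my-index | LIMIT 10",   # hypothetical index
    wait_for_completion_timeout="2s",
    keep_on_completion=True,
)

# If the query did not finish in time, the response carries an ID that can be
# passed to esql.async_query_get / esql.async_query_delete later.
query_id = response.get("id")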
+ """ + if query is None and body is None: + raise ValueError("Empty value passed for parameter 'query'") + __path_parts: t.Dict[str, str] = {} + __path = "/_query/async" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if delimiter is not None: + __query["delimiter"] = delimiter + if drop_null_columns is not None: + __query["drop_null_columns"] = drop_null_columns + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if format is not None: + __query["format"] = format + if human is not None: + __query["human"] = human + if keep_alive is not None: + __query["keep_alive"] = keep_alive + if keep_on_completion is not None: + __query["keep_on_completion"] = keep_on_completion + if pretty is not None: + __query["pretty"] = pretty + if wait_for_completion_timeout is not None: + __query["wait_for_completion_timeout"] = wait_for_completion_timeout + if not __body: + if query is not None: + __body["query"] = query + if columnar is not None: + __body["columnar"] = columnar + if filter is not None: + __body["filter"] = filter + if locale is not None: + __body["locale"] = locale + if params is not None: + __body["params"] = params + if profile is not None: + __body["profile"] = profile + if tables is not None: + __body["tables"] = tables + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="esql.async_query", + path_parts=__path_parts, + ) + + @_rewrite_parameters() + def async_query_delete( + self, + *, + id: str, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Delete an async ES|QL query. If the query is still running, it is cancelled. + Otherwise, the stored results are deleted. If the Elasticsearch security features + are enabled, only the following users can use this API to delete a query: * The + authenticated user that submitted the original query request * Users with the + `cancel_task` cluster privilege + + ``_ + + :param id: The unique identifier of the query. A query ID is provided in the + ES|QL async query API response for a query that does not complete in the + designated time. A query ID is also provided when the request was submitted + with the `keep_on_completion` parameter set to `true`. 
+ """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_query/async/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "DELETE", + __path, + params=__query, + headers=__headers, + endpoint_id="esql.async_query_delete", + path_parts=__path_parts, + ) + + @_rewrite_parameters() + def async_query_get( + self, + *, + id: str, + drop_null_columns: t.Optional[bool] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + wait_for_completion_timeout: t.Optional[ + t.Union[str, t.Literal[-1], t.Literal[0]] + ] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Get async ES|QL query results. Get the current status and available results or + stored results for an ES|QL asynchronous query. If the Elasticsearch security + features are enabled, only the user who first submitted the ES|QL query can retrieve + the results using this API. + + ``_ + + :param id: The unique identifier of the query. A query ID is provided in the + ES|QL async query API response for a query that does not complete in the + designated time. A query ID is also provided when the request was submitted + with the `keep_on_completion` parameter set to `true`. + :param drop_null_columns: Indicates whether columns that are entirely `null` + will be removed from the `columns` and `values` portion of the results. If + `true`, the response will include an extra section under the name `all_columns` + which has the name of all the columns. + :param keep_alive: The period for which the query and its results are stored + in the cluster. When this period expires, the query and its results are deleted, + even if the query is still ongoing. + :param wait_for_completion_timeout: The period to wait for the request to finish. + By default, the request waits for complete query results. If the request + completes during the period specified in this parameter, complete query results + are returned. Otherwise, the response returns an `is_running` value of `true` + and no results. 
+ """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_query/async/{__path_parts["id"]}' + __query: t.Dict[str, t.Any] = {} + if drop_null_columns is not None: + __query["drop_null_columns"] = drop_null_columns + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if keep_alive is not None: + __query["keep_alive"] = keep_alive + if pretty is not None: + __query["pretty"] = pretty + if wait_for_completion_timeout is not None: + __query["wait_for_completion_timeout"] = wait_for_completion_timeout + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="esql.async_query_get", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=( "query", diff --git a/elasticsearch/_sync/client/features.py b/elasticsearch/_sync/client/features.py index 5b2fcaab7..14cb4f156 100644 --- a/elasticsearch/_sync/client/features.py +++ b/elasticsearch/_sync/client/features.py @@ -32,6 +32,7 @@ def get_features( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -48,6 +49,8 @@ def get_features( the plugin that defines that feature must be installed on the master node. ``_ + + :param master_timeout: Period to wait for a connection to the master node. """ __path_parts: t.Dict[str, str] = {} __path = "/_features" @@ -58,6 +61,8 @@ def get_features( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -78,6 +83,7 @@ def reset_features( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -97,6 +103,8 @@ def reset_features( individual nodes. ``_ + + :param master_timeout: Period to wait for a connection to the master node. """ __path_parts: t.Dict[str, str] = {} __path = "/_features/_reset" @@ -107,6 +115,8 @@ def reset_features( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} diff --git a/elasticsearch/_sync/client/ilm.py b/elasticsearch/_sync/client/ilm.py index 6ace9ee5c..b2591fd90 100644 --- a/elasticsearch/_sync/client/ilm.py +++ b/elasticsearch/_sync/client/ilm.py @@ -90,7 +90,6 @@ def explain_lifecycle( only_errors: t.Optional[bool] = None, only_managed: t.Optional[bool] = None, pretty: t.Optional[bool] = None, - timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Explain the lifecycle state. 
Get the current lifecycle status for one or more @@ -112,8 +111,6 @@ def explain_lifecycle( while executing the policy, or attempting to use a policy that does not exist. :param only_managed: Filters the returned indices to only indices that are managed by ILM. - :param timeout: Period to wait for a response. If no response is received before - the timeout expires, the request fails and returns an error. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -134,8 +131,6 @@ def explain_lifecycle( __query["only_managed"] = only_managed if pretty is not None: __query["pretty"] = pretty - if timeout is not None: - __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", @@ -341,8 +336,8 @@ def move_to_step( ``_ :param index: The name of the index whose lifecycle step is to change - :param current_step: - :param next_step: + :param current_step: The step that the index is expected to be in. + :param next_step: The step that you want to run. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -552,8 +547,11 @@ def start( ``_ - :param master_timeout: - :param timeout: + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_ilm/start" @@ -601,8 +599,11 @@ def stop( ``_ - :param master_timeout: - :param timeout: + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_ilm/stop" diff --git a/elasticsearch/_sync/client/indices.py b/elasticsearch/_sync/client/indices.py index 964721138..40062036b 100644 --- a/elasticsearch/_sync/client/indices.py +++ b/elasticsearch/_sync/client/indices.py @@ -143,8 +143,12 @@ def analyze( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get tokens from text analysis. The analyze API performs [analysis](https://www.elastic.co/guide/en/elasticsearch/reference/current/analysis.html) - on a text string and returns the resulting tokens. + Get tokens from text analysis. The analyze API performs analysis on a text string + and returns the resulting tokens. Generating excessive amount of tokens may cause + a node to run out of memory. The `index.analyze.max_token_count` setting enables + you to limit the number of tokens that can be produced. If more than this limit + of tokens gets generated, an error occurs. The `_analyze` endpoint without a + specified index will always use `10000` as its limit. ``_ @@ -246,7 +250,10 @@ def clear_cache( ) -> ObjectApiResponse[t.Any]: """ Clear the cache. Clear the cache of one or more indices. For data streams, the - API clears the caches of the stream's backing indices. + API clears the caches of the stream's backing indices. By default, the clear + cache API clears all caches. To clear only specific caches, use the `fielddata`, + `query`, or `request` parameters. 
To clear the cache only of specific fields, + use the `fields` parameter. ``_ @@ -347,10 +354,28 @@ def clone( the new index, which is a much more time consuming process. * Finally, it recovers the target index as though it were a closed index which had just been re-opened. IMPORTANT: Indices can only be cloned if they meet the following requirements: + * The index must be marked as read-only and have a cluster health status of green. * The target index must not exist. * The source index must have the same number of primary shards as the target index. * The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing - index. + index. The current write index on a data stream cannot be cloned. In order to + clone the current write index, the data stream must first be rolled over so that + a new write index is created and then the previous write index can be cloned. + NOTE: Mappings cannot be specified in the `_clone` request. The mappings of the + source index will be used for the target index. **Monitor the cloning process** + The cloning process can be monitored with the cat recovery API or the cluster + health API can be used to wait until all primary shards have been allocated by + setting the `wait_for_status` parameter to `yellow`. The `_clone` API returns + as soon as the target index has been added to the cluster state, before any shards + have been allocated. At this point, all shards are in the state unassigned. If, + for any reason, the target index can't be allocated, its primary shard will remain + unassigned until it can be allocated on that node. Once the primary shard is + allocated, it moves to state initializing, and the clone process begins. When + the clone operation completes, the shard will become active. At that point, Elasticsearch + will try to allocate any replicas and may decide to relocate the primary shard + to another node. **Wait for active shards** Because the clone operation creates + a new index to clone the shards to, the wait for active shards setting on index + creation applies to the clone index action as well. ``_ @@ -536,7 +561,26 @@ def create( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create an index. Creates a new index. + Create an index. You can use the create index API to add a new index to an Elasticsearch + cluster. When creating an index, you can specify the following: * Settings for + the index. * Mappings for fields in the index. * Index aliases **Wait for active + shards** By default, index creation will only return a response to the client + when the primary copies of each shard have been started, or the request times + out. The index creation response will indicate what happened. For example, `acknowledged` + indicates whether the index was successfully created in the cluster, `while shards_acknowledged` + indicates whether the requisite number of shard copies were started for each + shard in the index before timing out. Note that it is still possible for either + `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation + to be successful. These values simply indicate whether the operation completed + before the timeout. If `acknowledged` is false, the request timed out before + the cluster state was updated with the newly created index, but it probably will + be created sometime soon. 
If `shards_acknowledged` is false, then the request + timed out before the requisite number of shards were started (by default just + the primaries), even if the cluster state was successfully updated to reflect + the newly created index (that is to say, `acknowledged` is `true`). You can change + the default of only waiting for the primary shards to start through the index + setting `index.write.wait_for_active_shards`. Note that changing this setting + will also affect the `wait_for_active_shards` value on all subsequent write operations. ``_ @@ -732,7 +776,11 @@ def delete( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete indices. Deletes one or more indices. + Delete indices. Deleting an index deletes its documents, shards, and metadata. + It does not delete related Kibana components, such as data views, visualizations, + or dashboards. You cannot delete the current write index of a data stream. To + delete the index, you must roll over the data stream so a new write index is + created. You can then use the delete index API to delete the previous write index. ``_ @@ -804,7 +852,7 @@ def delete_alias( """ Delete an alias. Removes a data stream or index from an alias. - ``_ + ``_ :param index: Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). @@ -1034,7 +1082,7 @@ def delete_template( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Deletes a legacy index template. + Delete a legacy index template. ``_ @@ -1100,7 +1148,13 @@ def disk_usage( Analyze the index disk usage. Analyze the disk usage of each field of an index or data stream. This API might not support indices created in previous Elasticsearch versions. The result of a small index can be inaccurate as some parts of an index - might not be analyzed by the API. + might not be analyzed by the API. NOTE: The total size of fields of the analyzed + shards of the index in the response is usually smaller than the index `store_size` + value because some small metadata files are ignored and some parts of data files + might not be scanned by the API. Since stored fields are stored together in a + compressed format, the sizes of stored fields are also estimates and can be inaccurate. + The stored size of the `_id` field is likely underestimated while the `_source` + field is overestimated. ``_ @@ -1249,8 +1303,7 @@ def exists( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Check indices. Checks if one or more indices, index aliases, or data streams - exist. + Check indices. Check if one or more indices, index aliases, or data streams exist. ``_ @@ -1447,16 +1500,21 @@ def exists_template( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Check existence of index templates. Returns information about whether a particular - index template exists. + Check existence of index templates. Get information about whether index templates + exist. Index templates define settings, mappings, and aliases that can be applied + automatically to new indices. IMPORTANT: This documentation is about legacy index + templates, which are deprecated and will be replaced by the composable templates + introduced in Elasticsearch 7.8. 
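The expanded `create` documentation above distinguishes `acknowledged` from `shards_acknowledged`; a small sketch of checking both follows. The index name, settings, and mapping are placeholders, and the keyword arguments assume the client's existing `indices.create` signature rather than anything changed in this patch.

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

resp = client.indices.create(
    index="my-index",  # hypothetical index
    settings={"number_of_shards": 1, "number_of_replicas": 1},
    mappings={"properties": {"title": {"type": "text"}}},
    wait_for_active_shards="all",  # wait for replicas too, not just primaries
)

# Both flags only indicate whether the operation completed before the timeout.
print(resp["acknowledged"], resp["shards_acknowledged"])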
``_ - :param name: The comma separated names of the index templates - :param flat_settings: Return settings in flat format (default: false) - :param local: Return local information, do not retrieve the state from master - node (default: false) - :param master_timeout: Explicit operation timeout for connection to master node + :param name: A comma-separated list of index template names used to limit the + request. Wildcard (`*`) expressions are supported. + :param flat_settings: Indicates whether to use a flat format for the response. + :param local: Indicates whether to get information from the local node only. + :param master_timeout: The period to wait for the master node. If the master + node is not available before the timeout expires, the request fails and returns + an error. To indicate that the request should never timeout, set it to `-1`. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -1559,9 +1617,7 @@ def field_usage_stats( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, - timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, @@ -1570,7 +1626,10 @@ def field_usage_stats( Get field usage stats. Get field usage information for each shard and field of an index. Field usage statistics are automatically captured when queries are running on a cluster. A shard-level search request that accesses a given field, - even if multiple times during that request, is counted as a single use. + even if multiple times during that request, is counted as a single use. The response + body reports the per-shard usage count of the data structures that back the fields + in the index. A given request will increment each count by a maximum value of + 1, even if the request accesses the same field multiple times. ``_ @@ -1589,11 +1648,6 @@ def field_usage_stats( in the statistics. :param ignore_unavailable: If `true`, missing or closed indices are not included in the response. - :param master_timeout: Period to wait for a connection to the master node. If - no response is received before the timeout expires, the request fails and - returns an error. - :param timeout: Period to wait for a response. If no response is received before - the timeout expires, the request fails and returns an error. :param wait_for_active_shards: The number of shard copies that must be active before proceeding with the operation. Set to all or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). @@ -1617,12 +1671,8 @@ def field_usage_stats( __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty - if timeout is not None: - __query["timeout"] = timeout if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards __headers = {"accept": "application/json"} @@ -1770,7 +1820,35 @@ def forcemerge( merges. So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. 
If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since - the new documents can't be backed up incrementally. + the new documents can't be backed up incrementally. **Blocks during a force merge** + Calls to this API block until the merge is complete (unless request contains + `wait_for_completion=false`). If the client connection is lost before completion + then the force merge process will continue in the background. Any new requests + to force merge the same indices will also block until the ongoing force merge + is complete. **Running force merge asynchronously** If the request contains `wait_for_completion=false`, + Elasticsearch performs some preflight checks, launches the request, and returns + a task you can use to get the status of the task. However, you can not cancel + this task as the force merge task is not cancelable. Elasticsearch creates a + record of this task as a document at `_tasks/`. When you are done with + a task, you should delete the task document so Elasticsearch can reclaim the + space. **Force merging multiple indices** You can force merge multiple indices + with a single request by targeting: * One or more data streams that contain multiple + backing indices * Multiple indices * One or more aliases * All data streams and + indices in a cluster Each targeted shard is force-merged separately using the + force_merge threadpool. By default each node only has a single `force_merge` + thread which means that the shards on that node are force-merged one at a time. + If you expand the `force_merge` threadpool on a node then it will force merge + its shards in parallel Force merge makes the storage for the shard being merged + temporarily increase, as it may require free space up to triple its size in case + `max_num_segments parameter` is set to `1`, to rewrite all segments into a new + one. **Data streams and time-based indices** Force-merging is useful for managing + a data stream's older backing indices and other time-based indices, particularly + after a rollover. In these cases, each index only receives indexing traffic for + a certain period of time. Once an index receive no more writes, its shards can + be force-merged to a single segment. This can be a good idea because single-segment + shards can sometimes use simpler and more efficient data structures to perform + searches. For example: ``` POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1 + ``` ``_ @@ -1863,8 +1941,8 @@ def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index information. Returns information about one or more indices. For data - streams, the API returns information about the stream’s backing indices. + Get index information. Get information about one or more indices. For data streams, + the API returns information about the stream’s backing indices. ``_ @@ -1955,7 +2033,7 @@ def get_alias( """ Get aliases. Retrieves information for one or more data stream or index aliases. - ``_ + ``_ :param index: Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). 
To target all data streams and indices, @@ -2080,6 +2158,42 @@ def get_data_lifecycle( path_parts=__path_parts, ) + @_rewrite_parameters() + def get_data_lifecycle_stats( + self, + *, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Get data stream lifecycle stats. Get statistics about the data streams that are + managed by a data stream lifecycle. + + ``_ + """ + __path_parts: t.Dict[str, str] = {} + __path = "/_lifecycle/stats" + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="indices.get_data_lifecycle_stats", + path_parts=__path_parts, + ) + @_rewrite_parameters() def get_data_stream( self, @@ -2179,11 +2293,13 @@ def get_field_mapping( """ Get mapping definitions. Retrieves mapping definitions for one or more fields. For data streams, the API retrieves field mappings for the stream’s backing indices. + This API is useful if you don't need a complete mapping or if an index mapping + contains a large number of fields. ``_ :param fields: Comma-separated list or wildcard expression of fields used to - limit returned information. + limit returned information. Supports wildcards (`*`). :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. @@ -2255,7 +2371,7 @@ def get_index_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index templates. Returns information about one or more index templates. + Get index templates. Get information about one or more index templates. ``_ @@ -2328,8 +2444,8 @@ def get_mapping( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get mapping definitions. Retrieves mapping definitions for one or more indices. - For data streams, the API retrieves mappings for the stream’s backing indices. + Get mapping definitions. For data streams, the API retrieves mappings for the + stream’s backing indices. ``_ @@ -2413,8 +2529,8 @@ def get_settings( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index settings. Returns setting information for one or more indices. For - data streams, returns setting information for the stream’s backing indices. + Get index settings. Get setting information for one or more indices. For data + streams, it returns setting information for the stream's backing indices. ``_ @@ -2501,7 +2617,9 @@ def get_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index templates. Retrieves information about one or more index templates. + Get index templates. Get information about one or more index templates. IMPORTANT: + This documentation is about legacy index templates, which are deprecated and + will be replaced by the composable templates introduced in Elasticsearch 7.8. ``_ @@ -2680,7 +2798,27 @@ def open( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Opens a closed index. 
For data streams, the API opens any closed backing indices. + Open a closed index. For data streams, the API opens any closed backing indices. + A closed index is blocked for read/write operations and does not allow all operations + that opened indices allow. It is not possible to index documents or to search + for documents in a closed index. This allows closed indices to not have to maintain + internal data structures for indexing or searching documents, resulting in a + smaller overhead on the cluster. When opening or closing an index, the master + is responsible for restarting the index shards to reflect the new state of the + index. The shards will then go through the normal recovery process. The data + of opened or closed indices is automatically replicated by the cluster to ensure + that enough shard copies are safely kept around at all times. You can open and + close multiple indices. An error is thrown if the request explicitly refers to + a missing index. This behavior can be turned off by using the `ignore_unavailable=true` + parameter. By default, you must explicitly name the indices you are opening or + closing. To open or close indices with `_all`, `*`, or other wildcard expressions, + change the `action.destructive_requires_name` setting to `false`. This setting + can also be changed with the cluster update settings API. Closed indices consume + a significant amount of disk-space which can cause problems in managed environments. + Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` + to `false`. Because opening or closing an index allocates its shards, the `wait_for_active_shards` + setting on index creation applies to the `_open` and `_close` index actions as + well. ``_ @@ -3023,7 +3161,33 @@ def put_index_template( ) -> ObjectApiResponse[t.Any]: """ Create or update an index template. Index templates define settings, mappings, - and aliases that can be applied automatically to new indices. + and aliases that can be applied automatically to new indices. Elasticsearch applies + templates to new indices based on an wildcard pattern that matches the index + name. Index templates are applied during data stream or index creation. For data + streams, these settings and mappings are applied when the stream's backing indices + are created. Settings and mappings specified in a create index API request override + any settings or mappings specified in an index template. Changes to index templates + do not affect existing indices, including the existing backing indices of a data + stream. You can use C-style `/* *\\/` block comments in index templates. You + can include comments anywhere in the request body, except before the opening + curly bracket. **Multiple matching templates** If multiple index templates match + the name of a new index or data stream, the template with the highest priority + is used. Multiple templates with overlapping index patterns at the same priority + are not allowed and an error will be thrown when attempting to create a template + matching an existing index template at identical priorities. **Composing aliases, + mappings, and settings** When multiple component templates are specified in the + `composed_of` field for an index template, they are merged in the order specified, + meaning that later component templates override earlier component templates. + Any mappings, settings, or aliases from the parent index template are merged + in next. 
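The component-template composition rules described above can be sketched as follows; the template, pattern, and component names are hypothetical, and the call assumes the client's existing `indices.put_index_template` parameters.

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

# Later entries in `composed_of` override earlier ones; settings given directly
# in `template` (and, later, on the create-index request itself) override both.
client.indices.put_index_template(
    name="logs-template",                         # hypothetical names throughout
    index_patterns=["logs-*"],
    composed_of=["logs-mappings", "logs-settings"],
    priority=200,                                 # highest matching priority wins
    template={"settings": {"number_of_shards": 1}},
)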
Finally, any configuration on the index request itself is merged. Mapping + definitions are merged recursively, which means that later mapping components + can introduce new field mappings and update the mapping configuration. If a field + mapping is already contained in an earlier component, its definition will be + completely overwritten by the later one. This recursive merging strategy applies + not only to field mappings, but also root options like `dynamic_templates` and + `meta`. If an earlier component contains a `dynamic_templates` block, then by + default new `dynamic_templates` entries are appended onto the end. If an entry + already exists with the same key, then it is overwritten by the new definition. ``_ @@ -3053,8 +3217,11 @@ def put_index_template( :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - :param meta: Optional user metadata about the index template. May have any contents. - This map is not automatically generated by Elasticsearch. + :param meta: Optional user metadata about the index template. It may have any + contents. It is not automatically generated or used by Elasticsearch. This + user-defined object is stored in the cluster state, so keeping it short is + preferable To unset the metadata, replace the template without specifying + it. :param priority: Priority to determine index template precedence when a new data stream or index is created. The index template with the highest priority is chosen. If no priority is specified the template is treated as though @@ -3063,7 +3230,9 @@ def put_index_template( :param template: Template to be applied. It may optionally include an `aliases`, `mappings`, or `settings` configuration. :param version: Version number used to manage index templates externally. This - number is not automatically generated by Elasticsearch. + number is not automatically generated by Elasticsearch. External systems + can use these version numbers to simplify template management. To unset a + version, replace the template without specifying one. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -3182,9 +3351,27 @@ def put_mapping( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update field mappings. Adds new fields to an existing data stream or index. You - can also use this API to change the search settings of existing fields. For data - streams, these changes are applied to all backing indices by default. + Update field mappings. Add new fields to an existing data stream or index. You + can also use this API to change the search settings of existing fields and add + new properties to existing object fields. For data streams, these changes are + applied to all backing indices by default. **Add multi-fields to an existing + field** Multi-fields let you index the same field in different ways. You can + use this API to update the fields mapping parameter and enable multi-fields for + an existing field. WARNING: If an index (or data stream) contains documents when + you add a multi-field, those documents will not have values for the new multi-field. + You can populate the new multi-field with the update by query API. **Change supported + mapping parameters for an existing field** The documentation for each mapping + parameter indicates whether you can update it for an existing field using this + API. 
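As a concrete, hypothetical instance of the multi-field workflow described above: add a keyword sub-field to an existing text field, then backfill existing documents with an update by query. Index and field names are placeholders, and the calls assume the client's existing `put_mapping` and `update_by_query` signatures.

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

# Add a `raw` keyword multi-field to an existing `city` text field.
client.indices.put_mapping(
    index="my-index",  # hypothetical index
    properties={
        "city": {
            "type": "text",
            "fields": {"raw": {"type": "keyword"}},
        }
    },
)

# Existing documents do not get values for the new multi-field automatically;
# an update by query over the index reindexes them in place to populate it.
client.update_by_query(index="my-index", conflicts="proceed")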
For example, you can use the update mapping API to update the `ignore_above` + parameter. **Change the mapping of an existing field** Except for supported mapping + parameters, you can't change the mapping or field type of an existing field. + Changing an existing field could invalidate data that's already indexed. If you + need to change the mapping of a field in a data stream's backing indices, refer + to documentation about modifying data streams. If you need to change the mapping + of a field in other indices, create a new index with the correct mapping and + reindex your data into that index. **Rename a field** Renaming a field would + invalidate data already indexed under the old field name. Instead, add an alias + field to create an alternate field name. ``_ @@ -3315,6 +3502,19 @@ def put_settings( """ Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default. + To revert a setting to the default value, use a null value. The list of per-index + settings that can be updated dynamically on live indices can be found in index + module documentation. To preserve existing settings from being updated, set the + `preserve_existing` parameter to `true`. NOTE: You can only define new analyzers + on closed indices. To add an analyzer, you must close the index, define the analyzer, + and reopen the index. You cannot close the write index of a data stream. To update + the analyzer for a data stream's write index and future backing indices, update + the analyzer in the index template used by the stream. Then roll over the data + stream to apply the new analyzer to the stream's write index and future backing + indices. This affects searches and any new data added to the stream after the + rollover. However, it does not affect the data stream's backing indices or their + existing data. To change the analyzer for existing backing indices, you must + create a new data stream and reindex your data into it. ``_ @@ -3428,7 +3628,14 @@ def put_template( according to their order. Index templates are only applied during index creation. Changes to index templates do not affect existing indices. Settings and mappings specified in create index API requests override any settings or mappings specified - in an index template. + in an index template. You can use C-style `/* *\\/` block comments in index templates. + You can include comments anywhere in the request body, except before the opening + curly bracket. **Indices matching multiple templates** Multiple index templates + can potentially match an index, in this case, both the settings and mappings + are merged into the final configuration of the index. The order of the merging + can be controlled using the order parameter, with lower order being applied first, + and higher orders overriding them. NOTE: Multiple matching templates with the + same order value will result in a non-deterministic merging order. ``_ @@ -3449,7 +3656,8 @@ def put_template( with lower values. :param settings: Configuration options for the index. :param version: Version number used to manage index templates externally. This - number is not automatically generated by Elasticsearch. + number is not automatically generated by Elasticsearch. To unset a version, + replace the template without specifying one. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -3510,23 +3718,25 @@ def recovery( """ Get index recovery information. 
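The note above about defining analyzers only on closed indices suggests a close / update settings / reopen sequence; a hedged sketch with placeholder names, assuming the client's existing `close`, `put_settings`, and `open` methods:

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

index = "my-index"  # hypothetical index (not the write index of a data stream)

# New analyzers can only be defined on a closed index.
client.indices.close(index=index)
client.indices.put_settings(
    index=index,
    settings={
        "analysis": {
            "analyzer": {
                "my_lowercase": {
                    "type": "custom",
                    "tokenizer": "standard",
                    "filter": ["lowercase"],
                }
            }
        }
    },
)
client.indices.open(index=index)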
Get information about ongoing and completed shard recoveries for one or more indices. For data streams, the API returns information - for the stream's backing indices. Shard recovery is the process of initializing - a shard copy, such as restoring a primary shard from a snapshot or creating a - replica shard from a primary shard. When a shard recovery completes, the recovered - shard is available for search and indexing. Recovery automatically occurs during - the following processes: * When creating an index for the first time. * When - a node rejoins the cluster and starts up any missing primary shard copies using - the data that it holds in its data path. * Creation of new replica shard copies - from the primary. * Relocation of a shard copy to a different node in the same - cluster. * A snapshot restore operation. * A clone, shrink, or split operation. - You can determine the cause of a shard recovery using the recovery or cat recovery - APIs. The index recovery API reports information about completed recoveries only - for shard copies that currently exist in the cluster. It only reports the last - recovery for each shard copy and does not report historical information about - earlier recoveries, nor does it report information about the recoveries of shard - copies that no longer exist. This means that if a shard copy completes a recovery - and then Elasticsearch relocates it onto a different node then the information - about the original recovery will not be shown in the recovery API. + for the stream's backing indices. All recoveries, whether ongoing or complete, + are kept in the cluster state and may be reported on at any time. Shard recovery + is the process of initializing a shard copy, such as restoring a primary shard + from a snapshot or creating a replica shard from a primary shard. When a shard + recovery completes, the recovered shard is available for search and indexing. + Recovery automatically occurs during the following processes: * When creating + an index for the first time. * When a node rejoins the cluster and starts up + any missing primary shard copies using the data that it holds in its data path. + * Creation of new replica shard copies from the primary. * Relocation of a shard + copy to a different node in the same cluster. * A snapshot restore operation. + * A clone, shrink, or split operation. You can determine the cause of a shard + recovery using the recovery or cat recovery APIs. The index recovery API reports + information about completed recoveries only for shard copies that currently exist + in the cluster. It only reports the last recovery for each shard copy and does + not report historical information about earlier recoveries, nor does it report + information about the recoveries of shard copies that no longer exist. This means + that if a shard copy completes a recovery and then Elasticsearch relocates it + onto a different node then the information about the original recovery will not + be shown in the recovery API. ``_ @@ -3590,7 +3800,17 @@ def refresh( """ Refresh an index. A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation - on the stream’s backing indices. + on the stream’s backing indices. By default, Elasticsearch periodically refreshes + indices every second, but only on indices that have received one search request + or more in the last 30 seconds. You can change this default interval with the + `index.refresh_interval` setting. 
Refresh requests are synchronous and do not + return a response until the refresh operation completes. Refreshes are resource-intensive. + To ensure good cluster performance, it's recommended to wait for Elasticsearch's + periodic refresh rather than performing an explicit refresh when possible. If + your application workflow indexes documents and then runs a search to retrieve + the indexed document, it's recommended to use the index API's `refresh=wait_for` + query parameter option. This option ensures the indexing operation waits for + a periodic refresh before running the search. ``_ @@ -3752,6 +3972,24 @@ def resolve_cluster( search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index). * Cluster version information, including the Elasticsearch server version. + For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information + about the local cluster and all remotely configured clusters that start with + the alias `cluster*`. Each cluster returns information about whether it has any + indices, aliases or data streams that match `my-index-*`. **Advantages of using + this endpoint before a cross-cluster search** You may want to exclude a cluster + or index from a search when: * A remote cluster is not currently connected and + is configured with `skip_unavailable=false`. Running a cross-cluster search under + those conditions will cause the entire search to fail. * A cluster has no matching + indices, aliases or data streams for the index expression (or your user does + not have permissions to search them). For example, suppose your index expression + is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data + streams that match `logs*`. In that case, that cluster will return no results + from that cluster if you include it in a cross-cluster search. * The index expression + (combined with any query parameters you specify) will likely cause an exception + to be thrown when you do the search. In these cases, the "error" field in the + `_resolve/cluster` response will be present. (This is also where security/permission + errors will be shown.) * A remote cluster is an older version that does not support + the feature you want to use in your search. ``_ @@ -3898,7 +4136,33 @@ def rollover( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Roll over to a new index. Creates a new index for a data stream or index alias. + Roll over to a new index. TIP: It is recommended to use the index lifecycle rollover + action to automate rollovers. The rollover API creates a new index for a data + stream or index alias. The API behavior depends on the rollover target. **Roll + over a data stream** If you roll over a data stream, the API creates a new write + index for the stream. The stream's previous write index becomes a regular backing + index. A rollover also increments the data stream's generation. **Roll over an + index alias with a write index** TIP: Prior to Elasticsearch 7.9, you'd typically + use an index alias with a write index to manage time series data. Data streams + replace this functionality, require less maintenance, and automatically integrate + with data tiers. If an index alias points to multiple indices, one of the indices + must be a write index. The rollover API creates a new write index for the alias + with `is_write_index` set to `true`. 
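Editor's note: the `refresh=wait_for` pattern recommended above looks like this in the Python client; index and document contents are placeholders:

```
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Prefer refresh="wait_for" over an explicit refresh when you need to search
# for a document immediately after indexing it.
es.index(
    index="my-index",
    id="1",
    document={"user": "kimchy", "message": "trying out Elasticsearch"},
    refresh="wait_for",  # block until a periodic refresh makes the doc searchable
)
resp = es.search(index="my-index", query={"match": {"user": "kimchy"}})
print(resp["hits"]["total"])
```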
The API also `sets is_write_index` to `false` + for the previous write index. **Roll over an index alias with one index** If + you roll over an index alias that points to only one index, the API creates a + new index for the alias and removes the original index from the alias. NOTE: + A rollover creates a new index and is subject to the `wait_for_active_shards` + setting. **Increment index names for an alias** When you roll over an index alias, + you can specify a name for the new index. If you don't specify a name and the + current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, + the new index name increments that number. For example, if you roll over an alias + with a current index of `my-index-000001`, the rollover creates a new index named + `my-index-000002`. This number is always six characters and zero-padded, regardless + of the previous index's name. If you use an index alias for time series data, + you can use date math in the index name to track the rollover date. For example, + you can create an alias that points to an index named ``. + If you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`. + If you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`. ``_ @@ -4269,8 +4533,8 @@ def simulate_index_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Simulate an index. Returns the index configuration that would be applied to the - specified index from an existing index template. + Simulate an index. Get the index configuration that would be applied to the specified + index from an existing index template. ``_ @@ -4347,7 +4611,7 @@ def simulate_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Simulate an index template. Returns the index configuration that would be applied + Simulate an index template. Get the index configuration that would be applied by a particular index template. ``_ @@ -4481,25 +4745,29 @@ def split( """ Split an index. Split an index into a new index with more primary shards. * Before you can split an index: * The index must be read-only. * The cluster health status - must be green. The number of times the index can be split (and the number of - shards that each original shard can be split into) is determined by the `index.number_of_routing_shards` - setting. The number of routing shards specifies the hashing space that is used - internally to distribute documents across shards with consistent hashing. For - instance, a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x - 3) could be split by a factor of 2 or 3. A split operation: * Creates a new target - index with the same definition as the source index, but with a larger number - of primary shards. * Hard-links segments from the source index into the target - index. If the file system doesn't support hard-linking, all segments are copied - into the new index, which is a much more time consuming process. * Hashes all - documents again, after low level files are created, to delete documents that - belong to a different shard. * Recovers the target index as though it were a - closed index which had just been re-opened. IMPORTANT: Indices can only be split - if they satisfy the following requirements: * The target index must not exist. - * The source index must have fewer primary shards than the target index. 
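Editor's note: a sketch of the rollover flow described above, using a hypothetical alias `my-alias` whose current write index is `my-index-000001`; the conditions shown are illustrative, not required:

```
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Roll over the alias (or data stream); if the current index is
# "my-index-000001", the new write index becomes "my-index-000002".
resp = es.indices.rollover(
    alias="my-alias",
    conditions={"max_age": "7d", "max_docs": 10_000_000},
)
print(resp["rolled_over"], resp["old_index"], resp["new_index"])
```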
* The - number of primary shards in the target index must be a multiple of the number - of primary shards in the source index. * The node handling the split process - must have sufficient free disk space to accommodate a second copy of the existing - index. + must be green. You can do make an index read-only with the following request + using the add index block API: ``` PUT /my_source_index/_block/write ``` The + current write index on a data stream cannot be split. In order to split the current + write index, the data stream must first be rolled over so that a new write index + is created and then the previous write index can be split. The number of times + the index can be split (and the number of shards that each original shard can + be split into) is determined by the `index.number_of_routing_shards` setting. + The number of routing shards specifies the hashing space that is used internally + to distribute documents across shards with consistent hashing. For instance, + a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be + split by a factor of 2 or 3. A split operation: * Creates a new target index + with the same definition as the source index, but with a larger number of primary + shards. * Hard-links segments from the source index into the target index. If + the file system doesn't support hard-linking, all segments are copied into the + new index, which is a much more time consuming process. * Hashes all documents + again, after low level files are created, to delete documents that belong to + a different shard. * Recovers the target index as though it were a closed index + which had just been re-opened. IMPORTANT: Indices can only be split if they satisfy + the following requirements: * The target index must not exist. * The source index + must have fewer primary shards than the target index. * The number of primary + shards in the target index must be a multiple of the number of primary shards + in the source index. * The node handling the split process must have sufficient + free disk space to accommodate a second copy of the existing index. ``_ diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py index 905d75294..fc3c46861 100644 --- a/elasticsearch/_sync/client/inference.py +++ b/elasticsearch/_sync/client/inference.py @@ -317,3 +317,82 @@ def put( endpoint_id="inference.put", path_parts=__path_parts, ) + + @_rewrite_parameters( + body_name="inference_config", + ) + def update( + self, + *, + inference_id: str, + inference_config: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Mapping[str, t.Any]] = None, + task_type: t.Optional[ + t.Union[ + str, + t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"], + ] + ] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Update an inference endpoint. Modify `task_settings`, secrets (within `service_settings`), + or `num_allocations` for an inference endpoint, depending on the specific endpoint + service and `task_type`. IMPORTANT: The inference APIs enable you to use certain + services, such as built-in machine learning models (ELSER, E5), models uploaded + through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, + Watsonx.ai, or Hugging Face. 
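Editor's note: the split procedure above (write-block the source, then split into a target whose primary shard count is a multiple of the source's) can be sketched in Python as follows; index names are placeholders taken from the REST example in the docstring:

```
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Make the source index read-only before splitting it.
es.indices.add_block(index="my_source_index", block="write")

# Split into a target with more primary shards.
es.indices.split(
    index="my_source_index",
    target="my_target_index",
    settings={"index.number_of_shards": 2},
)
```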
For built-in models and models uploaded through + Eland, the inference APIs offer an alternative way to use and manage trained + models. However, if you do not plan to use the inference APIs to use these models + or if you want to use non-NLP models, use the machine learning trained model + APIs. + + ``_ + + :param inference_id: The unique identifier of the inference endpoint. + :param inference_config: + :param task_type: The type of inference task that the model performs. + """ + if inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'inference_id'") + if inference_config is None and body is None: + raise ValueError( + "Empty value passed for parameters 'inference_config' and 'body', one of them should be set." + ) + elif inference_config is not None and body is not None: + raise ValueError("Cannot set both 'inference_config' and 'body'") + __path_parts: t.Dict[str, str] + if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: + __path_parts = { + "task_type": _quote(task_type), + "inference_id": _quote(inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}/_update' + elif inference_id not in SKIP_IN_PATH: + __path_parts = {"inference_id": _quote(inference_id)} + __path = f'/_inference/{__path_parts["inference_id"]}/_update' + else: + raise ValueError("Couldn't find a path for the given parameters") + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __body = inference_config if inference_config is not None else body + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.update", + path_parts=__path_parts, + ) diff --git a/elasticsearch/_sync/client/ingest.py b/elasticsearch/_sync/client/ingest.py index 513d62bb2..7d8b2d154 100644 --- a/elasticsearch/_sync/client/ingest.py +++ b/elasticsearch/_sync/client/ingest.py @@ -226,7 +226,6 @@ def get_geoip_database( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -238,9 +237,6 @@ def get_geoip_database( :param id: Comma-separated list of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`. - :param master_timeout: Period to wait for a connection to the master node. If - no response is received before the timeout expires, the request fails and - returns an error. 
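Editor's note: a sketch of calling the new `inference.update` helper added above; the endpoint id is a placeholder and the exact keys accepted inside `inference_config` depend on the service backing the endpoint (here a rotated secret under `service_settings`, per the docstring):

```
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster

es.inference.update(
    inference_id="my-embeddings",        # placeholder endpoint id
    task_type="text_embedding",
    inference_config={
        "service_settings": {"api_key": "<new-api-key>"},  # rotate the stored secret
    },
)
```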
""" __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: @@ -256,8 +252,6 @@ def get_geoip_database( __query["filter_path"] = filter_path if human is not None: __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} diff --git a/elasticsearch/_sync/client/license.py b/elasticsearch/_sync/client/license.py index 2f499dc3a..0dd83ee24 100644 --- a/elasticsearch/_sync/client/license.py +++ b/elasticsearch/_sync/client/license.py @@ -32,7 +32,9 @@ def delete( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Delete the license. When the license expires, your subscription level reverts @@ -40,6 +42,10 @@ def delete( can use this API. ``_ + + :param master_timeout: Period to wait for a connection to the master node. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_license" @@ -50,8 +56,12 @@ def delete( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", @@ -196,7 +206,9 @@ def post( human: t.Optional[bool] = None, license: t.Optional[t.Mapping[str, t.Any]] = None, licenses: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -215,6 +227,9 @@ def post( :param license: :param licenses: A sequence of one or more JSON documents containing the license information. + :param master_timeout: Period to wait for a connection to the master node. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_license" @@ -228,8 +243,12 @@ def post( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if license is not None: __body["license"] = license @@ -258,7 +277,9 @@ def post_start_basic( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Start a basic license. 
Start an indefinite basic license, which gives access @@ -273,6 +294,9 @@ def post_start_basic( :param acknowledge: whether the user has acknowledged acknowledge messages (default: false) + :param master_timeout: Period to wait for a connection to the master node. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_license/start_basic" @@ -285,8 +309,12 @@ def post_start_basic( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", @@ -305,6 +333,7 @@ def post_start_trial( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, type_query_string: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: @@ -320,6 +349,7 @@ def post_start_trial( :param acknowledge: whether the user has acknowledged acknowledge messages (default: false) + :param master_timeout: Period to wait for a connection to the master node. :param type_query_string: """ __path_parts: t.Dict[str, str] = {} @@ -333,6 +363,8 @@ def post_start_trial( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if type_query_string is not None: diff --git a/elasticsearch/_sync/client/logstash.py b/elasticsearch/_sync/client/logstash.py index 5aee9c834..382aedfc7 100644 --- a/elasticsearch/_sync/client/logstash.py +++ b/elasticsearch/_sync/client/logstash.py @@ -37,7 +37,8 @@ def delete_pipeline( ) -> ObjectApiResponse[t.Any]: """ Delete a Logstash pipeline. Delete a pipeline that is used for Logstash Central - Management. + Management. If the request succeeds, you receive an empty response with an appropriate + status code. ``_ diff --git a/elasticsearch/_sync/client/migration.py b/elasticsearch/_sync/client/migration.py index e11cf7ca0..6b96c5203 100644 --- a/elasticsearch/_sync/client/migration.py +++ b/elasticsearch/_sync/client/migration.py @@ -39,7 +39,7 @@ def deprecations( Get deprecation information. Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version. TIP: This APIs is designed for indirect use by the - Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. + Upgrade Assistant. You are strongly recommended to use the Upgrade Assistant. ``_ @@ -86,9 +86,9 @@ def get_feature_upgrade_status( to how features store configuration information and data in system indices. Check which features need to be migrated and the status of any migrations that are in progress. TIP: This API is designed for indirect use by the Upgrade Assistant. - We strongly recommend you use the Upgrade Assistant. + You are strongly recommended to use the Upgrade Assistant. 
- ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_migration/system_features" @@ -127,7 +127,7 @@ def post_feature_upgrade( unavailable during the migration process. TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_migration/system_features" diff --git a/elasticsearch/_sync/client/ml.py b/elasticsearch/_sync/client/ml.py index 9115844dd..20949046c 100644 --- a/elasticsearch/_sync/client/ml.py +++ b/elasticsearch/_sync/client/ml.py @@ -686,6 +686,7 @@ def delete_trained_model( force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Delete an unreferenced trained model. The request deletes a trained inference @@ -696,6 +697,8 @@ def delete_trained_model( :param model_id: The unique identifier of the trained model. :param force: Forcefully deletes a trained model that is referenced by ingest pipelines or has a started deployment. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. """ if model_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_id'") @@ -712,6 +715,8 @@ def delete_trained_model( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", @@ -3205,7 +3210,11 @@ def put_data_frame_analytics( """ Create a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination - index. + index. By default, the query used in the source configuration is `{"match_all": + {}}`. If the destination index does not exist, it is created automatically when + you start the job. If you supply only a subset of the regression or classification + parameters, hyperparameter optimization occurs. It determines a value for each + of the undefined parameters. ``_ @@ -3382,7 +3391,8 @@ def put_datafeed( an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay') - at each interval. When Elasticsearch security features are enabled, your datafeed + at each interval. By default, the datafeed uses the following query: `{"match_all": + {"boost": 1}}`. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or @@ -3645,7 +3655,8 @@ def put_job( ) -> ObjectApiResponse[t.Any]: """ Create an anomaly detection job. If you include a `datafeed_config`, you must - have read index privileges on the source index. + have read index privileges on the source index. If you include a `datafeed_config` + but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`. 
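Editor's note: the new `timeout` parameter on trained-model deletion added above can be used like this; the model id is a placeholder:

```
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Force-delete a trained model that may still be referenced by an ingest
# pipeline, waiting up to 30 seconds for the cluster to acknowledge it.
es.ml.delete_trained_model(model_id="my-trained-model", force=True, timeout="30s")
```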
``_ @@ -5451,7 +5462,7 @@ def validate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Validates an anomaly detection job. + Validate an anomaly detection job. ``_ diff --git a/elasticsearch/_sync/client/nodes.py b/elasticsearch/_sync/client/nodes.py index 13e5254ef..dfa10aab5 100644 --- a/elasticsearch/_sync/client/nodes.py +++ b/elasticsearch/_sync/client/nodes.py @@ -50,9 +50,9 @@ def clear_repositories_metering_archive( ``_ :param node_id: Comma-separated list of node IDs or names used to limit returned - information. All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). - :param max_archive_version: Specifies the maximum [archive_version](https://www.elastic.co/guide/en/elasticsearch/reference/current/get-repositories-metering-api.html#get-repositories-metering-api-response-body) - to be cleared from the archive. + information. + :param max_archive_version: Specifies the maximum `archive_version` to be cleared + from the archive. """ if node_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'node_id'") @@ -138,7 +138,6 @@ def hot_threads( human: t.Optional[bool] = None, ignore_idle_threads: t.Optional[bool] = None, interval: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, snapshots: t.Optional[int] = None, sort: t.Optional[ @@ -161,9 +160,6 @@ def hot_threads( :param ignore_idle_threads: If true, known idle threads (e.g. waiting in a socket select, or to get a task from an empty queue) are filtered out. :param interval: The interval to do the second sampling of threads. - :param master_timeout: Period to wait for a connection to the master node. If - no response is received before the timeout expires, the request fails and - returns an error. :param snapshots: Number of samples of thread stacktrace. :param sort: The sort order for 'cpu' type (default: total) :param threads: Specifies the number of hot threads to provide information for. @@ -189,8 +185,6 @@ def hot_threads( __query["ignore_idle_threads"] = ignore_idle_threads if interval is not None: __query["interval"] = interval - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if snapshots is not None: @@ -223,7 +217,6 @@ def info( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: @@ -238,9 +231,6 @@ def info( :param metric: Limits the information returned to the specific metrics. Supports a comma-separated list, such as http,ingest. :param flat_settings: If true, returns settings in flat format. - :param master_timeout: Period to wait for a connection to the master node. If - no response is received before the timeout expires, the request fails and - returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. 
""" @@ -266,8 +256,6 @@ def info( __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: @@ -374,7 +362,6 @@ def stats( level: t.Optional[ t.Union[str, t.Literal["cluster", "indices", "shards"]] ] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, types: t.Optional[t.Sequence[str]] = None, @@ -406,9 +393,6 @@ def stats( from segments that are not loaded into memory. :param level: Indicates whether statistics are aggregated at the cluster, index, or shard level. - :param master_timeout: Period to wait for a connection to the master node. If - no response is received before the timeout expires, the request fails and - returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. :param types: A comma-separated list of document types for the indexing index @@ -467,8 +451,6 @@ def stats( __query["include_unloaded_segments"] = include_unloaded_segments if level is not None: __query["level"] = level - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: diff --git a/elasticsearch/_sync/client/query_rules.py b/elasticsearch/_sync/client/query_rules.py index 57e2d74ee..b05f8b291 100644 --- a/elasticsearch/_sync/client/query_rules.py +++ b/elasticsearch/_sync/client/query_rules.py @@ -37,7 +37,9 @@ def delete_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a query rule. Delete a query rule within a query ruleset. + Delete a query rule. Delete a query rule within a query ruleset. This is a destructive + action that is only recoverable by re-adding the same rule with the create or + update query rule API. ``_ @@ -85,7 +87,8 @@ def delete_ruleset( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a query ruleset. + Delete a query ruleset. Remove a query ruleset and its associated data. This + is a destructive action that is not recoverable. ``_ @@ -221,8 +224,8 @@ def list_rulesets( ``_ - :param from_: Starting offset (default: 0) - :param size: specifies a max number of results to get + :param from_: The offset from the first result to fetch. + :param size: The maximum number of results to retrieve. """ __path_parts: t.Dict[str, str] = {} __path = "/_query_rules" @@ -271,16 +274,25 @@ def put_rule( ) -> ObjectApiResponse[t.Any]: """ Create or update a query rule. Create or update a query rule within a query ruleset. + IMPORTANT: Due to limitations within pinned queries, you can only pin documents + using ids or docs, but cannot use both in single rule. It is advised to use one + or the other in query rulesets, to avoid errors. Additionally, pinned queries + have a maximum limit of 100 pinned hits. If multiple matching rules pin more + than 100 documents, only the first 100 documents are pinned in the order they + are specified in the ruleset. ``_ :param ruleset_id: The unique identifier of the query ruleset containing the - rule to be created or updated + rule to be created or updated. 
:param rule_id: The unique identifier of the query rule within the specified - ruleset to be created or updated - :param actions: - :param criteria: - :param type: + ruleset to be created or updated. + :param actions: The actions to take when the rule is matched. The format of this + action depends on the rule type. + :param criteria: The criteria that must be met for the rule to be applied. If + multiple criteria are specified for a rule, all criteria must be met for + the rule to be applied. + :param type: The type of rule. :param priority: """ if ruleset_id in SKIP_IN_PATH: @@ -345,12 +357,19 @@ def put_ruleset( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a query ruleset. + Create or update a query ruleset. There is a limit of 100 rules per ruleset. + This limit can be increased by using the `xpack.applications.rules.max_rules_per_ruleset` + cluster setting. IMPORTANT: Due to limitations within pinned queries, you can + only select documents using `ids` or `docs`, but cannot use both in single rule. + It is advised to use one or the other in query rulesets, to avoid errors. Additionally, + pinned queries have a maximum limit of 100 pinned hits. If multiple matching + rules pin more than 100 documents, only the first 100 documents are pinned in + the order they are specified in the ruleset. ``_ :param ruleset_id: The unique identifier of the query ruleset to be created or - updated + updated. :param rules: """ if ruleset_id in SKIP_IN_PATH: @@ -405,7 +424,9 @@ def test( :param ruleset_id: The unique identifier of the query ruleset to be created or updated - :param match_criteria: + :param match_criteria: The match criteria to apply to rules in the given query + ruleset. Match criteria should match the keys defined in the `criteria.metadata` + field of the rule. """ if ruleset_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'ruleset_id'") diff --git a/elasticsearch/_sync/client/rollup.py b/elasticsearch/_sync/client/rollup.py index 6dd9f386e..f957ad80a 100644 --- a/elasticsearch/_sync/client/rollup.py +++ b/elasticsearch/_sync/client/rollup.py @@ -397,14 +397,37 @@ def rollup_search( rolled-up documents utilize a different document structure than the original data. It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given - the original query. + the original query. The request body supports a subset of features from the regular + search API. The following functionality is not available: `size`: Because rollups + work on pre-aggregated data, no search hits can be returned and so size must + be set to zero or omitted entirely. `highlighter`, `suggestors`, `post_filter`, + `profile`, `explain`: These are similarly disallowed. **Searching both historical + rollup and non-rollup data** The rollup search API has the capability to search + across both "live" non-rollup data and the aggregated rollup data. This is done + by simply adding the live indices to the URI. For example: ``` GET sensor-1,sensor_rollup/_rollup_search + { "size": 0, "aggregations": { "max_temperature": { "max": { "field": "temperature" + } } } } ``` The rollup search endpoint does two things when the search runs: + * The original request is sent to the non-rollup index unaltered. * A rewritten + version of the original request is sent to the rollup index. 
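Editor's note: a sketch of the query-rule workflow documented above, creating a pinned rule and dry-running it with the test API; ruleset/rule ids and criteria values are placeholders, and the criteria/actions shapes follow the docstring's description (pin with `ids` or `docs`, never both in a single rule):

```
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Pin a document whenever the user's query exactly matches "pugs".
es.query_rules.put_rule(
    ruleset_id="my-ruleset",
    rule_id="my-rule",
    type="pinned",
    criteria=[{"type": "exact", "metadata": "user_query", "values": ["pugs"]}],
    actions={"ids": ["id1"]},  # use either "ids" or "docs", not both
)

# The match criteria keys must line up with the rules' criteria.metadata fields.
resp = es.query_rules.test(ruleset_id="my-ruleset", match_criteria={"user_query": "pugs"})
print(resp)
```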
When the two responses + are received, the endpoint rewrites the rollup response and merges the two together. + During the merging process, if there is any overlap in buckets between the two + responses, the buckets from the non-rollup index are used. ``_ - :param index: Enables searching rolled-up data using the standard Query DSL. + :param index: A comma-separated list of data streams and indices used to limit + the request. This parameter has the following rules: * At least one data + stream, index, or wildcard expression must be specified. This target can + include a rollup or non-rollup index. For data streams, the stream's backing + indices can only serve as non-rollup indices. Omitting the parameter or using + `_all` are not permitted. * Multiple non-rollup indices may be specified. + * Only one rollup index may be specified. If more than one are supplied, + an exception occurs. * Wildcard expressions (`*`) may be used. If they match + more than one rollup index, an exception occurs. However, you can use an + expression to match multiple non-rollup indices or data streams. :param aggregations: Specifies aggregations. :param aggs: Specifies aggregations. - :param query: Specifies a DSL query. + :param query: Specifies a DSL query that is subject to some limitations. :param rest_total_hits_as_int: Indicates whether hits.total should be rendered as an integer or an object in the rest search response :param size: Must be zero if set, as rollups work on pre-aggregated data. @@ -506,14 +529,23 @@ def stop_job( ) -> ObjectApiResponse[t.Any]: """ Stop rollup jobs. If you try to stop a job that does not exist, an exception - occurs. If you try to stop a job that is already stopped, nothing happens. + occurs. If you try to stop a job that is already stopped, nothing happens. Since + only a stopped job can be deleted, it can be useful to block the API until the + indexer has fully stopped. This is accomplished with the `wait_for_completion` + query parameter, and optionally a timeout. For example: ``` POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s + ``` The parameter blocks the API call from returning until either the job has + moved to STOPPED or the specified time has elapsed. If the specified time elapses + without the job moving to STOPPED, a timeout exception occurs. ``_ :param id: Identifier for the rollup job. :param timeout: If `wait_for_completion` is `true`, the API blocks for (at maximum) the specified duration while waiting for the job to stop. If more than `timeout` - time has passed, the API throws a timeout exception. + time has passed, the API throws a timeout exception. NOTE: Even if a timeout + occurs, the stop request is still processing and eventually moves the job + to STOPPED. The timeout simply means the API call itself timed out while + waiting for the status change. :param wait_for_completion: If set to `true`, causes the API to block until the indexer state completely stops. If set to `false`, the API returns immediately and the indexer is stopped asynchronously in the background. diff --git a/elasticsearch/_sync/client/searchable_snapshots.py b/elasticsearch/_sync/client/searchable_snapshots.py index b4acbbef3..3ec1d0045 100644 --- a/elasticsearch/_sync/client/searchable_snapshots.py +++ b/elasticsearch/_sync/client/searchable_snapshots.py @@ -47,11 +47,9 @@ def cache_stats( Get cache statistics. Get statistics about the shared cache for partially mounted indices. 
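Editor's note: the Python equivalent of the rollup-search REST example quoted above, plus the blocking stop described for `wait_for_completion`; index and job names come from the docstring's own examples:

```
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Search live and rolled-up data together; size must be 0 because rollups are
# pre-aggregated and cannot return hits.
resp = es.rollup.rollup_search(
    index="sensor-1,sensor_rollup",
    size=0,
    aggs={"max_temperature": {"max": {"field": "temperature"}}},
)
print(resp["aggregations"]["max_temperature"]["value"])

# Block until the rollup job has fully stopped (or 10 seconds have elapsed).
es.rollup.stop_job(id="sensor", wait_for_completion=True, timeout="10s")
```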
- ``_ + ``_ - :param node_id: A comma-separated list of node IDs or names to limit the returned - information; use `_local` to return information from the node you're connecting - to, leave empty to get information from all nodes + :param node_id: The names of the nodes in the cluster to target. :param master_timeout: """ __path_parts: t.Dict[str, str] @@ -107,9 +105,10 @@ def clear_cache( Clear the cache. Clear indices and data streams from the shared cache for partially mounted indices. - ``_ + ``_ - :param index: A comma-separated list of index names + :param index: A comma-separated list of data streams, indices, and aliases to + clear from the cache. It supports wildcards (`*`). :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) @@ -184,17 +183,22 @@ def mount( ``_ :param repository: The name of the repository containing the snapshot of the - index to mount - :param snapshot: The name of the snapshot of the index to mount - :param index: - :param ignore_index_settings: - :param index_settings: - :param master_timeout: Explicit operation timeout for connection to master node - :param renamed_index: - :param storage: Selects the kind of local storage used to accelerate searches. - Experimental, and defaults to `full_copy` - :param wait_for_completion: Should this request wait until the operation has - completed before returning + index to mount. + :param snapshot: The name of the snapshot of the index to mount. + :param index: The name of the index contained in the snapshot whose data is to + be mounted. If no `renamed_index` is specified, this name will also be used + to create the new index. + :param ignore_index_settings: The names of settings that should be removed from + the index when it is mounted. + :param index_settings: The settings that should be added to the index when it + is mounted. + :param master_timeout: The period to wait for the master node. If the master + node is not available before the timeout expires, the request fails and returns + an error. To indicate that the request should never timeout, set it to `-1`. + :param renamed_index: The name of the index that will be created. + :param storage: The mount option for the searchable snapshot index. + :param wait_for_completion: If true, the request blocks until the operation is + complete. """ if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'repository'") @@ -261,9 +265,10 @@ def stats( """ Get searchable snapshot statistics. - ``_ + ``_ - :param index: A comma-separated list of index names + :param index: A comma-separated list of data streams and indices to retrieve + statistics for. :param level: Return stats aggregated at cluster, index or shard level """ __path_parts: t.Dict[str, str] diff --git a/elasticsearch/_sync/client/security.py b/elasticsearch/_sync/client/security.py index 5c962f5f8..a3d80d3f1 100644 --- a/elasticsearch/_sync/client/security.py +++ b/elasticsearch/_sync/client/security.py @@ -45,14 +45,33 @@ def activate_user_profile( ) -> ObjectApiResponse[t.Any]: """ Activate a user profile. Create or update a user profile on behalf of another - user. + user. NOTE: The user profile feature is designed only for use by Kibana and Elastic's + Observability, Enterprise Search, and Elastic Security solutions. Individual + users and external applications should not call this API directly. 
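Editor's note: a sketch of mounting a searchable snapshot with the parameters documented above; repository, snapshot, and index names are placeholders:

```
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Mount an index from a snapshot, renaming it and stripping a setting that
# should not carry over to the mounted index.
es.searchable_snapshots.mount(
    repository="my_repository",
    snapshot="my_snapshot",
    index="my_docs",
    renamed_index="docs",
    ignore_index_settings=["index.refresh_interval"],
    index_settings={"index.number_of_replicas": 0},
    wait_for_completion=True,
)
```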
The calling + application must have either an `access_token` or a combination of `username` + and `password` for the user that the profile document is intended for. Elastic + reserves the right to change or remove this feature in future releases without + prior notice. This API creates or updates a profile document for end users with + information that is extracted from the user's authentication object including + `username`, `full_name,` `roles`, and the authentication realm. For example, + in the JWT `access_token` case, the profile user's `username` is extracted from + the JWT token claim pointed to by the `claims.principal` setting of the JWT realm + that authenticated the token. When updating a profile document, the API enables + the document if it was disabled. Any updates do not change existing content for + either the `labels` or `data` fields. ``_ - :param grant_type: - :param access_token: - :param password: - :param username: + :param grant_type: The type of grant. + :param access_token: The user's Elasticsearch access token or JWT. Both `access` + and `id` JWT token types are supported and they depend on the underlying + JWT realm configuration. If you specify the `access_token` grant type, this + parameter is required. It is not valid with other grant types. + :param password: The user's password. If you specify the `password` grant type, + this parameter is required. It is not valid with other grant types. + :param username: The username that identifies the user. If you specify the `password` + grant type, this parameter is required. It is not valid with other grant + types. """ if grant_type is None and body is None: raise ValueError("Empty value passed for parameter 'grant_type'") @@ -244,6 +263,94 @@ def bulk_put_role( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("ids", "expiration", "metadata", "role_descriptors"), + ) + def bulk_update_api_keys( + self, + *, + ids: t.Optional[t.Union[str, t.Sequence[str]]] = None, + error_trace: t.Optional[bool] = None, + expiration: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + metadata: t.Optional[t.Mapping[str, t.Any]] = None, + pretty: t.Optional[bool] = None, + role_descriptors: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Bulk update API keys. Update the attributes for multiple API keys. IMPORTANT: + It is not possible to use an API key as the authentication credential for this + API. To update API keys, the owner user's credentials are required. This API + is similar to the update API key API but enables you to apply the same update + to multiple API keys in one API call. This operation can greatly improve performance + over making individual updates. It is not possible to update expired or invalidated + API keys. This API supports updates to API key access scope, metadata and expiration. + The access scope of each API key is derived from the `role_descriptors` you specify + in the request and a snapshot of the owner user's permissions at the time of + the request. The snapshot of the owner's permissions is updated automatically + on every call. IMPORTANT: If you don't specify `role_descriptors` in the request, + a call to this API might still change an API key's access scope. This change + can occur if the owner user's permissions have changed since the API key was + created or last modified. 
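Editor's note: a sketch of activating a user profile with the `password` grant described above; the username and password are placeholders, and in practice this API is called by Kibana or another Elastic solution rather than end-user code:

```
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster

profile = es.security.activate_user_profile(
    grant_type="password",
    username="jacknich",               # placeholder end user
    password="l0ng-r4nd0m-p@ssw0rd",   # placeholder credential
)
print(profile["uid"])  # the unique profile id used by the other profile APIs
```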
A successful request returns a JSON structure that + contains the IDs of all updated API keys, the IDs of API keys that already had + the requested changes and did not require an update, and error details for any + failed update. + + ``_ + + :param ids: The API key identifiers. + :param expiration: Expiration time for the API keys. By default, API keys never + expire. This property can be omitted to leave the value unchanged. + :param metadata: Arbitrary nested metadata to associate with the API keys. Within + the `metadata` object, top-level keys beginning with an underscore (`_`) + are reserved for system usage. Any information specified with this parameter + fully replaces metadata previously associated with the API key. + :param role_descriptors: The role descriptors to assign to the API keys. An API + key's effective permissions are an intersection of its assigned privileges + and the point-in-time snapshot of permissions of the owner user. You can + assign new privileges by specifying them in this parameter. To remove assigned + privileges, supply the `role_descriptors` parameter as an empty object `{}`. + If an API key has no assigned privileges, it inherits the owner user's full + permissions. The snapshot of the owner's permissions is always updated, whether + you supply the `role_descriptors` parameter. The structure of a role descriptor + is the same as the request for the create API keys API. + """ + if ids is None and body is None: + raise ValueError("Empty value passed for parameter 'ids'") + __path_parts: t.Dict[str, str] = {} + __path = "/_security/api_key/_bulk_update" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if ids is not None: + __body["ids"] = ids + if expiration is not None: + __body["expiration"] = expiration + if metadata is not None: + __body["metadata"] = metadata + if role_descriptors is not None: + __body["role_descriptors"] = role_descriptors + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="security.bulk_update_api_keys", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=("password", "password_hash"), ) @@ -773,6 +880,74 @@ def create_service_token( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("x509_certificate_chain",), + ) + def delegate_pki( + self, + *, + x509_certificate_chain: t.Optional[t.Sequence[str]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Delegate PKI authentication. This API implements the exchange of an X509Certificate + chain for an Elasticsearch access token. The certificate chain is validated, + according to RFC 5280, by sequentially considering the trust configuration of + every installed PKI realm that has `delegation.enabled` set to `true`. 
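Editor's note: a sketch of the new `bulk_update_api_keys` helper added above; the key ids are placeholders, and per the docstring an empty `role_descriptors` object removes previously assigned privileges so the keys inherit their owner's permissions:

```
from elasticsearch import Elasticsearch

# This API must be called with the owner user's credentials, not an API key.
es = Elasticsearch("http://localhost:9200")  # assumed local cluster

resp = es.security.bulk_update_api_keys(
    ids=["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"],  # placeholder key ids
    role_descriptors={},                                    # inherit owner's permissions
    metadata={"environment": "production"},
    expiration="30d",
)
print(resp)  # lists updated keys, no-op keys, and per-key errors
```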
A successfully + trusted client certificate is also subject to the validation of the subject distinguished + name according to thw `username_pattern` of the respective realm. This API is + called by smart and trusted proxies, such as Kibana, which terminate the user's + TLS session but still want to authenticate the user by using a PKI realm—-​as + if the user connected directly to Elasticsearch. IMPORTANT: The association between + the subject public key in the target certificate and the corresponding private + key is not validated. This is part of the TLS authentication process and it is + delegated to the proxy that calls this API. The proxy is trusted to have performed + the TLS authentication and this API translates that authentication into an Elasticsearch + access token. + + ``_ + + :param x509_certificate_chain: The X509Certificate chain, which is represented + as an ordered string array. Each string in the array is a base64-encoded + (Section 4 of RFC4648 - not base64url-encoded) of the certificate's DER encoding. + The first element is the target certificate that contains the subject distinguished + name that is requesting access. This may be followed by additional certificates; + each subsequent certificate is used to certify the previous one. + """ + if x509_certificate_chain is None and body is None: + raise ValueError( + "Empty value passed for parameter 'x509_certificate_chain'" + ) + __path_parts: t.Dict[str, str] = {} + __path = "/_security/delegate_pki" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if x509_certificate_chain is not None: + __body["x509_certificate_chain"] = x509_certificate_chain + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="security.delegate_pki", + path_parts=__path_parts, + ) + @_rewrite_parameters() def delete_privileges( self, @@ -1098,14 +1273,21 @@ def disable_user_profile( ) -> ObjectApiResponse[t.Any]: """ Disable a user profile. Disable user profiles so that they are not visible in - user profile searches. + user profile searches. NOTE: The user profile feature is designed only for use + by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security + solutions. Individual users and external applications should not call this API + directly. Elastic reserves the right to change or remove this feature in future + releases without prior notice. When you activate a user profile, its automatically + enabled and visible in user profile searches. You can use the disable user profile + API to disable a user profile so it’s not visible in these searches. To re-enable + a disabled user profile, use the enable user profile API . ``_ :param uid: Unique identifier for the user profile. :param refresh: If 'true', Elasticsearch refreshes the affected shards to make - this operation visible to search, if 'wait_for' then wait for a refresh to - make this operation visible to search, if 'false' do nothing with refreshes. + this operation visible to search. If 'wait_for', it waits for a refresh to + make this operation visible to search. 
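Editor's note: a sketch of the delegated PKI exchange described above, as a trusted proxy would perform it; the certificate value is a truncated placeholder for a base64-encoded DER certificate, target certificate first:

```
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed trusted proxy's client

resp = es.security.delegate_pki(
    x509_certificate_chain=["MIIDeDCCAmCgAwIBAgIU..."],  # truncated placeholder chain
)
print(resp["access_token"], resp["expires_in"])
```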
If 'false', it does nothing with refreshes. """ if uid in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'uid'") @@ -1195,14 +1377,20 @@ def enable_user_profile( ) -> ObjectApiResponse[t.Any]: """ Enable a user profile. Enable user profiles to make them visible in user profile - searches. + searches. NOTE: The user profile feature is designed only for use by Kibana and + Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual + users and external applications should not call this API directly. Elastic reserves + the right to change or remove this feature in future releases without prior notice. + When you activate a user profile, it's automatically enabled and visible in user + profile searches. If you later disable the user profile, you can use the enable + user profile API to make the profile visible in these searches again. ``_ - :param uid: Unique identifier for the user profile. + :param uid: A unique identifier for the user profile. :param refresh: If 'true', Elasticsearch refreshes the affected shards to make - this operation visible to search, if 'wait_for' then wait for a refresh to - make this operation visible to search, if 'false' do nothing with refreshes. + this operation visible to search. If 'wait_for', it waits for a refresh to + make this operation visible to search. If 'false', nothing is done with refreshes. """ if uid in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'uid'") @@ -1667,6 +1855,49 @@ def get_service_credentials( path_parts=__path_parts, ) + @_rewrite_parameters() + def get_settings( + self, + *, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Get security index settings. Get the user-configurable settings for the security + internal index (`.security` and associated indices). + + ``_ + + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + """ + __path_parts: t.Dict[str, str] = {} + __path = "/_security/settings" + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="security.get_settings", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=( "grant_type", @@ -1860,15 +2091,19 @@ def get_user_profile( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a user profile. Get a user's profile using the unique profile ID. + Get a user profile. Get a user's profile using the unique profile ID. NOTE: The + user profile feature is designed only for use by Kibana and Elastic's Observability, + Enterprise Search, and Elastic Security solutions. Individual users and external + applications should not call this API directly. Elastic reserves the right to + change or remove this feature in future releases without prior notice. 
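Editor's note: a sketch of the new security `get_settings` call and the profile enable/disable toggles described above; the profile uid is a placeholder:

```
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Read the user-configurable settings of the security system indices.
print(es.security.get_settings(master_timeout="30s"))

profile_uid = "u_kd2JMqwUQwSCCOxMv7M1vw_0"  # placeholder profile id

# Hide and re-show a profile in user-profile searches, waiting for a refresh
# so the change is immediately visible.
es.security.disable_user_profile(uid=profile_uid, refresh="wait_for")
es.security.enable_user_profile(uid=profile_uid, refresh="wait_for")
```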
``_ :param uid: A unique identifier for the user profile. - :param data: List of filters for the `data` field of the profile document. To - return all content use `data=*`. To return a subset of content use `data=` - to retrieve content nested under the specified ``. By default returns - no `data` content. + :param data: A comma-separated list of filters for the `data` field of the profile + document. To return all content use `data=*`. To return a subset of content + use `data=` to retrieve content nested under the specified ``. + By default returns no `data` content. """ if uid in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'uid'") @@ -2140,11 +2375,15 @@ def has_privileges_user_profile( ) -> ObjectApiResponse[t.Any]: """ Check user profile privileges. Determine whether the users associated with the - specified user profile IDs have all the requested privileges. + specified user profile IDs have all the requested privileges. NOTE: The user + profile feature is designed only for use by Kibana and Elastic's Observability, + Enterprise Search, and Elastic Security solutions. Individual users and external + applications should not call this API directly. Elastic reserves the right to + change or remove this feature in future releases without prior notice. ``_ - :param privileges: + :param privileges: An object containing all the privileges to be checked. :param uids: A list of profile IDs. The privileges are checked for associated users of the profiles. """ @@ -3312,13 +3551,25 @@ def saml_authenticate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Authenticate SAML. Submits a SAML response message to Elasticsearch for consumption. + Authenticate SAML. Submit a SAML response message to Elasticsearch for consumption. + NOTE: This API is intended for use by custom web applications other than Kibana. + If you are using Kibana, refer to the documentation for configuring SAML single-sign-on + on the Elastic Stack. The SAML message that is submitted can be: * A response + to a SAML authentication request that was previously created using the SAML prepare + authentication API. * An unsolicited SAML message in the case of an IdP-initiated + single sign-on (SSO) flow. In either case, the SAML message needs to be a base64 + encoded XML document with a root element of ``. After successful validation, + Elasticsearch responds with an Elasticsearch internal access token and refresh + token that can be subsequently used for authentication. This API endpoint essentially + exchanges SAML responses that indicate successful authentication in the IdP for + Elasticsearch access and refresh tokens, which can be used for authentication + against Elasticsearch. ``_ - :param content: The SAML response as it was sent by the user’s browser, usually + :param content: The SAML response as it was sent by the user's browser, usually a Base64 encoded XML document. - :param ids: A json array with all the valid SAML Request Ids that the caller + :param ids: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. :param realm: The name of the realm that should authenticate the SAML response. Useful in cases where many SAML realms are defined. @@ -3375,10 +3626,19 @@ def saml_complete_logout( ) -> ObjectApiResponse[t.Any]: """ Logout of SAML completely. Verifies the logout response sent from the SAML IdP. + NOTE: This API is intended for use by custom web applications other than Kibana. 
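Editor's note: a sketch of consuming a SAML response as described above, from a custom (non-Kibana) web application backend; the SAML response, request id, and realm name are placeholders:

```
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed custom web app backend

# Exchange a base64-encoded SAML <Response> from the user's browser for
# Elasticsearch access and refresh tokens.
resp = es.security.saml_authenticate(
    content="PHNhbWxwOlJlc3BvbnNlIC4uLg==",    # placeholder SAML response
    ids=["AONDGjkf4CbjeWoNV22JlbVd2jZWvu3p"],  # request ids issued for this user
    realm="saml1",                             # placeholder realm name
)
access_token = resp["access_token"]
refresh_token = resp["refresh_token"]
```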
+ If you are using Kibana, refer to the documentation for configuring SAML single-sign-on + on the Elastic Stack. The SAML IdP may send a logout response back to the SP + after handling the SP-initiated SAML Single Logout. This API verifies the response + by ensuring the content is relevant and validating its signature. An empty response + is returned if the verification process is successful. The response can be sent + by the IdP with either the HTTP-Redirect or the HTTP-Post binding. The caller + of this API must prepare the request accordingly so that this API can handle + either of them. ``_ - :param ids: A json array with all the valid SAML Request Ids that the caller + :param ids: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. :param realm: The name of the SAML realm in Elasticsearch for which the configuration is used to verify the logout response. @@ -3440,25 +3700,33 @@ def saml_invalidate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Invalidate SAML. Submits a SAML LogoutRequest message to Elasticsearch for consumption. + Invalidate SAML. Submit a SAML LogoutRequest message to Elasticsearch for consumption. + NOTE: This API is intended for use by custom web applications other than Kibana. + If you are using Kibana, refer to the documentation for configuring SAML single-sign-on + on the Elastic Stack. The logout request comes from the SAML IdP during an IdP + initiated Single Logout. The custom web application can use this API to have + Elasticsearch process the `LogoutRequest`. After successful validation of the + request, Elasticsearch invalidates the access token and refresh token that corresponds + to that specific SAML principal and provides a URL that contains a SAML LogoutResponse + message. Thus the user can be redirected back to their IdP. ``_ :param query_string: The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout. This query should include - a single parameter named SAMLRequest that contains a SAML logout request + a single parameter named `SAMLRequest` that contains a SAML logout request that is deflated and Base64 encoded. If the SAML IdP has signed the logout - request, the URL should include two extra parameters named SigAlg and Signature + request, the URL should include two extra parameters named `SigAlg` and `Signature` that contain the algorithm used for the signature and the signature value - itself. In order for Elasticsearch to be able to verify the IdP’s signature, - the value of the query_string field must be an exact match to the string + itself. In order for Elasticsearch to be able to verify the IdP's signature, + the value of the `query_string` field must be an exact match to the string provided by the browser. The client application must not attempt to parse or process the string in any way. :param acs: The Assertion Consumer Service URL that matches the one of the SAML realm in Elasticsearch that should be used. You must specify either this - parameter or the realm parameter. + parameter or the `realm` parameter. :param realm: The name of the SAML realm in Elasticsearch the configuration. - You must specify either this parameter or the acs parameter. + You must specify either this parameter or the `acs` parameter. 
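
A minimal sketch of an IdP-initiated logout handled through the SAML invalidate API described above, assuming a placeholder realm name; the query string must be passed through exactly as received from the IdP:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("https://localhost:9200")  # placeholder endpoint and credentials

    # Forward the IdP's LogoutRequest; the query string below is a truncated placeholder.
    result = client.security.saml_invalidate(
        query_string="SAMLRequest=nZFda4...&SigAlg=http%3A%2F%2F...&Signature=MsAYz...",
        realm="saml1",  # alternatively, pass acs=... instead of realm
    )
    redirect_url = result["redirect"]  # URL carrying the SAML LogoutResponse back to the IdP
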
""" if query_string is None and body is None: raise ValueError("Empty value passed for parameter 'query_string'") @@ -3508,12 +3776,19 @@ def saml_logout( ) -> ObjectApiResponse[t.Any]: """ Logout of SAML. Submits a request to invalidate an access token and refresh token. + NOTE: This API is intended for use by custom web applications other than Kibana. + If you are using Kibana, refer to the documentation for configuring SAML single-sign-on + on the Elastic Stack. This API invalidates the tokens that were generated for + a user by the SAML authenticate API. If the SAML realm in Elasticsearch is configured + accordingly and the SAML IdP supports this, the Elasticsearch response contains + a URL to redirect the user to the IdP that contains a SAML logout request (starting + an SP-initiated SAML Single Logout). ``_ :param token: The access token that was returned as a response to calling the SAML authenticate API. Alternatively, the most recent token that was received - after refreshing the original one by using a refresh_token. + after refreshing the original one by using a `refresh_token`. :param refresh_token: The refresh token that was returned as a response to calling the SAML authenticate API. Alternatively, the most recent refresh token that was received after refreshing the original access token. @@ -3564,19 +3839,31 @@ def saml_prepare_authentication( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Prepare SAML authentication. Creates a SAML authentication request (``) - as a URL string, based on the configuration of the respective SAML realm in Elasticsearch. + Prepare SAML authentication. Create a SAML authentication request (``) + as a URL string based on the configuration of the respective SAML realm in Elasticsearch. + NOTE: This API is intended for use by custom web applications other than Kibana. + If you are using Kibana, refer to the documentation for configuring SAML single-sign-on + on the Elastic Stack. This API returns a URL pointing to the SAML Identity Provider. + You can use the URL to redirect the browser of the user in order to continue + the authentication process. The URL includes a single parameter named `SAMLRequest`, + which contains a SAML Authentication request that is deflated and Base64 encoded. + If the configuration dictates that SAML authentication requests should be signed, + the URL has two extra parameters named `SigAlg` and `Signature`. These parameters + contain the algorithm used for the signature and the signature value itself. + It also returns a random string that uniquely identifies this SAML Authentication + request. The caller of this API needs to store this identifier as it needs to + be used in a following step of the authentication process. ``_ :param acs: The Assertion Consumer Service URL that matches the one of the SAML realms in Elasticsearch. The realm is used to generate the authentication - request. You must specify either this parameter or the realm parameter. + request. You must specify either this parameter or the `realm` parameter. :param realm: The name of the SAML realm in Elasticsearch for which the configuration is used to generate the authentication request. You must specify either this - parameter or the acs parameter. + parameter or the `acs` parameter. :param relay_state: A string that will be included in the redirect URL that this - API returns as the RelayState query parameter. If the Authentication Request + API returns as the `RelayState` query parameter. 
If the Authentication Request is signed, this value is used as part of the signature computation. """ __path_parts: t.Dict[str, str] = {} @@ -3621,7 +3908,10 @@ def saml_service_provider_metadata( ) -> ObjectApiResponse[t.Any]: """ Create SAML service provider metadata. Generate SAML metadata for a SAML 2.0 - Service Provider. + Service Provider. The SAML 2.0 specification provides a mechanism for Service + Providers to describe their capabilities and configuration using a metadata file. + This API generates Service Provider metadata based on the configuration of a + SAML realm in Elasticsearch. ``_ @@ -3668,21 +3958,27 @@ def suggest_user_profiles( ) -> ObjectApiResponse[t.Any]: """ Suggest a user profile. Get suggestions for user profiles that match specified - search criteria. + search criteria. NOTE: The user profile feature is designed only for use by Kibana + and Elastic's Observability, Enterprise Search, and Elastic Security solutions. + Individual users and external applications should not call this API directly. + Elastic reserves the right to change or remove this feature in future releases + without prior notice. ``_ - :param data: List of filters for the `data` field of the profile document. To - return all content use `data=*`. To return a subset of content use `data=` - to retrieve content nested under the specified ``. By default returns - no `data` content. + :param data: A comma-separated list of filters for the `data` field of the profile + document. To return all content use `data=*`. To return a subset of content, + use `data=` to retrieve content nested under the specified ``. + By default, the API returns no `data` content. It is an error to specify + `data` as both the query parameter and the request body field. :param hint: Extra search criteria to improve relevance of the suggestion result. Profiles matching the spcified hint are ranked higher in the response. Profiles - not matching the hint don't exclude the profile from the response as long - as the profile matches the `name` field query. - :param name: Query string used to match name-related fields in user profile documents. - Name-related fields are the user's `username`, `full_name`, and `email`. - :param size: Number of profiles to return. + not matching the hint aren't excluded from the response as long as the profile + matches the `name` field query. + :param name: A query string used to match name-related fields in user profile + documents. Name-related fields are the user's `username`, `full_name`, and + `email`. + :param size: The number of profiles to return. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/profile/_suggest" @@ -3824,7 +4120,18 @@ def update_cross_cluster_api_key( ) -> ObjectApiResponse[t.Any]: """ Update a cross-cluster API key. Update the attributes of an existing cross-cluster - API key, which is used for API key based remote cluster access. + API key, which is used for API key based remote cluster access. To use this API, + you must have at least the `manage_security` cluster privilege. Users can only + update API keys that they created. To update another user's API key, use the + `run_as` feature to submit a request on behalf of another user. IMPORTANT: It's + not possible to use an API key as the authentication credential for this API. + To update an API key, the owner user's credentials are required. It's not possible + to update expired API keys, or API keys that have been invalidated by the invalidate + API key API. 
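
A minimal sketch of the suggest user profiles API described above, assuming placeholder `hint` criteria and `data` namespace:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("https://localhost:9200")  # placeholder endpoint and credentials

    # Suggest profiles whose name-related fields match "jack"; profiles carrying the
    # hinted label are ranked higher, but non-matching profiles are not excluded.
    response = client.security.suggest_user_profiles(
        name="jack",
        size=5,
        data="app1",  # return only the "app1" namespace of each profile's data
        hint={"labels": {"direction": ["north", "east"]}},  # placeholder hint
    )
    for profile in response["profiles"]:
        print(profile["uid"], profile["user"]["username"])
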
This API supports updates to an API key's access scope, metadata, + and expiration. The owner user's information, such as the `username` and `realm`, + is also updated automatically on every call. NOTE: This API cannot update REST + API keys, which should be updated by either the update API key or bulk update + API keys API. ``_ @@ -3833,8 +4140,8 @@ def update_cross_cluster_api_key( of permissions for cross cluster search and cross cluster replication. At least one of them must be specified. When specified, the new access assignment fully replaces the previously assigned access. - :param expiration: Expiration time for the API key. By default, API keys never - expire. This property can be omitted to leave the value unchanged. + :param expiration: The expiration time for the API key. By default, API keys + never expire. This property can be omitted to leave the value unchanged. :param metadata: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. When specified, this information @@ -3874,6 +4181,81 @@ def update_cross_cluster_api_key( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("security", "security_profile", "security_tokens"), + parameter_aliases={ + "security-profile": "security_profile", + "security-tokens": "security_tokens", + }, + ) + def update_settings( + self, + *, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + security: t.Optional[t.Mapping[str, t.Any]] = None, + security_profile: t.Optional[t.Mapping[str, t.Any]] = None, + security_tokens: t.Optional[t.Mapping[str, t.Any]] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Update security index settings. Update the user-configurable settings for the + security internal index (`.security` and associated indices). Only a subset of + settings are allowed to be modified, for example `index.auto_expand_replicas` + and `index.number_of_replicas`. If a specific index is not in use on the system + and settings are provided for it, the request will be rejected. This API does + not yet support configuring the settings for indices before they are in use. + + ``_ + + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. + :param security: Settings for the index used for most security configuration, + including native realm users and roles configured with the API. + :param security_profile: Settings for the index used to store profile information. + :param security_tokens: Settings for the index used to store tokens. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. 
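
A minimal sketch of the two security settings endpoints added above (`get_settings` and `update_settings`); the settings values shown are illustrative:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("https://localhost:9200")  # placeholder endpoint and credentials

    # Read the current settings of the security system indices.
    current = client.security.get_settings()
    print(current["security"])

    # Adjust the user-configurable settings; only a subset such as
    # index.auto_expand_replicas and index.number_of_replicas may be changed.
    client.security.update_settings(
        security={"index.auto_expand_replicas": "0-all"},
        security_tokens={"index.number_of_replicas": 1},
    )
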
+ """ + __path_parts: t.Dict[str, str] = {} + __path = "/_security/settings" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + if not __body: + if security is not None: + __body["security"] = security + if security_profile is not None: + __body["security-profile"] = security_profile + if security_tokens is not None: + __body["security-tokens"] = security_tokens + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="security.update_settings", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=("data", "labels"), ) @@ -3896,22 +4278,37 @@ def update_user_profile_data( ) -> ObjectApiResponse[t.Any]: """ Update user profile data. Update specific data for the user profile that is associated - with a unique ID. + with a unique ID. NOTE: The user profile feature is designed only for use by + Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. + Individual users and external applications should not call this API directly. + Elastic reserves the right to change or remove this feature in future releases + without prior notice. To use this API, you must have one of the following privileges: + * The `manage_user_profile` cluster privilege. * The `update_profile_data` global + privilege for the namespaces that are referenced in the request. This API updates + the `labels` and `data` fields of an existing user profile document with JSON + objects. New keys and their values are added to the profile document and conflicting + keys are replaced by data that's included in the request. For both labels and + data, content is namespaced by the top-level fields. The `update_profile_data` + global privilege grants privileges for updating only the allowed namespaces. ``_ :param uid: A unique identifier for the user profile. :param data: Non-searchable data that you want to associate with the user profile. - This field supports a nested data structure. + This field supports a nested data structure. Within the `data` object, top-level + keys cannot begin with an underscore (`_`) or contain a period (`.`). The + data object is not searchable, but can be retrieved with the get user profile + API. :param if_primary_term: Only perform the operation if the document has this primary term. :param if_seq_no: Only perform the operation if the document has this sequence number. :param labels: Searchable data that you want to associate with the user profile. - This field supports a nested data structure. + This field supports a nested data structure. Within the labels object, top-level + keys cannot begin with an underscore (`_`) or contain a period (`.`). :param refresh: If 'true', Elasticsearch refreshes the affected shards to make - this operation visible to search, if 'wait_for' then wait for a refresh to - make this operation visible to search, if 'false' do nothing with refreshes. + this operation visible to search. 
If 'wait_for', it waits for a refresh to + make this operation visible to search. If 'false', nothing is done with refreshes. """ if uid in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'uid'") diff --git a/elasticsearch/_sync/client/shutdown.py b/elasticsearch/_sync/client/shutdown.py index bfa561089..29cdf5ff2 100644 --- a/elasticsearch/_sync/client/shutdown.py +++ b/elasticsearch/_sync/client/shutdown.py @@ -50,7 +50,7 @@ def delete_node( and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator privileges feature is enabled, you must be an operator to use this API. - ``_ + ``_ :param node_id: The node id of node to be removed from the shutdown state :param master_timeout: Period to wait for a connection to the master node. If @@ -98,9 +98,6 @@ def get_node( t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, pretty: t.Optional[bool] = None, - timeout: t.Optional[ - t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] - ] = None, ) -> ObjectApiResponse[t.Any]: """ Get the shutdown status. Get information about nodes that are ready to be shut @@ -111,14 +108,12 @@ def get_node( the operator privileges feature is enabled, you must be an operator to use this API. - ``_ + ``_ :param node_id: Which node for which to retrieve the shutdown status :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. - :param timeout: Period to wait for a response. If no response is received before - the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if node_id not in SKIP_IN_PATH: @@ -138,8 +133,6 @@ def get_node( __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty - if timeout is not None: - __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", @@ -178,19 +171,23 @@ def put_node( """ Prepare a node to be shut down. NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. - Direct use is not supported. If the operator privileges feature is enabled, you - must be an operator to use this API. The API migrates ongoing tasks and index - shards to other nodes as needed to prepare a node to be restarted or shut down - and removed from the cluster. This ensures that Elasticsearch can be stopped - safely with minimal disruption to the cluster. You must specify the type of shutdown: - `restart`, `remove`, or `replace`. If a node is already being prepared for shutdown, - you can use this API to change the shutdown type. IMPORTANT: This API does NOT - terminate the Elasticsearch process. Monitor the node shutdown status to determine - when it is safe to stop Elasticsearch. + Direct use is not supported. If you specify a node that is offline, it will be + prepared for shut down when it rejoins the cluster. If the operator privileges + feature is enabled, you must be an operator to use this API. The API migrates + ongoing tasks and index shards to other nodes as needed to prepare a node to + be restarted or shut down and removed from the cluster. This ensures that Elasticsearch + can be stopped safely with minimal disruption to the cluster. You must specify + the type of shutdown: `restart`, `remove`, or `replace`. 
If a node is already + being prepared for shutdown, you can use this API to change the shutdown type. + IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the + node shutdown status to determine when it is safe to stop Elasticsearch. - ``_ + ``_ - :param node_id: The node id of node to be shut down + :param node_id: The node identifier. This parameter is not validated against + the cluster's active nodes. This enables you to register a node for shut + down while it is offline. No error is thrown if you specify an invalid node + ID. :param reason: A human-readable reason that the node is being shut down. This field provides information for other cluster operators; it does not affect the shut down process. @@ -211,17 +208,17 @@ def put_node( the index.unassigned.node_left.delayed_timeout setting. If you specify both a restart allocation delay and an index-level allocation delay, the longer of the two is used. - :param master_timeout: Period to wait for a connection to the master node. If - no response is received before the timeout expires, the request fails and - returns an error. + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. :param target_node_name: Only valid if type is replace. Specifies the name of the node that is replacing the node being shut down. Shards from the shut down node are only allowed to be allocated to the target node, and no other data will be allocated to the target node. During relocation of data certain allocation rules are ignored, such as disk watermarks or user attribute filtering rules. - :param timeout: Period to wait for a response. If no response is received before - the timeout expires, the request fails and returns an error. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. """ if node_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'node_id'") diff --git a/elasticsearch/_sync/client/simulate.py b/elasticsearch/_sync/client/simulate.py index ac1f7cc90..9d8dfd544 100644 --- a/elasticsearch/_sync/client/simulate.py +++ b/elasticsearch/_sync/client/simulate.py @@ -87,7 +87,7 @@ def ingest( This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request. - ``_ + ``_ :param docs: Sample documents to test in the pipeline. :param index: The index to simulate ingesting into. This value can be overridden diff --git a/elasticsearch/_sync/client/slm.py b/elasticsearch/_sync/client/slm.py index 024264344..c28277489 100644 --- a/elasticsearch/_sync/client/slm.py +++ b/elasticsearch/_sync/client/slm.py @@ -33,7 +33,9 @@ def delete_lifecycle( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Delete a policy. Delete a snapshot lifecycle policy definition. This operation @@ -43,6 +45,11 @@ def delete_lifecycle( ``_ :param policy_id: The id of the snapshot lifecycle policy to remove + :param master_timeout: The period to wait for a connection to the master node. 
+ If no response is received before the timeout expires, the request fails + and returns an error. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. """ if policy_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'policy_id'") @@ -55,8 +62,12 @@ def delete_lifecycle( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", @@ -75,7 +86,9 @@ def execute_lifecycle( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Run a policy. Immediately create a snapshot according to the snapshot lifecycle @@ -86,6 +99,11 @@ def execute_lifecycle( ``_ :param policy_id: The id of the snapshot lifecycle policy to be executed + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. """ if policy_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'policy_id'") @@ -98,8 +116,12 @@ def execute_lifecycle( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", @@ -117,7 +139,9 @@ def execute_retention( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Run a retention policy. Manually apply the retention policy to force immediate @@ -125,6 +149,12 @@ def execute_retention( retention rules. The retention policy is normally applied according to its schedule. ``_ + + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. 
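
A minimal sketch of the `master_timeout` and `timeout` parameters that this patch adds to the SLM APIs above, using a placeholder policy name:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("https://localhost:9200")  # placeholder endpoint and credentials

    # Apply retention rules immediately, bounding how long to wait for the master
    # node and for the response.
    client.slm.execute_retention(master_timeout="30s", timeout="60s")

    # Delete a policy with the same timeout bounds ("nightly-snapshots" is a placeholder).
    client.slm.delete_lifecycle(
        policy_id="nightly-snapshots",
        master_timeout="30s",
        timeout="60s",
    )
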
""" __path_parts: t.Dict[str, str] = {} __path = "/_slm/_execute_retention" @@ -135,8 +165,12 @@ def execute_retention( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", @@ -155,7 +189,9 @@ def get_lifecycle( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Get policy information. Get snapshot lifecycle policy definitions and information @@ -164,6 +200,11 @@ def get_lifecycle( ``_ :param policy_id: Comma-separated list of snapshot lifecycle policies to retrieve + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if policy_id not in SKIP_IN_PATH: @@ -179,8 +220,12 @@ def get_lifecycle( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", @@ -198,13 +243,21 @@ def get_stats( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Get snapshot lifecycle management statistics. Get global and policy-level statistics about actions taken by snapshot lifecycle management. ``_ + + :param master_timeout: Period to wait for a connection to the master node. If + no response is received before the timeout expires, the request fails and + returns an error. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. 
""" __path_parts: t.Dict[str, str] = {} __path = "/_slm/stats" @@ -215,8 +268,12 @@ def get_stats( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", @@ -234,12 +291,22 @@ def get_status( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Get the snapshot lifecycle management status. ``_ + + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. To indicate that the request should never timeout, + set it to `-1`. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. To indicate + that the request should never timeout, set it to `-1`. """ __path_parts: t.Dict[str, str] = {} __path = "/_slm/status" @@ -250,8 +317,12 @@ def get_status( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", @@ -292,9 +363,10 @@ def put_lifecycle( :param policy_id: The identifier for the snapshot lifecycle policy you want to create or update. :param config: Configuration for each snapshot created by the policy. - :param master_timeout: Period to wait for a connection to the master node. If - no response is received before the timeout expires, the request fails and - returns an error. + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. To indicate that the request should never timeout, + set it to `-1`. :param name: Name automatically assigned to each snapshot created by the policy. Date math is supported. To prevent conflicting snapshot names, a UUID is automatically appended to each snapshot name. @@ -305,8 +377,9 @@ def put_lifecycle( by the policy. :param schedule: Periodic or absolute schedule at which the policy creates snapshots. SLM applies schedule changes immediately. - :param timeout: Period to wait for a response. If no response is received before - the timeout expires, the request fails and returns an error. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. To indicate + that the request should never timeout, set it to `-1`. 
""" if policy_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'policy_id'") @@ -359,7 +432,9 @@ def start( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Start snapshot lifecycle management. Snapshot lifecycle management (SLM) starts @@ -367,6 +442,14 @@ def start( if it has been stopped using the stop SLM API. ``_ + + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. To indicate that the request should never timeout, + set it to `-1`. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. To indicate + that the request should never timeout, set it to `-1`. """ __path_parts: t.Dict[str, str] = {} __path = "/_slm/start" @@ -377,8 +460,12 @@ def start( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", @@ -396,7 +483,9 @@ def stop( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Stop snapshot lifecycle management. Stop all snapshot lifecycle management (SLM) @@ -410,6 +499,14 @@ def stop( status API to see if SLM is running. ``_ + + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. To indicate that the request should never timeout, + set it to `-1`. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. To indicate + that the request should never timeout, set it to `-1`. 
""" __path_parts: t.Dict[str, str] = {} __path = "/_slm/stop" @@ -420,8 +517,12 @@ def stop( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", diff --git a/elasticsearch/_sync/client/snapshot.py b/elasticsearch/_sync/client/snapshot.py index 8d6665239..c604be816 100644 --- a/elasticsearch/_sync/client/snapshot.py +++ b/elasticsearch/_sync/client/snapshot.py @@ -95,21 +95,19 @@ def clone( human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, - timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ Clone a snapshot. Clone part of all of a snapshot into another snapshot in the same repository. - ``_ + ``_ :param repository: A repository name :param snapshot: The name of the snapshot to clone from :param target_snapshot: The name of the cloned snapshot to create :param indices: :param master_timeout: Explicit operation timeout for connection to master node - :param timeout: """ if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'repository'") @@ -137,8 +135,6 @@ def clone( __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty - if timeout is not None: - __query["timeout"] = timeout if not __body: if indices is not None: __body["indices"] = indices @@ -185,7 +181,7 @@ def create( """ Create a snapshot. Take a snapshot of a cluster or of data streams and indices. - ``_ + ``_ :param repository: Repository for the snapshot. :param snapshot: Name of the snapshot. Must be unique in the repository. @@ -353,7 +349,7 @@ def delete( """ Delete snapshots. - ``_ + ``_ :param repository: A repository name :param snapshot: A comma-separated list of snapshot names @@ -406,7 +402,7 @@ def delete_repository( removes only the reference to the location where the repository is storing the snapshots. The snapshots themselves are left untouched and in place. - ``_ + ``_ :param name: Name of the snapshot repository to unregister. Wildcard (`*`) patterns are supported. @@ -480,7 +476,7 @@ def get( """ Get snapshot information. - ``_ + ``_ :param repository: Comma-separated list of snapshot repository names used to limit the request. Wildcard (*) expressions are supported. @@ -592,7 +588,7 @@ def get_repository( """ Get snapshot repository information. 
- ``_ + ``_ :param name: A comma-separated list of repository names :param local: Return local information, do not retrieve the state from master @@ -629,6 +625,225 @@ def get_repository( path_parts=__path_parts, ) + @_rewrite_parameters() + def repository_analyze( + self, + *, + name: str, + blob_count: t.Optional[int] = None, + concurrency: t.Optional[int] = None, + detailed: t.Optional[bool] = None, + early_read_node_count: t.Optional[int] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + max_blob_size: t.Optional[t.Union[int, str]] = None, + max_total_data_size: t.Optional[t.Union[int, str]] = None, + pretty: t.Optional[bool] = None, + rare_action_probability: t.Optional[float] = None, + rarely_abort_writes: t.Optional[bool] = None, + read_node_count: t.Optional[int] = None, + register_operation_count: t.Optional[int] = None, + seed: t.Optional[int] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Analyze a snapshot repository. Analyze the performance characteristics and any + incorrect behaviour found in a repository. The response exposes implementation + details of the analysis which may change from version to version. The response + body format is therefore not considered stable and may be different in newer + versions. There are a large number of third-party storage systems available, + not all of which are suitable for use as a snapshot repository by Elasticsearch. + Some storage systems behave incorrectly, or perform poorly, especially when accessed + concurrently by multiple clients as the nodes of an Elasticsearch cluster do. + This API performs a collection of read and write operations on your repository + which are designed to detect incorrect behaviour and to measure the performance + characteristics of your storage system. The default values for the parameters + are deliberately low to reduce the impact of running an analysis inadvertently + and to provide a sensible starting point for your investigations. Run your first + analysis with the default parameter values to check for simple problems. If successful, + run a sequence of increasingly large analyses until you encounter a failure or + you reach a `blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, + a `max_total_data_size` of at least `1tb`, and a `register_operation_count` of + at least `100`. Always specify a generous timeout, possibly `1h` or longer, to + allow time for each analysis to run to completion. Perform the analyses using + a multi-node cluster of a similar size to your production cluster so that it + can detect any problems that only arise when the repository is accessed by many + nodes at once. If the analysis fails, Elasticsearch detected that your repository + behaved unexpectedly. This usually means you are using a third-party storage + system with an incorrect or incompatible implementation of the API it claims + to support. If so, this storage system is not suitable for use as a snapshot + repository. You will need to work with the supplier of your storage system to + address the incompatibilities that Elasticsearch detects. If the analysis is + successful, the API returns details of the testing process, optionally including + how long each operation took. You can use this information to determine the performance + of your storage system. 
If any operation fails or returns an incorrect result, + the API returns an error. If the API returns an error, it may not have removed + all the data it wrote to the repository. The error will indicate the location + of any leftover data and this path is also recorded in the Elasticsearch logs. + You should verify that this location has been cleaned up correctly. If there + is still leftover data at the specified location, you should manually remove + it. If the connection from your client to Elasticsearch is closed while the client + is waiting for the result of the analysis, the test is cancelled. Some clients + are configured to close their connection if no response is received within a + certain timeout. An analysis takes a long time to complete so you might need + to relax any such client-side timeouts. On cancellation the analysis attempts + to clean up the data it was writing, but it may not be able to remove it all. + The path to the leftover data is recorded in the Elasticsearch logs. You should + verify that this location has been cleaned up correctly. If there is still leftover + data at the specified location, you should manually remove it. If the analysis + is successful then it detected no incorrect behaviour, but this does not mean + that correct behaviour is guaranteed. The analysis attempts to detect common + bugs but it does not offer 100% coverage. Additionally, it does not test the + following: * Your repository must perform durable writes. Once a blob has been + written it must remain in place until it is deleted, even after a power loss + or similar disaster. * Your repository must not suffer from silent data corruption. + Once a blob has been written, its contents must remain unchanged until it is + deliberately modified or deleted. * Your repository must behave correctly even + if connectivity from the cluster is disrupted. Reads and writes may fail in this + case, but they must not return incorrect results. IMPORTANT: An analysis writes + a substantial amount of data to your repository and then reads it back again. + This consumes bandwidth on the network between the cluster and the repository, + and storage space and I/O bandwidth on the repository itself. You must ensure + this load does not affect other users of these systems. Analyses respect the + repository settings `max_snapshot_bytes_per_sec` and `max_restore_bytes_per_sec` + if available and the cluster setting `indices.recovery.max_bytes_per_sec` which + you can use to limit the bandwidth they consume. NOTE: This API is intended for + exploratory use by humans. You should expect the request parameters and the response + format to vary in future versions. NOTE: Different versions of Elasticsearch + may perform different checks for repository compatibility, with newer versions + typically being stricter than older ones. A storage system that passes repository + analysis with one version of Elasticsearch may fail with a different version. + This indicates it behaves incorrectly in ways that the former version did not + detect. You must work with the supplier of your storage system to address the + incompatibilities detected by the repository analysis API in any version of Elasticsearch. + NOTE: This API may not work correctly in a mixed-version cluster. *Implementation + details* NOTE: This section of documentation describes how the repository analysis + API works in this version of Elasticsearch, but you should expect the implementation + to vary between versions. 
The request parameters and response format depend on + details of the implementation so may also be different in newer versions. The + analysis comprises a number of blob-level tasks, as set by the `blob_count` parameter + and a number of compare-and-exchange operations on linearizable registers, as + set by the `register_operation_count` parameter. These tasks are distributed + over the data and master-eligible nodes in the cluster for execution. For most + blob-level tasks, the executing node first writes a blob to the repository and + then instructs some of the other nodes in the cluster to attempt to read the + data it just wrote. The size of the blob is chosen randomly, according to the + `max_blob_size` and `max_total_data_size` parameters. If any of these reads fails + then the repository does not implement the necessary read-after-write semantics + that Elasticsearch requires. For some blob-level tasks, the executing node will + instruct some of its peers to attempt to read the data before the writing process + completes. These reads are permitted to fail, but must not return partial data. + If any read returns partial data then the repository does not implement the necessary + atomicity semantics that Elasticsearch requires. For some blob-level tasks, the + executing node will overwrite the blob while its peers are reading it. In this + case the data read may come from either the original or the overwritten blob, + but the read operation must not return partial data or a mix of data from the + two blobs. If any of these reads returns partial data or a mix of the two blobs + then the repository does not implement the necessary atomicity semantics that + Elasticsearch requires for overwrites. The executing node will use a variety + of different methods to write the blob. For instance, where applicable, it will + use both single-part and multi-part uploads. Similarly, the reading nodes will + use a variety of different methods to read the data back again. For instance + they may read the entire blob from start to end or may read only a subset of + the data. For some blob-level tasks, the executing node will cancel the write + before it is complete. In this case, it still instructs some of the other nodes + in the cluster to attempt to read the blob but all of these reads must fail to + find the blob. Linearizable registers are special blobs that Elasticsearch manipulates + using an atomic compare-and-exchange operation. This operation ensures correct + and strongly-consistent behavior even when the blob is accessed by multiple nodes + at the same time. The detailed implementation of the compare-and-exchange operation + on linearizable registers varies by repository type. Repository analysis verifies + that that uncontended compare-and-exchange operations on a linearizable register + blob always succeed. Repository analysis also verifies that contended operations + either succeed or report the contention but do not return incorrect results. + If an operation fails due to contention, Elasticsearch retries the operation + until it succeeds. Most of the compare-and-exchange operations performed by repository + analysis atomically increment a counter which is represented as an 8-byte blob. + Some operations also verify the behavior on small blobs with sizes other than + 8 bytes. + + ``_ + + :param name: The name of the repository. + :param blob_count: The total number of blobs to write to the repository during + the test. For realistic experiments, you should set it to at least `2000`. 
+ :param concurrency: The number of operations to run concurrently during the test. + :param detailed: Indicates whether to return detailed results, including timing + information for every operation performed during the analysis. If false, + it returns only a summary of the analysis. + :param early_read_node_count: The number of nodes on which to perform an early + read operation while writing each blob. Early read operations are only rarely + performed. + :param max_blob_size: The maximum size of a blob to be written during the test. + For realistic experiments, you should set it to at least `2gb`. + :param max_total_data_size: An upper limit on the total size of all the blobs + written during the test. For realistic experiments, you should set it to + at least `1tb`. + :param rare_action_probability: The probability of performing a rare action such + as an early read, an overwrite, or an aborted write on each blob. + :param rarely_abort_writes: Indicates whether to rarely cancel writes before + they complete. + :param read_node_count: The number of nodes on which to read a blob after writing. + :param register_operation_count: The minimum number of linearizable register + operations to perform in total. For realistic experiments, you should set + it to at least `100`. + :param seed: The seed for the pseudo-random number generator used to generate + the list of operations performed during the test. To repeat the same set + of operations in multiple experiments, use the same seed in each experiment. + Note that the operations are performed concurrently so might not always happen + in the same order on each run. + :param timeout: The period of time to wait for the test to complete. If no response + is received before the timeout expires, the test is cancelled and returns + an error. 
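
A minimal sketch of the repository analysis API documented above: a first run with default parameters, followed by a larger run along the lines recommended in the description (the repository name is a placeholder):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("https://localhost:9200")  # placeholder endpoint and credentials

    # First pass: default parameter values, generous timeout.
    client.snapshot.repository_analyze(name="my_repository", timeout="1h")

    # Larger pass, scaled up towards the recommended minimums.
    client.snapshot.repository_analyze(
        name="my_repository",
        blob_count=2000,
        max_blob_size="2gb",
        max_total_data_size="1tb",
        register_operation_count=100,
        detailed=True,
        timeout="2h",
    )
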
+ """ + if name in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'name'") + __path_parts: t.Dict[str, str] = {"repository": _quote(name)} + __path = f'/_snapshot/{__path_parts["repository"]}/_analyze' + __query: t.Dict[str, t.Any] = {} + if blob_count is not None: + __query["blob_count"] = blob_count + if concurrency is not None: + __query["concurrency"] = concurrency + if detailed is not None: + __query["detailed"] = detailed + if early_read_node_count is not None: + __query["early_read_node_count"] = early_read_node_count + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if max_blob_size is not None: + __query["max_blob_size"] = max_blob_size + if max_total_data_size is not None: + __query["max_total_data_size"] = max_total_data_size + if pretty is not None: + __query["pretty"] = pretty + if rare_action_probability is not None: + __query["rare_action_probability"] = rare_action_probability + if rarely_abort_writes is not None: + __query["rarely_abort_writes"] = rarely_abort_writes + if read_node_count is not None: + __query["read_node_count"] = read_node_count + if register_operation_count is not None: + __query["register_operation_count"] = register_operation_count + if seed is not None: + __query["seed"] = seed + if timeout is not None: + __query["timeout"] = timeout + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + endpoint_id="snapshot.repository_analyze", + path_parts=__path_parts, + ) + @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def repository_verify_integrity( @@ -684,7 +899,7 @@ def repository_verify_integrity( in future versions. NOTE: This API may not work correctly in a mixed-version cluster. - ``_ + ``_ :param name: A repository name :param blob_thread_pool_concurrency: Number of threads to use for reading blob @@ -794,7 +1009,7 @@ def restore( or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot. - ``_ + ``_ :param repository: A repository name :param snapshot: A snapshot name @@ -898,7 +1113,7 @@ def status( These requests can also tax machine resources and, when using cloud storage, incur high processing costs. - ``_ + ``_ :param repository: A repository name :param snapshot: A comma-separated list of snapshot names @@ -958,7 +1173,7 @@ def verify_repository( Verify a snapshot repository. Check for common misconfigurations in a snapshot repository. - ``_ + ``_ :param name: A repository name :param master_timeout: Explicit operation timeout for connection to master node diff --git a/elasticsearch/_sync/client/sql.py b/elasticsearch/_sync/client/sql.py index dc5f238e8..e8de30d55 100644 --- a/elasticsearch/_sync/client/sql.py +++ b/elasticsearch/_sync/client/sql.py @@ -85,11 +85,14 @@ def delete_async( ) -> ObjectApiResponse[t.Any]: """ Delete an async SQL search. Delete an async SQL search or a stored synchronous - SQL search. If the search is still running, the API cancels it. + SQL search. If the search is still running, the API cancels it. If the Elasticsearch + security features are enabled, only the following users can use this API to delete + a search: * Users with the `cancel_task` cluster privilege. * The user who first + submitted the search. ``_ - :param id: Identifier for the search. 
+ :param id: The identifier for the search. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -132,20 +135,23 @@ def get_async( ) -> ObjectApiResponse[t.Any]: """ Get async SQL search results. Get the current status and available results for - an async SQL search or stored synchronous SQL search. + an async SQL search or stored synchronous SQL search. If the Elasticsearch security + features are enabled, only the user who first submitted the SQL search can retrieve + the search using this API. ``_ - :param id: Identifier for the search. - :param delimiter: Separator for CSV results. The API only supports this parameter - for CSV responses. - :param format: Format for the response. You must specify a format using this - parameter or the Accept HTTP header. If you specify both, the API uses this - parameter. - :param keep_alive: Retention period for the search and its results. Defaults + :param id: The identifier for the search. + :param delimiter: The separator for CSV results. The API supports this parameter + only for CSV responses. + :param format: The format for the response. You must specify a format using this + parameter or the `Accept` HTTP header. If you specify both, the API uses + this parameter. + :param keep_alive: The retention period for the search and its results. It defaults to the `keep_alive` period for the original SQL search. - :param wait_for_completion_timeout: Period to wait for complete results. Defaults - to no timeout, meaning the request waits for complete search results. + :param wait_for_completion_timeout: The period to wait for complete results. + It defaults to no timeout, meaning the request waits for complete search + results. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -194,7 +200,7 @@ def get_async_status( ``_ - :param id: Identifier for the search. + :param id: The identifier for the search. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -221,6 +227,7 @@ def get_async_status( @_rewrite_parameters( body_fields=( + "allow_partial_search_results", "catalog", "columnar", "cursor", @@ -243,6 +250,7 @@ def get_async_status( def query( self, *, + allow_partial_search_results: t.Optional[bool] = None, catalog: t.Optional[str] = None, columnar: t.Optional[bool] = None, cursor: t.Optional[str] = None, @@ -277,36 +285,45 @@ def query( ``_ - :param catalog: Default catalog (cluster) for queries. If unspecified, the queries - execute on the data in the local cluster only. - :param columnar: If true, the results in a columnar fashion: one row represents - all the values of a certain column from the current page of results. - :param cursor: Cursor used to retrieve a set of paginated results. If you specify - a cursor, the API only uses the `columnar` and `time_zone` request body parameters. - It ignores other request body parameters. - :param fetch_size: The maximum number of rows (or entries) to return in one response - :param field_multi_value_leniency: Throw an exception when encountering multiple - values for a field (default) or be lenient and return the first value from - the list (without any guarantees of what that will be - typically the first - in natural ascending order). - :param filter: Elasticsearch query DSL for additional filtering. - :param format: Format for the response. - :param index_using_frozen: If true, the search can run on frozen indices. Defaults - to false. 
- :param keep_alive: Retention period for an async or saved synchronous search. - :param keep_on_completion: If true, Elasticsearch stores synchronous searches - if you also specify the wait_for_completion_timeout parameter. If false, - Elasticsearch only stores async searches that don’t finish before the wait_for_completion_timeout. - :param page_timeout: The timeout before a pagination request fails. - :param params: Values for parameters in the query. - :param query: SQL query to run. + :param allow_partial_search_results: If `true`, the response has partial results + when there are shard request timeouts or shard failures. If `false`, the + API returns an error with no partial results. + :param catalog: The default catalog (cluster) for queries. If unspecified, the + queries execute on the data in the local cluster only. + :param columnar: If `true`, the results are in a columnar fashion: one row represents + all the values of a certain column from the current page of results. The + API supports this parameter only for CBOR, JSON, SMILE, and YAML responses. + :param cursor: The cursor used to retrieve a set of paginated results. If you + specify a cursor, the API only uses the `columnar` and `time_zone` request + body parameters. It ignores other request body parameters. + :param fetch_size: The maximum number of rows (or entries) to return in one response. + :param field_multi_value_leniency: If `false`, the API returns an exception when + encountering multiple values for a field. If `true`, the API is lenient and + returns the first value from the array with no guarantee of consistent results. + :param filter: The Elasticsearch query DSL for additional filtering. + :param format: The format for the response. You can also specify a format using + the `Accept` HTTP header. If you specify both this parameter and the `Accept` + HTTP header, this parameter takes precedence. + :param index_using_frozen: If `true`, the search can run on frozen indices. + :param keep_alive: The retention period for an async or saved synchronous search. + :param keep_on_completion: If `true`, Elasticsearch stores synchronous searches + if you also specify the `wait_for_completion_timeout` parameter. If `false`, + Elasticsearch only stores async searches that don't finish before the `wait_for_completion_timeout`. + :param page_timeout: The minimum retention period for the scroll cursor. After + this time period, a pagination request might fail because the scroll cursor + is no longer available. Subsequent scroll requests prolong the lifetime of + the scroll cursor by the duration of `page_timeout` in the scroll request. + :param params: The values for parameters in the query. + :param query: The SQL query to run. :param request_timeout: The timeout before the request fails. - :param runtime_mappings: Defines one or more runtime fields in the search request. - These fields take precedence over mapped fields with the same name. - :param time_zone: ISO-8601 time zone ID for the search. - :param wait_for_completion_timeout: Period to wait for complete results. Defaults - to no timeout, meaning the request waits for complete search results. If - the search doesn’t finish within this period, the search becomes async. + :param runtime_mappings: One or more runtime fields for the search request. These + fields take precedence over mapped fields with the same name. + :param time_zone: The ISO-8601 time zone ID for the search. + :param wait_for_completion_timeout: The period to wait for complete results. 
+ It defaults to no timeout, meaning the request waits for complete search + results. If the search doesn't finish within this period, the search becomes + async. To save a synchronous search, you must specify this parameter and + the `keep_on_completion` parameter. """ __path_parts: t.Dict[str, str] = {} __path = "/_sql" @@ -323,6 +340,8 @@ def query( if pretty is not None: __query["pretty"] = pretty if not __body: + if allow_partial_search_results is not None: + __body["allow_partial_search_results"] = allow_partial_search_results if catalog is not None: __body["catalog"] = catalog if columnar is not None: @@ -384,14 +403,15 @@ def translate( ) -> ObjectApiResponse[t.Any]: """ Translate SQL into Elasticsearch queries. Translate an SQL search into a search - API request containing Query DSL. + API request containing Query DSL. It accepts the same request body parameters + as the SQL search API, excluding `cursor`. ``_ - :param query: SQL query to run. + :param query: The SQL query to run. :param fetch_size: The maximum number of rows (or entries) to return in one response. - :param filter: Elasticsearch query DSL for additional filtering. - :param time_zone: ISO-8601 time zone ID for the search. + :param filter: The Elasticsearch query DSL for additional filtering. + :param time_zone: The ISO-8601 time zone ID for the search. """ if query is None and body is None: raise ValueError("Empty value passed for parameter 'query'") diff --git a/elasticsearch/_sync/client/synonyms.py b/elasticsearch/_sync/client/synonyms.py index ccc4a6d89..606a85b04 100644 --- a/elasticsearch/_sync/client/synonyms.py +++ b/elasticsearch/_sync/client/synonyms.py @@ -36,11 +36,25 @@ def delete_synonym( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a synonym set. + Delete a synonym set. You can only delete a synonyms set that is not in use by + any index analyzer. Synonyms sets can be used in synonym graph token filters + and synonym token filters. These synonym filters can be used as part of search + analyzers. Analyzers need to be loaded when an index is restored (such as when + a node starts, or the index becomes open). Even if the analyzer is not used on + any field mapping, it still needs to be loaded on the index recovery phase. If + any analyzers cannot be loaded, the index becomes unavailable and the cluster + status becomes red or yellow as index shards are not available. To prevent that, + synonyms sets that are used in analyzers can't be deleted. A delete request in + this case will return a 400 response code. To remove a synonyms set, you must + first remove all indices that contain analyzers using it. You can migrate an + index by creating a new index that does not contain the token filter with the + synonyms set, and use the reindex API in order to copy over the index data. Once + finished, you can delete the index. When the synonyms set is not used in analyzers, + you will be able to delete it. ``_ - :param id: The id of the synonyms set to be deleted + :param id: The synonyms set identifier to delete. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -81,8 +95,8 @@ def delete_synonym_rule( ``_ - :param set_id: The id of the synonym set to be updated - :param rule_id: The id of the synonym rule to be deleted + :param set_id: The ID of the synonym set to update. + :param rule_id: The ID of the synonym rule to delete. 
""" if set_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'set_id'") @@ -131,9 +145,9 @@ def get_synonym( ``_ - :param id: "The id of the synonyms set to be retrieved - :param from_: Starting offset for query rules to be retrieved - :param size: specifies a max number of query rules to retrieve + :param id: The synonyms set identifier to retrieve. + :param from_: The starting offset for query rules to retrieve. + :param size: The max number of query rules to retrieve. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -178,8 +192,8 @@ def get_synonym_rule( ``_ - :param set_id: The id of the synonym set to retrieve the synonym rule from - :param rule_id: The id of the synonym rule to retrieve + :param set_id: The ID of the synonym set to retrieve the synonym rule from. + :param rule_id: The ID of the synonym rule to retrieve. """ if set_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'set_id'") @@ -225,10 +239,10 @@ def get_synonyms_sets( """ Get all synonym sets. Get a summary of all defined synonym sets. - ``_ + ``_ - :param from_: Starting offset - :param size: specifies a max number of results to get + :param from_: The starting offset for synonyms sets to retrieve. + :param size: The maximum number of synonyms sets to retrieve. """ __path_parts: t.Dict[str, str] = {} __path = "/_synonyms" @@ -274,12 +288,15 @@ def put_synonym( """ Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 synonym rules per set. If you need to manage more synonym rules, you can create - multiple synonym sets. + multiple synonym sets. When an existing synonyms set is updated, the search analyzers + that use the synonyms set are reloaded automatically for all indices. This is + equivalent to invoking the reload search analyzers API for all indices that use + the synonyms set. ``_ - :param id: The id of the synonyms set to be created or updated - :param synonyms_set: The synonym set information to update + :param id: The ID of the synonyms set to be created or updated. + :param synonyms_set: The synonym rules definitions for the synonyms set. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -328,13 +345,16 @@ def put_synonym_rule( ) -> ObjectApiResponse[t.Any]: """ Create or update a synonym rule. Create or update a synonym rule in a synonym - set. + set. If any of the synonym rules included is invalid, the API returns an error. + When you update a synonym rule, all analyzers using the synonyms set will be + reloaded automatically to reflect the new rule. ``_ - :param set_id: The id of the synonym set to be updated with the synonym rule - :param rule_id: The id of the synonym rule to be updated or created - :param synonyms: + :param set_id: The ID of the synonym set. + :param rule_id: The ID of the synonym rule to be updated or created. + :param synonyms: The synonym rule information definition, which must be in Solr + format. """ if set_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'set_id'") diff --git a/elasticsearch/_sync/client/tasks.py b/elasticsearch/_sync/client/tasks.py index 89f210a88..09d2c6be3 100644 --- a/elasticsearch/_sync/client/tasks.py +++ b/elasticsearch/_sync/client/tasks.py @@ -47,27 +47,30 @@ def cancel( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Cancel a task. 
A task may continue to run for some time after it has been cancelled - because it may not be able to safely stop its current activity straight away. - It is also possible that Elasticsearch must complete its work on other tasks - before it can process the cancellation. The get task information API will continue - to list these cancelled tasks until they complete. The cancelled flag in the - response indicates that the cancellation command has been processed and the task - will stop as soon as possible. To troubleshoot why a cancelled task does not - complete promptly, use the get task information API with the `?detailed` parameter - to identify the other tasks the system is running. You can also use the node - hot threads API to obtain detailed information about the work the system is doing + Cancel a task. WARNING: The task management API is new and should still be considered + a beta feature. The API may change in ways that are not backwards compatible. + A task may continue to run for some time after it has been cancelled because + it may not be able to safely stop its current activity straight away. It is also + possible that Elasticsearch must complete its work on other tasks before it can + process the cancellation. The get task information API will continue to list + these cancelled tasks until they complete. The cancelled flag in the response + indicates that the cancellation command has been processed and the task will + stop as soon as possible. To troubleshoot why a cancelled task does not complete + promptly, use the get task information API with the `?detailed` parameter to + identify the other tasks the system is running. You can also use the node hot + threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task. ``_ - :param task_id: ID of the task. - :param actions: Comma-separated list or wildcard expression of actions used to - limit the request. - :param nodes: Comma-separated list of node IDs or names used to limit the request. - :param parent_task_id: Parent task ID used to limit the tasks. - :param wait_for_completion: Should the request block until the cancellation of - the task and its descendant tasks is completed. Defaults to false + :param task_id: The task identifier. + :param actions: A comma-separated list or wildcard expression of actions that + is used to limit the request. + :param nodes: A comma-separated list of node IDs or names that is used to limit + the request. + :param parent_task_id: A parent task ID that is used to limit the tasks. + :param wait_for_completion: If true, the request blocks until all found tasks + are complete. """ __path_parts: t.Dict[str, str] if task_id not in SKIP_IN_PATH: @@ -118,12 +121,16 @@ def get( ) -> ObjectApiResponse[t.Any]: """ Get task information. Get information about a task currently running in the cluster. + WARNING: The task management API is new and should still be considered a beta + feature. The API may change in ways that are not backwards compatible. If the + task identifier is not found, a 404 response code indicates that there are no + resources that match the request. ``_ - :param task_id: ID of the task. - :param timeout: Period to wait for a response. If no response is received before - the timeout expires, the request fails and returns an error. + :param task_id: The task identifier. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. 
:param wait_for_completion: If `true`, the request blocks until the task has completed. """ @@ -167,7 +174,6 @@ def list( t.Union[str, t.Literal["nodes", "none", "parents"]] ] = None, human: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, nodes: t.Optional[t.Union[str, t.Sequence[str]]] = None, parent_task_id: t.Optional[str] = None, pretty: t.Optional[bool] = None, @@ -176,25 +182,45 @@ def list( ) -> ObjectApiResponse[t.Any]: """ Get all tasks. Get information about the tasks currently running on one or more - nodes in the cluster. + nodes in the cluster. WARNING: The task management API is new and should still + be considered a beta feature. The API may change in ways that are not backwards + compatible. **Identifying running tasks** The `X-Opaque-Id header`, when provided + on the HTTP request header, is going to be returned as a header in the response + as well as in the headers field for in the task information. This enables you + to track certain calls or associate certain tasks with the client that started + them. For example: ``` curl -i -H "X-Opaque-Id: 123456" "http://localhost:9200/_tasks?group_by=parents" + ``` The API returns the following result: ``` HTTP/1.1 200 OK X-Opaque-Id: 123456 + content-type: application/json; charset=UTF-8 content-length: 831 { "tasks" : + { "u5lcZHqcQhu-rUoFaqDphA:45" : { "node" : "u5lcZHqcQhu-rUoFaqDphA", "id" : 45, + "type" : "transport", "action" : "cluster:monitor/tasks/lists", "start_time_in_millis" + : 1513823752749, "running_time_in_nanos" : 293139, "cancellable" : false, "headers" + : { "X-Opaque-Id" : "123456" }, "children" : [ { "node" : "u5lcZHqcQhu-rUoFaqDphA", + "id" : 46, "type" : "direct", "action" : "cluster:monitor/tasks/lists[n]", "start_time_in_millis" + : 1513823752750, "running_time_in_nanos" : 92133, "cancellable" : false, "parent_task_id" + : "u5lcZHqcQhu-rUoFaqDphA:45", "headers" : { "X-Opaque-Id" : "123456" } } ] } + } } ``` In this example, `X-Opaque-Id: 123456` is the ID as a part of the response + header. The `X-Opaque-Id` in the task `headers` is the ID for the task that was + initiated by the REST request. The `X-Opaque-Id` in the children `headers` is + the child task of the task that was initiated by the REST request. ``_ - :param actions: Comma-separated list or wildcard expression of actions used to - limit the request. + :param actions: A comma-separated list or wildcard expression of actions used + to limit the request. For example, you can use `cluser:*` to retrieve all + cluster-related tasks. :param detailed: If `true`, the response includes detailed information about - shard recoveries. This information is useful to distinguish tasks from each + the running tasks. This information is useful to distinguish tasks from each other but is more costly to run. - :param group_by: Key used to group tasks in the response. - :param master_timeout: Period to wait for a connection to the master node. If - no response is received before the timeout expires, the request fails and - returns an error. - :param nodes: Comma-separated list of node IDs or names used to limit returned - information. - :param parent_task_id: Parent task ID used to limit returned information. To - return all tasks, omit this parameter or use a value of `-1`. - :param timeout: Period to wait for a response. If no response is received before - the timeout expires, the request fails and returns an error. + :param group_by: A key that is used to group tasks in the response. 
The task + lists can be grouped either by nodes or by parent tasks. + :param nodes: A comma-separated list of node IDs or names that is used to limit + the returned information. + :param parent_task_id: A parent task identifier that is used to limit returned + information. To return all tasks, omit this parameter or use a value of `-1`. + If the parent task is not found, the API does not return a 404 response code. + :param timeout: The period to wait for each node to respond. If a node does not + respond before its timeout expires, the response does not include its information. + However, timed out nodes are included in the `node_failures` property. :param wait_for_completion: If `true`, the request blocks until the operation is complete. """ @@ -213,8 +239,6 @@ def list( __query["group_by"] = group_by if human is not None: __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout if nodes is not None: __query["nodes"] = nodes if parent_task_id is not None: diff --git a/elasticsearch/_sync/client/text_structure.py b/elasticsearch/_sync/client/text_structure.py index 3a480c87b..bdfe65747 100644 --- a/elasticsearch/_sync/client/text_structure.py +++ b/elasticsearch/_sync/client/text_structure.py @@ -54,7 +54,21 @@ def find_field_structure( ) -> ObjectApiResponse[t.Any]: """ Find the structure of a text field. Find the structure of a text field in an - Elasticsearch index. + Elasticsearch index. This API provides a starting point for extracting further + information from log messages already ingested into Elasticsearch. For example, + if you have ingested data into a very simple index that has just `@timestamp` + and message fields, you can use this API to see what common structure exists + in the message field. The response from the API contains: * Sample messages. + * Statistics that reveal the most common values for all fields detected within + the text and basic numeric statistics for numeric fields. * Information about + the structure of the text, which is useful when you write ingest configurations + to index it or similarly formatted text. * Appropriate mappings for an Elasticsearch + index, which you could use to ingest the text. All this information can be calculated + by the structure finder with no guidance. However, you can optionally override + some of the decisions about the text structure by specifying one or more query + parameters. If the structure finder produces unexpected results, specify the + `explain` query parameter and an explanation will appear in the response. It + helps determine why the returned structure was chosen. ``_ @@ -84,9 +98,9 @@ def find_field_structure( `field1`, and `field2` are used in the `grok_pattern` output. The intention in that situation is that a user who knows the meanings will rename the fields before using them. - :param explain: If true, the response includes a field named `explanation`, which - is an array of strings that indicate how the structure finder produced its - result. + :param explain: If `true`, the response includes a field named `explanation`, + which is an array of strings that indicate how the structure finder produced + its result. :param format: The high level structure of the text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. 
If the format is set to @@ -107,7 +121,7 @@ def find_field_structure( :param should_trim_fields: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value - is true. Otherwise, the default value is false. + is true. Otherwise, the default value is `false`. :param timeout: The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires, it will be stopped. :param timestamp_field: The name of the field that contains the primary timestamp @@ -236,7 +250,10 @@ def find_message_structure( Appropriate mappings for an Elasticsearch index, which you could use to ingest the text. All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about - the text structure by specifying one or more query parameters. + the text structure by specifying one or more query parameters. If the structure + finder produces unexpected results, specify the `explain` query parameter and + an explanation will appear in the response. It helps determine why the returned + structure was chosen. ``_ @@ -284,7 +301,7 @@ def find_message_structure( :param should_trim_fields: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value - is true. Otherwise, the default value is false. + is true. Otherwise, the default value is `false`. :param timeout: The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires, it will be stopped. :param timestamp_field: The name of the field that contains the primary timestamp @@ -413,43 +430,51 @@ def find_structure( ``_ :param text_files: - :param charset: The text’s character set. It must be a character set that is - supported by the JVM that Elasticsearch uses. For example, UTF-8, UTF-16LE, - windows-1252, or EUC-JP. If this parameter is not specified, the structure + :param charset: The text's character set. It must be a character set that is + supported by the JVM that Elasticsearch uses. For example, `UTF-8`, `UTF-16LE`, + `windows-1252`, or `EUC-JP`. If this parameter is not specified, the structure finder chooses an appropriate character set. - :param column_names: If you have set format to delimited, you can specify the + :param column_names: If you have set format to `delimited`, you can specify the column names in a comma-separated list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header role, columns are named "column1", "column2", "column3", for example. - :param delimiter: If you have set format to delimited, you can specify the character - used to delimit the values in each row. Only a single character is supported; - the delimiter cannot have multiple characters. By default, the API considers - the following possibilities: comma, tab, semi-colon, and pipe (|). In this - default scenario, all rows must have the same number of fields for the delimited - format to be detected. If you specify a delimiter, up to 10% of the rows - can have a different number of columns than the first row. 
- :param ecs_compatibility: The mode of compatibility with ECS compliant Grok patterns - (disabled or v1, default: disabled). - :param explain: If this parameter is set to true, the response includes a field + :param delimiter: If you have set `format` to `delimited`, you can specify the + character used to delimit the values in each row. Only a single character + is supported; the delimiter cannot have multiple characters. By default, + the API considers the following possibilities: comma, tab, semi-colon, and + pipe (`|`). In this default scenario, all rows must have the same number + of fields for the delimited format to be detected. If you specify a delimiter, + up to 10% of the rows can have a different number of columns than the first + row. + :param ecs_compatibility: The mode of compatibility with ECS compliant Grok patterns. + Use this parameter to specify whether to use ECS Grok patterns instead of + legacy ones when the structure finder creates a Grok pattern. Valid values + are `disabled` and `v1`. This setting primarily has an impact when a whole + message Grok pattern such as `%{CATALINALOG}` matches the input. If the structure + finder identifies a common structure but has no idea of meaning then generic + field names such as `path`, `ipaddress`, `field1`, and `field2` are used + in the `grok_pattern` output, with the intention that a user who knows the + meanings rename these fields before using it. + :param explain: If this parameter is set to `true`, the response includes a field named explanation, which is an array of strings that indicate how the structure finder produced its result. If the structure finder produces unexpected results for some text, use this query parameter to help you determine why the returned structure was chosen. - :param format: The high level structure of the text. Valid values are ndjson, - xml, delimited, and semi_structured_text. By default, the API chooses the - format. In this default scenario, all rows must have the same number of fields - for a delimited format to be detected. If the format is set to delimited - and the delimiter is not set, however, the API tolerates up to 5% of rows - that have a different number of columns than the first row. - :param grok_pattern: If you have set format to semi_structured_text, you can - specify a Grok pattern that is used to extract fields from every message + :param format: The high level structure of the text. Valid values are `ndjson`, + `xml`, `delimited`, and `semi_structured_text`. By default, the API chooses + the format. In this default scenario, all rows must have the same number + of fields for a delimited format to be detected. If the format is set to + `delimited` and the delimiter is not set, however, the API tolerates up to + 5% of rows that have a different number of columns than the first row. + :param grok_pattern: If you have set `format` to `semi_structured_text`, you + can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match - what is specified in the timestamp_field parameter. If that parameter is + what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match - "timestamp". If grok_pattern is not specified, the structure finder creates + "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern. 
- :param has_header_row: If you have set format to delimited, you can use this + :param has_header_row: If you have set `format` to `delimited`, you can use this parameter to indicate whether the column names are in the first row of the text. If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows. @@ -459,26 +484,58 @@ def find_structure( that this may lead to very long processing times if the way to group lines into messages is misdetected. :param lines_to_sample: The number of lines to include in the structural analysis, - starting from the beginning of the text. The minimum is 2; If the value of + starting from the beginning of the text. The minimum is 2. If the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of - the lines. - :param quote: If you have set format to delimited, you can specify the character + the lines. NOTE: The number of lines and the variation of the lines affects + the speed of the analysis. For example, if you upload text where the first + 1000 lines are all variations on the same message, the analysis will find + more commonality than would be seen with a bigger sample. If possible, however, + it is more efficient to upload sample text with more variety in the first + 1000 lines than to request analysis of 100000 lines to achieve some variety. + :param quote: If you have set `format` to `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not - specified, the default value is a double quote ("). If your delimited text + specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. - :param should_trim_fields: If you have set format to delimited, you can specify + :param should_trim_fields: If you have set `format` to `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. - If this parameter is not specified and the delimiter is pipe (|), the default - value is true. Otherwise, the default value is false. - :param timeout: Sets the maximum amount of time that the structure analysis can - take. If the analysis is still running when the timeout expires then it will - be stopped. - :param timestamp_field: Optional parameter to specify the timestamp field in - the file + If this parameter is not specified and the delimiter is pipe (`|`), the default + value is `true`. Otherwise, the default value is `false`. + :param timeout: The maximum amount of time that the structure analysis can take. + If the analysis is still running when the timeout expires then it will be + stopped. + :param timestamp_field: The name of the field that contains the primary timestamp + of each record in the text. In particular, if the text were ingested into + an index, this is the field that would be used to populate the `@timestamp` + field. If the `format` is `semi_structured_text`, this field must match the + name of the appropriate extraction in the `grok_pattern`. Therefore, for + semi-structured text, it is best not to specify this parameter unless `grok_pattern` + is also specified. For structured text, if you specify this parameter, the + field must exist within the text. 
If this parameter is not specified, the + structure finder makes a decision about which field (if any) is the primary + timestamp field. For structured text, it is not compulsory to have a timestamp + in the text. :param timestamp_format: The Java time format of the timestamp field in the text. + Only a subset of Java time format letter groups are supported: * `a` * `d` + * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM` + * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz` Additionally `S` letter + groups (fractional seconds) of length one to nine are supported providing + they occur after `ss` and separated from the `ss` by a `.`, `,` or `:`. Spacing + and punctuation is also permitted with the exception of `?`, newline and + carriage return, together with literal text enclosed in single quotes. For + example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. One + valuable use case for this parameter is when the format is semi-structured + text, there are multiple timestamp formats in the text, and you know which + format corresponds to the primary timestamp, but you do not want to specify + the full `grok_pattern`. Another is when the timestamp format is one that + the structure finder does not consider by default. If this parameter is not + specified, the structure finder chooses the best format from a built-in set. + If the special value `null` is specified the structure finder will not look + for a primary timestamp in the text. When the format is semi-structured text + this will result in the structure finder treating the text as single-line + messages. """ if text_files is None and body is None: raise ValueError( @@ -556,10 +613,12 @@ def test_grok_pattern( ``_ - :param grok_pattern: Grok pattern to run on the text. - :param text: Lines of text to run the Grok pattern on. - :param ecs_compatibility: The mode of compatibility with ECS compliant Grok patterns - (disabled or v1, default: disabled). + :param grok_pattern: The Grok pattern to run on the text. + :param text: The lines of text to run the Grok pattern on. + :param ecs_compatibility: The mode of compatibility with ECS compliant Grok patterns. + Use this parameter to specify whether to use ECS Grok patterns instead of + legacy ones when the structure finder creates a Grok pattern. Valid values + are `disabled` and `v1`. """ if grok_pattern is None and body is None: raise ValueError("Empty value passed for parameter 'grok_pattern'") diff --git a/elasticsearch/_sync/client/transform.py b/elasticsearch/_sync/client/transform.py index 062e7ae3b..2faf60167 100644 --- a/elasticsearch/_sync/client/transform.py +++ b/elasticsearch/_sync/client/transform.py @@ -489,6 +489,7 @@ def reset_transform( force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ Reset a transform. Resets a transform. Before you can reset it, you must stop @@ -503,6 +504,8 @@ def reset_transform( :param force: If this value is `true`, the transform is reset regardless of its current state. If it's `false`, the transform must be stopped before it can be reset. + :param timeout: Period to wait for a response. If no response is received before + the timeout expires, the request fails and returns an error. 
""" if transform_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'transform_id'") @@ -519,6 +522,8 @@ def reset_transform( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", diff --git a/elasticsearch/_sync/client/watcher.py b/elasticsearch/_sync/client/watcher.py index 010fc26d8..cc84cf9e4 100644 --- a/elasticsearch/_sync/client/watcher.py +++ b/elasticsearch/_sync/client/watcher.py @@ -42,11 +42,15 @@ def ack_watch( in the `status.actions..ack.state` structure. IMPORTANT: If the specified watch is currently being executed, this API will return an error The reason for this behavior is to prevent overwriting the watch status from a watch execution. + Acknowledging an action throttles further executions of that action until its + `ack.state` is reset to `awaits_successful_execution`. This happens when the + condition of the watch is not met (the condition evaluates to false). ``_ - :param watch_id: Watch ID - :param action_id: A comma-separated list of the action ids to be acked + :param watch_id: The watch identifier. + :param action_id: A comma-separated list of the action identifiers to acknowledge. + If you omit this parameter, all of the actions of the watch are acknowledged. """ if watch_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'watch_id'") @@ -96,7 +100,7 @@ def activate_watch( ``_ - :param watch_id: Watch ID + :param watch_id: The watch identifier. """ if watch_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'watch_id'") @@ -136,7 +140,7 @@ def deactivate_watch( ``_ - :param watch_id: Watch ID + :param watch_id: The watch identifier. """ if watch_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'watch_id'") @@ -182,7 +186,7 @@ def delete_watch( ``_ - :param id: Watch ID + :param id: The watch identifier. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -255,11 +259,17 @@ def execute_watch( and control whether a watch record would be written to the watch history after it runs. You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline. This serves as great tool for testing - and debugging your watches prior to adding them to Watcher. + and debugging your watches prior to adding them to Watcher. When Elasticsearch + security features are enabled on your cluster, watches are run with the privileges + of the user that stored the watches. If your user is allowed to read index `a`, + but not index `b`, then the exact same set of rules will apply during execution + of a watch. When using the run watch API, the authorization data of the user + that called the API will be used as a base, instead of the information who stored + the watch. ``_ - :param id: Identifier for the watch. + :param id: The watch identifier. :param action_modes: Determines how to handle the watch actions as part of the watch execution. :param alternative_input: When present, the watch uses this object as a payload @@ -270,12 +280,12 @@ def execute_watch( :param record_execution: When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. In addition, the status of the watch is updated, possibly throttling - subsequent executions. 
This can also be specified as an HTTP parameter. + subsequent runs. This can also be specified as an HTTP parameter. :param simulated_actions: :param trigger_data: This structure is parsed as the data of the trigger event - that will be used during the watch execution + that will be used during the watch execution. :param watch: When present, this watch is used instead of the one specified in - the request. This watch is not persisted to the index and record_execution + the request. This watch is not persisted to the index and `record_execution` cannot be set. """ __path_parts: t.Dict[str, str] @@ -327,6 +337,50 @@ def execute_watch( path_parts=__path_parts, ) + @_rewrite_parameters() + def get_settings( + self, + *, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Get Watcher index settings. Get settings for the Watcher internal index (`.watches`). + Only a subset of settings are shown, for example `index.auto_expand_replicas` + and `index.number_of_replicas`. + + ``_ + + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. + """ + __path_parts: t.Dict[str, str] = {} + __path = "/_watcher/settings" + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="watcher.get_settings", + path_parts=__path_parts, + ) + @_rewrite_parameters() def get_watch( self, @@ -342,7 +396,7 @@ def get_watch( ``_ - :param id: Watch ID + :param id: The watch identifier. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -374,6 +428,7 @@ def get_watch( "input", "metadata", "throttle_period", + "throttle_period_in_millis", "transform", "trigger", ), @@ -393,7 +448,8 @@ def put_watch( input: t.Optional[t.Mapping[str, t.Any]] = None, metadata: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, - throttle_period: t.Optional[str] = None, + throttle_period: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + throttle_period_in_millis: t.Optional[t.Any] = None, transform: t.Optional[t.Mapping[str, t.Any]] = None, trigger: t.Optional[t.Mapping[str, t.Any]] = None, version: t.Optional[int] = None, @@ -414,19 +470,28 @@ def put_watch( ``_ - :param id: Watch ID - :param actions: - :param active: Specify whether the watch is in/active by default - :param condition: + :param id: The identifier for the watch. + :param actions: The list of actions that will be run if the condition matches. + :param active: The initial state of the watch. The default value is `true`, which + means the watch is active by default. + :param condition: The condition that defines if the actions should be run. 
:param if_primary_term: only update the watch if the last operation that has changed the watch has the specified primary term :param if_seq_no: only update the watch if the last operation that has changed the watch has the specified sequence number - :param input: - :param metadata: - :param throttle_period: - :param transform: - :param trigger: + :param input: The input that defines the input that loads the data for the watch. + :param metadata: Metadata JSON that will be copied into the history entries. + :param throttle_period: The minimum time between actions being run. The default + is 5 seconds. This default can be changed in the config file with the setting + `xpack.watcher.throttle.period.default_period`. If both this value and the + `throttle_period_in_millis` parameter are specified, Watcher uses the last + parameter included in the request. + :param throttle_period_in_millis: Minimum time in milliseconds between actions + being run. Defaults to 5000. If both this value and the throttle_period parameter + are specified, Watcher uses the last parameter included in the request. + :param transform: The transform that processes the watch payload to prepare it + for the watch actions. + :param trigger: The trigger that defines when the watch should run. :param version: Explicit version number for concurrency control """ if id in SKIP_IN_PATH: @@ -462,6 +527,8 @@ def put_watch( __body["metadata"] = metadata if throttle_period is not None: __body["throttle_period"] = throttle_period + if throttle_period_in_millis is not None: + __body["throttle_period_in_millis"] = throttle_period_in_millis if transform is not None: __body["transform"] = transform if trigger is not None: @@ -508,16 +575,17 @@ def query_watches( ) -> ObjectApiResponse[t.Any]: """ Query watches. Get all registered watches in a paginated manner and optionally - filter watches by a query. + filter watches by a query. Note that only the `_id` and `metadata.*` fields are + queryable or sortable. ``_ - :param from_: The offset from the first result to fetch. Needs to be non-negative. - :param query: Optional, query filter watches to be returned. - :param search_after: Optional search After to do pagination using last hit’s - sort values. - :param size: The number of hits to return. Needs to be non-negative. - :param sort: Optional sort definition. + :param from_: The offset from the first result to fetch. It must be non-negative. + :param query: A query that filters the watches to be returned. + :param search_after: Retrieve the next page of hits using a set of sort values + from the previous page. + :param size: The number of hits to return. It must be non-negative. + :param sort: One or more fields used to sort the search results. """ __path_parts: t.Dict[str, str] = {} __path = "/_watcher/_query/watches" @@ -575,12 +643,15 @@ def start( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ Start the watch service. Start the Watcher service if it is not already running. ``_ + + :param master_timeout: Period to wait for a connection to the master node. 
""" __path_parts: t.Dict[str, str] = {} __path = "/_watcher/_start" @@ -591,6 +662,8 @@ def start( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -635,7 +708,8 @@ def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get Watcher statistics. + Get Watcher statistics. This API always returns basic metrics. You retrieve more + metrics by using the metric parameter. ``_ @@ -678,12 +752,17 @@ def stop( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ Stop the watch service. Stop the Watcher service if it is running. ``_ + + :param master_timeout: The period to wait for the master node. If the master + node is not available before the timeout expires, the request fails and returns + an error. To indicate that the request should never timeout, set it to `-1`. """ __path_parts: t.Dict[str, str] = {} __path = "/_watcher/_stop" @@ -694,6 +773,8 @@ def stop( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -705,3 +786,70 @@ def stop( endpoint_id="watcher.stop", path_parts=__path_parts, ) + + @_rewrite_parameters( + body_fields=("index_auto_expand_replicas", "index_number_of_replicas"), + parameter_aliases={ + "index.auto_expand_replicas": "index_auto_expand_replicas", + "index.number_of_replicas": "index_number_of_replicas", + }, + ) + def update_settings( + self, + *, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + index_auto_expand_replicas: t.Optional[str] = None, + index_number_of_replicas: t.Optional[int] = None, + master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + Update Watcher index settings. Update settings for the Watcher internal index + (`.watches`). Only a subset of settings can be modified. This includes `index.auto_expand_replicas` + and `index.number_of_replicas`. + + ``_ + + :param index_auto_expand_replicas: + :param index_number_of_replicas: + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. 
+ """ + __path_parts: t.Dict[str, str] = {} + __path = "/_watcher/settings" + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if master_timeout is not None: + __query["master_timeout"] = master_timeout + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + if not __body: + if index_auto_expand_replicas is not None: + __body["index.auto_expand_replicas"] = index_auto_expand_replicas + if index_number_of_replicas is not None: + __body["index.number_of_replicas"] = index_number_of_replicas + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="watcher.update_settings", + path_parts=__path_parts, + ) diff --git a/elasticsearch/_sync/client/xpack.py b/elasticsearch/_sync/client/xpack.py index bb81d554a..50a085f57 100644 --- a/elasticsearch/_sync/client/xpack.py +++ b/elasticsearch/_sync/client/xpack.py @@ -96,9 +96,10 @@ def usage( ``_ - :param master_timeout: Period to wait for a connection to the master node. If - no response is received before the timeout expires, the request fails and - returns an error. + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. To indicate that the request should never timeout, + set it to `-1`. """ __path_parts: t.Dict[str, str] = {} __path = "/_xpack/usage" From b139828171a3efe2087261370e384116fa48a7b3 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 12:19:12 +0400 Subject: [PATCH 21/65] Fix simulate client docs (#2749) (#2752) --- .readthedocs.yml | 1 + docs/sphinx/api/simulate.rst | 2 +- elasticsearch/client.py | 2 ++ noxfile.py | 4 +++- 4 files changed, 7 insertions(+), 2 deletions(-) diff --git a/.readthedocs.yml b/.readthedocs.yml index 0bb2cebab..bcef38ebe 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -17,3 +17,4 @@ python: sphinx: configuration: docs/sphinx/conf.py + fail_on_warning: true diff --git a/docs/sphinx/api/simulate.rst b/docs/sphinx/api/simulate.rst index eabd3fc07..64607bbac 100644 --- a/docs/sphinx/api/simulate.rst +++ b/docs/sphinx/api/simulate.rst @@ -1,4 +1,4 @@ -.. _snapshot-lifecycle-management: +.. 
_simulate: Simulate -------- diff --git a/elasticsearch/client.py b/elasticsearch/client.py index 9ea6f3667..af25c5ae1 100644 --- a/elasticsearch/client.py +++ b/elasticsearch/client.py @@ -57,6 +57,7 @@ ) from ._sync.client.security import SecurityClient as SecurityClient # noqa: F401 from ._sync.client.shutdown import ShutdownClient as ShutdownClient # noqa: F401 +from ._sync.client.simulate import SimulateClient as SimulateClient # noqa: F401 from ._sync.client.slm import SlmClient as SlmClient # noqa: F401 from ._sync.client.snapshot import SnapshotClient as SnapshotClient # noqa: F401 from ._sync.client.sql import SqlClient as SqlClient # noqa: F401 @@ -107,6 +108,7 @@ "SearchableSnapshotsClient", "SecurityClient", "ShutdownClient", + "SimulateClient", "SlmClient", "SnapshotClient", "SqlClient", diff --git a/noxfile.py b/noxfile.py index b42ed0d2f..8242e1ce0 100644 --- a/noxfile.py +++ b/noxfile.py @@ -127,4 +127,6 @@ def lint(session): @nox.session() def docs(session): session.install(".[docs]") - session.run("sphinx-build", "docs/sphinx/", "docs/sphinx/_build", "-b", "html") + session.run( + "sphinx-build", "-W", "docs/sphinx/", "docs/sphinx/_build", "-b", "html" + ) From 07ec64530c785989a4032c69b063d4a1d4ea9ebc Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 16:59:21 +0400 Subject: [PATCH 22/65] Fix formatting of Markdown code blocks (#2750) (#2754) * Fix formatting of Markdown code blocks * Fix lint (cherry picked from commit 3599f01a189ddd1c3de709e2b35accce9650e6d4) Co-authored-by: Quentin Pradet --- docs/sphinx/_static/css/custom.css | 16 ++++++++++++++++ docs/sphinx/conf.py | 2 ++ 2 files changed, 18 insertions(+) create mode 100644 docs/sphinx/_static/css/custom.css diff --git a/docs/sphinx/_static/css/custom.css b/docs/sphinx/_static/css/custom.css new file mode 100644 index 000000000..e6ffd086e --- /dev/null +++ b/docs/sphinx/_static/css/custom.css @@ -0,0 +1,16 @@ +/* Display GitHub Flavored Markdown code blocks correctly */ + +.rst-content pre { + background-color: #f5f5f5; + border-radius: 6px; + padding: 16px; + margin: 16px 0; + overflow-x: auto; +} + +.rst-content pre code { + background-color: #f5f5f5; + white-space: pre; + border: none; + padding: 0; +} diff --git a/docs/sphinx/conf.py b/docs/sphinx/conf.py index d7c3f7751..7104660b5 100644 --- a/docs/sphinx/conf.py +++ b/docs/sphinx/conf.py @@ -44,6 +44,8 @@ pygments_style = "sphinx" html_theme = "sphinx_rtd_theme" +html_static_path = ["_static"] +html_css_files = ["css/custom.css"] intersphinx_mapping = { "python": ("https://docs.python.org/3", None), From e995008bfbe5607c96a2785285e079b7cf9aab02 Mon Sep 17 00:00:00 2001 From: Quentin Pradet Date: Tue, 21 Jan 2025 17:53:58 +0400 Subject: [PATCH 23/65] Auto-generated code for 8.x (#2759) --- elasticsearch/_async/client/__init__.py | 955 +++++++++++++----- elasticsearch/_async/client/async_search.py | 8 +- elasticsearch/_async/client/autoscaling.py | 8 +- elasticsearch/_async/client/cat.py | 294 +++--- elasticsearch/_async/client/ccr.py | 26 +- elasticsearch/_async/client/cluster.py | 32 +- elasticsearch/_async/client/connector.py | 60 +- .../_async/client/dangling_indices.py | 6 +- elasticsearch/_async/client/enrich.py | 10 +- elasticsearch/_async/client/eql.py | 8 +- elasticsearch/_async/client/esql.py | 8 +- elasticsearch/_async/client/features.py | 4 +- elasticsearch/_async/client/fleet.py | 2 +- elasticsearch/_async/client/graph.py | 2 +- elasticsearch/_async/client/ilm.py | 22 
+- elasticsearch/_async/client/indices.py | 120 +-- elasticsearch/_async/client/inference.py | 10 +- elasticsearch/_async/client/ingest.py | 24 +- elasticsearch/_async/client/license.py | 14 +- elasticsearch/_async/client/logstash.py | 6 +- elasticsearch/_async/client/migration.py | 6 +- elasticsearch/_async/client/ml.py | 146 +-- elasticsearch/_async/client/monitoring.py | 2 +- elasticsearch/_async/client/nodes.py | 14 +- elasticsearch/_async/client/query_rules.py | 16 +- elasticsearch/_async/client/rollup.py | 16 +- .../_async/client/search_application.py | 20 +- .../_async/client/searchable_snapshots.py | 8 +- elasticsearch/_async/client/security.py | 731 +++++++++----- elasticsearch/_async/client/shutdown.py | 6 +- elasticsearch/_async/client/simulate.py | 2 +- elasticsearch/_async/client/slm.py | 18 +- elasticsearch/_async/client/snapshot.py | 26 +- elasticsearch/_async/client/sql.py | 12 +- elasticsearch/_async/client/ssl.py | 2 +- elasticsearch/_async/client/synonyms.py | 14 +- elasticsearch/_async/client/tasks.py | 6 +- elasticsearch/_async/client/text_structure.py | 8 +- elasticsearch/_async/client/transform.py | 22 +- elasticsearch/_async/client/watcher.py | 26 +- elasticsearch/_async/client/xpack.py | 4 +- elasticsearch/_sync/client/__init__.py | 955 +++++++++++++----- elasticsearch/_sync/client/async_search.py | 8 +- elasticsearch/_sync/client/autoscaling.py | 8 +- elasticsearch/_sync/client/cat.py | 294 +++--- elasticsearch/_sync/client/ccr.py | 26 +- elasticsearch/_sync/client/cluster.py | 32 +- elasticsearch/_sync/client/connector.py | 60 +- .../_sync/client/dangling_indices.py | 6 +- elasticsearch/_sync/client/enrich.py | 10 +- elasticsearch/_sync/client/eql.py | 8 +- elasticsearch/_sync/client/esql.py | 8 +- elasticsearch/_sync/client/features.py | 4 +- elasticsearch/_sync/client/fleet.py | 2 +- elasticsearch/_sync/client/graph.py | 2 +- elasticsearch/_sync/client/ilm.py | 22 +- elasticsearch/_sync/client/indices.py | 120 +-- elasticsearch/_sync/client/inference.py | 10 +- elasticsearch/_sync/client/ingest.py | 24 +- elasticsearch/_sync/client/license.py | 14 +- elasticsearch/_sync/client/logstash.py | 6 +- elasticsearch/_sync/client/migration.py | 6 +- elasticsearch/_sync/client/ml.py | 146 +-- elasticsearch/_sync/client/monitoring.py | 2 +- elasticsearch/_sync/client/nodes.py | 14 +- elasticsearch/_sync/client/query_rules.py | 16 +- elasticsearch/_sync/client/rollup.py | 16 +- .../_sync/client/search_application.py | 20 +- .../_sync/client/searchable_snapshots.py | 8 +- elasticsearch/_sync/client/security.py | 731 +++++++++----- elasticsearch/_sync/client/shutdown.py | 6 +- elasticsearch/_sync/client/simulate.py | 2 +- elasticsearch/_sync/client/slm.py | 18 +- elasticsearch/_sync/client/snapshot.py | 26 +- elasticsearch/_sync/client/sql.py | 12 +- elasticsearch/_sync/client/ssl.py | 2 +- elasticsearch/_sync/client/synonyms.py | 14 +- elasticsearch/_sync/client/tasks.py | 6 +- elasticsearch/_sync/client/text_structure.py | 8 +- elasticsearch/_sync/client/transform.py | 22 +- elasticsearch/_sync/client/watcher.py | 26 +- elasticsearch/_sync/client/xpack.py | 4 +- 82 files changed, 3412 insertions(+), 2036 deletions(-) diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py index fa2481973..7920715f4 100644 --- a/elasticsearch/_async/client/__init__.py +++ b/elasticsearch/_async/client/__init__.py @@ -724,7 +724,7 @@ async def bulk( only wait for those three shards to refresh. 
The other two shards that make up the index do not participate in the `_bulk` request at all. - ``_ + ``_ :param operations: :param index: The name of the data stream, index, or index alias to perform bulk @@ -842,7 +842,7 @@ async def clear_scroll( Clear a scrolling search. Clear the search context and results for a scrolling search. - ``_ + ``_ :param scroll_id: The scroll IDs to clear. To clear all scroll IDs, use `_all`. """ @@ -896,7 +896,7 @@ async def close_point_in_time( period has elapsed. However, keeping points in time has a cost; close them as soon as they are no longer required for search requests. - ``_ + ``_ :param id: The ID of the point-in-time. """ @@ -977,7 +977,7 @@ async def count( a replica is chosen and the search is run against it. This means that replicas increase the scalability of the count. - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, @@ -1117,38 +1117,119 @@ async def create( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Index a document. Adds a JSON document to the specified data stream or index - and makes it searchable. If the target is an index and the document already exists, - the request updates the document and increments its version. - - ``_ - - :param index: Name of the data stream or index to target. If the target doesn’t + Create a new document in the index. You can index a new JSON document with the + `//_doc/` or `//_create/<_id>` APIs Using `_create` guarantees + that the document is indexed only if it does not already exist. It returns a + 409 response when a document with a same ID already exists in the index. To update + an existing document, you must use the `//_doc/` API. If the Elasticsearch + security features are enabled, you must have the following index privileges for + the target data stream, index, or index alias: * To add a document using the + `PUT //_create/<_id>` or `POST //_create/<_id>` request formats, + you must have the `create_doc`, `create`, `index`, or `write` index privilege. + * To automatically create a data stream or index with this API request, you must + have the `auto_configure`, `create_index`, or `manage` index privilege. Automatic + data stream creation requires a matching index template with data stream enabled. + **Automatically create data streams and indices** If the request's target doesn't + exist and matches an index template with a `data_stream` definition, the index + operation automatically creates the data stream. If the target doesn't exist + and doesn't match a data stream template, the operation automatically creates + the index and applies any matching index templates. NOTE: Elasticsearch includes + several built-in index templates. To avoid naming collisions with these templates, + refer to index pattern documentation. If no mapping exists, the index operation + creates a dynamic mapping. By default, new fields and objects are automatically + added to the mapping if needed. Automatic index creation is controlled by the + `action.auto_create_index` setting. If it is `true`, any index can be created + automatically. You can modify this setting to explicitly allow or block automatic + creation of indices that match specified patterns or set it to `false` to turn + off automatic index creation entirely. Specify a comma-separated list of patterns + you want to allow or prefix each pattern with `+` or `-` to indicate whether + it should be allowed or blocked. 
When a list is specified, the default behaviour + is to disallow. NOTE: The `action.auto_create_index` setting affects the automatic + creation of indices only. It does not affect the creation of data streams. **Routing** + By default, shard placement — or routing — is controlled by using a hash of the + document's ID value. For more explicit control, the value fed into the hash function + used by the router can be directly specified on a per-operation basis using the + `routing` parameter. When setting up explicit mapping, you can also use the `_routing` + field to direct the index operation to extract the routing value from the document + itself. This does come at the (very minimal) cost of an additional document parsing + pass. If the `_routing` mapping is defined and set to be required, the index + operation will fail if no routing value is provided or extracted. NOTE: Data + streams do not support custom routing unless they were created with the `allow_custom_routing` + setting enabled in the template. **Distributed** The index operation is directed + to the primary shard based on its route and performed on the actual node containing + this shard. After the primary shard completes the operation, if needed, the update + is distributed to applicable replicas. **Active shards** To improve the resiliency + of writes to the system, indexing operations can be configured to wait for a + certain number of active shard copies before proceeding with the operation. If + the requisite number of active shard copies are not available, then the write + operation must wait and retry, until either the requisite shard copies have started + or a timeout occurs. By default, write operations only wait for the primary shards + to be active before proceeding (that is to say `wait_for_active_shards` is `1`). + This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. + To alter this behavior per operation, use the `wait_for_active_shards request` + parameter. Valid values are all or any positive integer up to the total number + of configured copies per shard in the index (which is `number_of_replicas`+1). + Specifying a negative value or a number greater than the number of shard copies + will throw an error. For example, suppose you have a cluster of three nodes, + A, B, and C and you create an index index with the number of replicas set to + 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt + an indexing operation, by default the operation will only ensure the primary + copy of each shard is available before proceeding. This means that even if B + and C went down and A hosted the primary shard copies, the indexing operation + would still proceed with only one copy of the data. If `wait_for_active_shards` + is set on the request to `3` (and all three nodes are up), the indexing operation + will require 3 active shard copies before proceeding. This requirement should + be met because there are 3 active nodes in the cluster, each one holding a copy + of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, + which is the same in this situation), the indexing operation will not proceed + as you do not have all 4 copies of each shard active in the index. The operation + will timeout unless a new node is brought up in the cluster to host the fourth + copy of the shard. 
It is important to note that this setting greatly reduces + the chances of the write operation not writing to the requisite number of shard + copies, but it does not completely eliminate the possibility, because this check + occurs before the write operation starts. After the write operation is underway, + it is still possible for replication to fail on any number of shard copies but + still succeed on the primary. The `_shards` section of the API response reveals + the number of shard copies on which replication succeeded and failed. + + ``_ + + :param index: The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If - the target doesn’t exist and doesn’t match a data stream template, this request + the target doesn't exist and doesn’t match a data stream template, this request creates the index. - :param id: Unique identifier for the document. + :param id: A unique identifier for the document. To automatically generate a + document ID, use the `POST //_doc/` request format. :param document: - :param pipeline: ID of the pipeline to use to preprocess incoming documents. - If the index has a default ingest pipeline specified, then setting the value - to `_none` disables the default ingest pipeline for this request. If a final - pipeline is configured it will always run, regardless of the value of this + :param pipeline: The ID of the pipeline to use to preprocess incoming documents. + If the index has a default ingest pipeline specified, setting the value to + `_none` turns off the default ingest pipeline for this request. If a final + pipeline is configured, it will always run regardless of the value of this parameter. :param refresh: If `true`, Elasticsearch refreshes the affected shards to make - this operation visible to search, if `wait_for` then wait for a refresh to - make this operation visible to search, if `false` do nothing with refreshes. - Valid values: `true`, `false`, `wait_for`. - :param routing: Custom value used to route operations to a specific shard. - :param timeout: Period the request waits for the following operations: automatic - index creation, dynamic mapping updates, waiting for active shards. - :param version: Explicit version number for concurrency control. The specified - version must match the current version of the document for the request to - succeed. - :param version_type: Specific version type: `external`, `external_gte`. + this operation visible to search. If `wait_for`, it waits for a refresh to + make this operation visible to search. If `false`, it does nothing with refreshes. + :param routing: A custom value that is used to route operations to a specific + shard. + :param timeout: The period the request waits for the following operations: automatic + index creation, dynamic mapping updates, waiting for active shards. Elasticsearch + waits for at least the specified timeout period before failing. The actual + wait time could be longer, particularly when multiple waits occur. This parameter + is useful for situations where the primary shard assigned to perform the + operation might not be available when the operation runs. Some reasons for + this might be that the primary shard is currently recovering from a gateway + or undergoing relocation. By default, the operation will wait on the primary + shard to become available for at least 1 minute before failing and responding + with an error. 
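As an aside to the `create` docstring being rewritten above, here is a minimal sketch of calling it from the synchronous Python client; the host, index name, and document body are invented for illustration:

```
from elasticsearch import Elasticsearch

# Hypothetical local cluster and index name, for illustration only.
es = Elasticsearch("http://localhost:9200")

# _create indexes the document only if the ID does not exist yet;
# a second call with the same ID fails with a 409 (ConflictError).
resp = es.create(
    index="my-index-000001",
    id="1",
    document={"user": {"id": "kimchy"}, "message": "trying out _create"},
    refresh="wait_for",  # wait for a refresh so the document is searchable on return
)
print(resp["result"])  # "created"
```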
The actual wait time could be longer, particularly when multiple + waits occur. + :param version: The explicit version number for concurrency control. It must + be a non-negative long number. + :param version_type: The version type. :param wait_for_active_shards: The number of shard copies that must be active - before proceeding with the operation. Set to `all` or any positive integer - up to the total number of shards in the index (`number_of_replicas+1`). + before proceeding with the operation. You can set it to `all` or any positive + integer up to the total number of shards in the index (`number_of_replicas+1`). + The default value of `1` means it waits for each primary shard to be active. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -1223,29 +1304,57 @@ async def delete( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a document. Removes a JSON document from the specified index. - - ``_ - - :param index: Name of the target index. - :param id: Unique identifier for the document. + Delete a document. Remove a JSON document from the specified index. NOTE: You + cannot send deletion requests directly to a data stream. To delete a document + in a data stream, you must target the backing index containing the document. + **Optimistic concurrency control** Delete operations can be made conditional + and only be performed if the last modification to the document was assigned the + sequence number and primary term specified by the `if_seq_no` and `if_primary_term` + parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` + and a status code of `409`. **Versioning** Each document indexed is versioned. + When deleting a document, the version can be specified to make sure the relevant + document you are trying to delete is actually being deleted and it has not changed + in the meantime. Every write operation run on a document, deletes included, causes + its version to be incremented. The version number of a deleted document remains + available for a short time after deletion to allow for control of concurrent + operations. The length of time for which a deleted document's version remains + available is determined by the `index.gc_deletes` index setting. **Routing** + If routing is used during indexing, the routing value also needs to be specified + to delete a document. If the `_routing` mapping is set to `required` and no routing + value is specified, the delete API throws a `RoutingMissingException` and rejects + the request. For example: ``` DELETE /my-index-000001/_doc/1?routing=shard-1 + ``` This request deletes the document with ID 1, but it is routed based on the + user. The document is not deleted if the correct routing is not specified. **Distributed** + The delete operation gets hashed into a specific shard ID. It then gets redirected + into the primary shard within that ID group and replicated (if needed) to shard + replicas within that ID group. + + ``_ + + :param index: The name of the target index. + :param id: A unique identifier for the document. :param if_primary_term: Only perform the operation if the document has this primary term. :param if_seq_no: Only perform the operation if the document has this sequence number. :param refresh: If `true`, Elasticsearch refreshes the affected shards to make - this operation visible to search, if `wait_for` then wait for a refresh to - make this operation visible to search, if `false` do nothing with refreshes. 
- Valid values: `true`, `false`, `wait_for`. - :param routing: Custom value used to route operations to a specific shard. - :param timeout: Period to wait for active shards. - :param version: Explicit version number for concurrency control. The specified - version must match the current version of the document for the request to - succeed. - :param version_type: Specific version type: `external`, `external_gte`. - :param wait_for_active_shards: The number of shard copies that must be active - before proceeding with the operation. Set to `all` or any positive integer - up to the total number of shards in the index (`number_of_replicas+1`). + this operation visible to search. If `wait_for`, it waits for a refresh to + make this operation visible to search. If `false`, it does nothing with refreshes. + :param routing: A custom value used to route operations to a specific shard. + :param timeout: The period to wait for active shards. This parameter is useful + for situations where the primary shard assigned to perform the delete operation + might not be available when the delete operation runs. Some reasons for this + might be that the primary shard is currently recovering from a store or undergoing + relocation. By default, the delete operation will wait on the primary shard + to become available for up to 1 minute before failing and responding with + an error. + :param version: An explicit version number for concurrency control. It must match + the current version of the document for the request to succeed. + :param version_type: The version type. + :param wait_for_active_shards: The minimum number of shard copies that must be + active before proceeding with the operation. You can set it to `all` or any + positive integer up to the total number of shards in the index (`number_of_replicas+1`). + The default value of `1` means it waits for each primary shard to be active. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -1347,7 +1456,7 @@ async def delete_by_query( """ Delete documents. Deletes documents that match the specified query. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this @@ -1528,7 +1637,7 @@ async def delete_by_query_rethrottle( takes effect immediately but rethrotting that slows down the query takes effect after completing the current batch to prevent scroll timeouts. - ``_ + ``_ :param task_id: The ID for the task. :param requests_per_second: The throttle for this request in sub-requests per @@ -1574,7 +1683,7 @@ async def delete_script( """ Delete a script or search template. Deletes a stored script or search template. - ``_ + ``_ :param id: Identifier for the stored script or search template. :param master_timeout: Period to wait for a connection to the master node. If @@ -1640,32 +1749,54 @@ async def exists( ] = None, ) -> HeadApiResponse: """ - Check a document. Checks if a specified document exists. - - ``_ - - :param index: Comma-separated list of data streams, indices, and aliases. Supports - wildcards (`*`). - :param id: Identifier of the document. - :param preference: Specifies the node or shard the operation should be performed - on. Random by default. + Check a document. Verify that a document exists. For example, check to see if + a document with the `_id` 0 exists: ``` HEAD my-index-000001/_doc/0 ``` If the + document exists, the API returns a status code of `200 - OK`. 
If the document + doesn’t exist, the API returns `404 - Not Found`. **Versioning support** You + can use the `version` parameter to check the document only if its current version + is equal to the specified one. Internally, Elasticsearch has marked the old document + as deleted and added an entirely new document. The old version of the document + doesn't disappear immediately, although you won't be able to access it. Elasticsearch + cleans up deleted documents in the background as you continue to index more data. + + ``_ + + :param index: A comma-separated list of data streams, indices, and aliases. It + supports wildcards (`*`). + :param id: A unique document identifier. + :param preference: The node or shard the operation should be performed on. By + default, the operation is randomized between the shard replicas. If it is + set to `_local`, the operation will prefer to be run on a local allocated + shard when possible. If it is set to a custom value, the value is used to + guarantee that the same shards will be used for the same custom value. This + can help with "jumping values" when hitting different shards in different + refresh states. A sample value can be something like the web session ID or + the user name. :param realtime: If `true`, the request is real-time as opposed to near-real-time. - :param refresh: If `true`, Elasticsearch refreshes all shards involved in the - delete by query after the request completes. - :param routing: Target the specified primary shard. - :param source: `true` or `false` to return the `_source` field or not, or a list - of fields to return. - :param source_excludes: A comma-separated list of source fields to exclude in - the response. + :param refresh: If `true`, the request refreshes the relevant shards before retrieving + the document. Setting it to `true` should be done after careful thought and + verification that this does not cause a heavy load on the system (and slow + down indexing). + :param routing: A custom value used to route operations to a specific shard. + :param source: Indicates whether to return the `_source` field (`true` or `false`) + or lists the fields to return. + :param source_excludes: A comma-separated list of source fields to exclude from + the response. You can also use this parameter to exclude fields from the + subset specified in `_source_includes` query parameter. If the `_source` + parameter is `false`, this parameter is ignored. :param source_includes: A comma-separated list of source fields to include in - the response. - :param stored_fields: List of stored fields to return as part of a hit. If no - fields are specified, no stored fields are included in the response. If this - field is specified, the `_source` parameter defaults to false. + the response. If this parameter is specified, only these source fields are + returned. You can exclude fields from this subset using the `_source_excludes` + query parameter. If the `_source` parameter is `false`, this parameter is + ignored. + :param stored_fields: A comma-separated list of stored fields to return as part + of a hit. If no fields are specified, no stored fields are included in the + response. If this field is specified, the `_source` parameter defaults to + `false`. :param version: Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed. - :param version_type: Specific version type: `external`, `external_gte`. + :param version_type: The version type. 
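To illustrate the `exists` behaviour described above, a short sketch using the synchronous client (cluster address, index, and ID are hypothetical). The response is truthy on `200 - OK` and falsy on `404 - Not Found`, so it can be used directly in a condition:

```
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # hypothetical cluster address

# Performs HEAD my-index-000001/_doc/0 under the hood.
if es.exists(index="my-index-000001", id="0"):
    print("document 0 is present")
else:
    print("document 0 was not found")
```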
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -1741,29 +1872,32 @@ async def exists_source( ] = None, ) -> HeadApiResponse: """ - Check for a document source. Checks if a document's `_source` is stored. + Check for a document source. Check whether a document source exists in an index. + For example: ``` HEAD my-index-000001/_source/1 ``` A document's source is not + available if it is disabled in the mapping. - ``_ + ``_ - :param index: Comma-separated list of data streams, indices, and aliases. Supports - wildcards (`*`). - :param id: Identifier of the document. - :param preference: Specifies the node or shard the operation should be performed - on. Random by default. - :param realtime: If true, the request is real-time as opposed to near-real-time. - :param refresh: If `true`, Elasticsearch refreshes all shards involved in the - delete by query after the request completes. - :param routing: Target the specified primary shard. - :param source: `true` or `false` to return the `_source` field or not, or a list - of fields to return. + :param index: A comma-separated list of data streams, indices, and aliases. It + supports wildcards (`*`). + :param id: A unique identifier for the document. + :param preference: The node or shard the operation should be performed on. By + default, the operation is randomized between the shard replicas. + :param realtime: If `true`, the request is real-time as opposed to near-real-time. + :param refresh: If `true`, the request refreshes the relevant shards before retrieving + the document. Setting it to `true` should be done after careful thought and + verification that this does not cause a heavy load on the system (and slow + down indexing). + :param routing: A custom value used to route operations to a specific shard. + :param source: Indicates whether to return the `_source` field (`true` or `false`) + or lists the fields to return. :param source_excludes: A comma-separated list of source fields to exclude in the response. :param source_includes: A comma-separated list of source fields to include in the response. - :param version: Explicit version number for concurrency control. The specified - version must match the current version of the document for the request to - succeed. - :param version_type: Specific version type: `external`, `external_gte`. + :param version: The version number for concurrency control. It must match the + current version of the document for the request to succeed. + :param version_type: The version type. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -1844,7 +1978,7 @@ async def explain( Explain a document match result. Returns information about why a specific document matches, or doesn’t match, a query. - ``_ + ``_ :param index: Index names used to limit the request. Only a single index name can be provided to this parameter. @@ -1967,7 +2101,7 @@ async def field_caps( field. For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams @@ -2081,36 +2215,78 @@ async def get( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a document by its ID. Retrieves the document with the specified ID from an - index. - - ``_ - - :param index: Name of the index that contains the document. 
- :param id: Unique identifier of the document. - :param force_synthetic_source: Should this request force synthetic _source? Use - this to test if the mapping supports synthetic _source and to get a sense - of the worst case performance. Fetches with this enabled will be slower the - enabling synthetic source natively in the index. - :param preference: Specifies the node or shard the operation should be performed - on. Random by default. + Get a document by its ID. Get a document and its source or stored fields from + an index. By default, this API is realtime and is not affected by the refresh + rate of the index (when data will become visible for search). In the case where + stored fields are requested with the `stored_fields` parameter and the document + has been updated but is not yet refreshed, the API will have to parse and analyze + the source to extract the stored fields. To turn off realtime behavior, set the + `realtime` parameter to false. **Source filtering** By default, the API returns + the contents of the `_source` field unless you have used the `stored_fields` + parameter or the `_source` field is turned off. You can turn off `_source` retrieval + by using the `_source` parameter: ``` GET my-index-000001/_doc/0?_source=false + ``` If you only need one or two fields from the `_source`, use the `_source_includes` + or `_source_excludes` parameters to include or filter out particular fields. + This can be helpful with large documents where partial retrieval can save on + network overhead Both parameters take a comma separated list of fields or wildcard + expressions. For example: ``` GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities + ``` If you only want to specify includes, you can use a shorter notation: ``` + GET my-index-000001/_doc/0?_source=*.id ``` **Routing** If routing is used during + indexing, the routing value also needs to be specified to retrieve a document. + For example: ``` GET my-index-000001/_doc/2?routing=user1 ``` This request gets + the document with ID 2, but it is routed based on the user. The document is not + fetched if the correct routing is not specified. **Distributed** The GET operation + is hashed into a specific shard ID. It is then redirected to one of the replicas + within that shard ID and returns the result. The replicas are the primary shard + and its replicas within that shard ID group. This means that the more replicas + you have, the better your GET scaling will be. **Versioning support** You can + use the `version` parameter to retrieve the document only if its current version + is equal to the specified one. Internally, Elasticsearch has marked the old document + as deleted and added an entirely new document. The old version of the document + doesn't disappear immediately, although you won't be able to access it. Elasticsearch + cleans up deleted documents in the background as you continue to index more data. + + ``_ + + :param index: The name of the index that contains the document. + :param id: A unique document identifier. + :param force_synthetic_source: Indicates whether the request forces synthetic + `_source`. Use this paramater to test if the mapping supports synthetic `_source` + and to get a sense of the worst case performance. Fetches with this parameter + enabled will be slower than enabling synthetic source natively in the index. + :param preference: The node or shard the operation should be performed on. By + default, the operation is randomized between the shard replicas. 
If it is + set to `_local`, the operation will prefer to be run on a local allocated + shard when possible. If it is set to a custom value, the value is used to + guarantee that the same shards will be used for the same custom value. This + can help with "jumping values" when hitting different shards in different + refresh states. A sample value can be something like the web session ID or + the user name. :param realtime: If `true`, the request is real-time as opposed to near-real-time. - :param refresh: If true, Elasticsearch refreshes the affected shards to make - this operation visible to search. If false, do nothing with refreshes. - :param routing: Target the specified primary shard. - :param source: True or false to return the _source field or not, or a list of - fields to return. - :param source_excludes: A comma-separated list of source fields to exclude in - the response. + :param refresh: If `true`, the request refreshes the relevant shards before retrieving + the document. Setting it to `true` should be done after careful thought and + verification that this does not cause a heavy load on the system (and slow + down indexing). + :param routing: A custom value used to route operations to a specific shard. + :param source: Indicates whether to return the `_source` field (`true` or `false`) + or lists the fields to return. + :param source_excludes: A comma-separated list of source fields to exclude from + the response. You can also use this parameter to exclude fields from the + subset specified in `_source_includes` query parameter. If the `_source` + parameter is `false`, this parameter is ignored. :param source_includes: A comma-separated list of source fields to include in - the response. - :param stored_fields: List of stored fields to return as part of a hit. If no - fields are specified, no stored fields are included in the response. If this - field is specified, the `_source` parameter defaults to false. - :param version: Explicit version number for concurrency control. The specified - version must match the current version of the document for the request to - succeed. - :param version_type: Specific version type: internal, external, external_gte. + the response. If this parameter is specified, only these source fields are + returned. You can exclude fields from this subset using the `_source_excludes` + query parameter. If the `_source` parameter is `false`, this parameter is + ignored. + :param stored_fields: A comma-separated list of stored fields to return as part + of a hit. If no fields are specified, no stored fields are included in the + response. If this field is specified, the `_source` parameter defaults to + `false`. Only leaf fields can be retrieved with the `stored_field` option. + Object fields can't be returned;​if specified, the request fails. + :param version: The version number for concurrency control. It must match the + current version of the document for the request to succeed. + :param version_type: The version type. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -2173,7 +2349,7 @@ async def get_script( """ Get a script or search template. Retrieves a stored script or search template. - ``_ + ``_ :param id: Identifier for the stored script or search template. :param master_timeout: Specify timeout for connection to master @@ -2215,7 +2391,7 @@ async def get_script_context( """ Get script contexts. Get a list of supported script contexts and their methods. 
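Circling back to the document `get` API a few hunks above, a sketch of the source-filtering parameters it documents; the index, ID, and field patterns here are made up:

```
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # hypothetical address

# Roughly equivalent to
# GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
doc = es.get(
    index="my-index-000001",
    id="0",
    source_includes=["*.id"],
    source_excludes=["entities"],
)
print(doc["_source"])

# Skip _source entirely and fetch only the document metadata.
meta_only = es.get(index="my-index-000001", id="0", source=False)
print(meta_only["_version"])
```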
- ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_script_context" @@ -2250,7 +2426,7 @@ async def get_script_languages( """ Get script languages. Get a list of available script types, languages, and contexts. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_script_language" @@ -2303,29 +2479,34 @@ async def get_source( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a document's source. Returns the source of a document. + Get a document's source. Get the source of a document. For example: ``` GET my-index-000001/_source/1 + ``` You can use the source filtering parameters to control which parts of the + `_source` are returned: ``` GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities + ``` - ``_ + ``_ - :param index: Name of the index that contains the document. - :param id: Unique identifier of the document. - :param preference: Specifies the node or shard the operation should be performed - on. Random by default. - :param realtime: Boolean) If true, the request is real-time as opposed to near-real-time. - :param refresh: If true, Elasticsearch refreshes the affected shards to make - this operation visible to search. If false, do nothing with refreshes. - :param routing: Target the specified primary shard. - :param source: True or false to return the _source field or not, or a list of - fields to return. + :param index: The name of the index that contains the document. + :param id: A unique document identifier. + :param preference: The node or shard the operation should be performed on. By + default, the operation is randomized between the shard replicas. + :param realtime: If `true`, the request is real-time as opposed to near-real-time. + :param refresh: If `true`, the request refreshes the relevant shards before retrieving + the document. Setting it to `true` should be done after careful thought and + verification that this does not cause a heavy load on the system (and slow + down indexing). + :param routing: A custom value used to route operations to a specific shard. + :param source: Indicates whether to return the `_source` field (`true` or `false`) + or lists the fields to return. :param source_excludes: A comma-separated list of source fields to exclude in the response. :param source_includes: A comma-separated list of source fields to include in the response. - :param stored_fields: - :param version: Explicit version number for concurrency control. The specified - version must match the current version of the document for the request to - succeed. - :param version_type: Specific version type: internal, external, external_gte. + :param stored_fields: A comma-separated list of stored fields to return as part + of a hit. + :param version: The version number for concurrency control. It must match the + current version of the document for the request to succeed. + :param version_type: The version type. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -2407,7 +2588,7 @@ async def health_report( for health status, set verbose to false to disable the more expensive analysis logic. - ``_ + ``_ :param feature: A feature of the cluster, as returned by the top-level health report API. @@ -2480,44 +2661,170 @@ async def index( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Index a document. Adds a JSON document to the specified data stream or index - and makes it searchable. 
If the target is an index and the document already exists, - the request updates the document and increments its version. - - ``_ - - :param index: Name of the data stream or index to target. + Create or update a document in an index. Add a JSON document to the specified + data stream or index and make it searchable. If the target is an index and the + document already exists, the request updates the document and increments its + version. NOTE: You cannot use this API to send update requests for existing documents + in a data stream. If the Elasticsearch security features are enabled, you must + have the following index privileges for the target data stream, index, or index + alias: * To add or overwrite a document using the `PUT //_doc/<_id>` + request format, you must have the `create`, `index`, or `write` index privilege. + * To add a document using the `POST //_doc/` request format, you must + have the `create_doc`, `create`, `index`, or `write` index privilege. * To automatically + create a data stream or index with this API request, you must have the `auto_configure`, + `create_index`, or `manage` index privilege. Automatic data stream creation requires + a matching index template with data stream enabled. NOTE: Replica shards might + not all be started when an indexing operation returns successfully. By default, + only the primary is required. Set `wait_for_active_shards` to change this default + behavior. **Automatically create data streams and indices** If the request's + target doesn't exist and matches an index template with a `data_stream` definition, + the index operation automatically creates the data stream. If the target doesn't + exist and doesn't match a data stream template, the operation automatically creates + the index and applies any matching index templates. NOTE: Elasticsearch includes + several built-in index templates. To avoid naming collisions with these templates, + refer to index pattern documentation. If no mapping exists, the index operation + creates a dynamic mapping. By default, new fields and objects are automatically + added to the mapping if needed. Automatic index creation is controlled by the + `action.auto_create_index` setting. If it is `true`, any index can be created + automatically. You can modify this setting to explicitly allow or block automatic + creation of indices that match specified patterns or set it to `false` to turn + off automatic index creation entirely. Specify a comma-separated list of patterns + you want to allow or prefix each pattern with `+` or `-` to indicate whether + it should be allowed or blocked. When a list is specified, the default behaviour + is to disallow. NOTE: The `action.auto_create_index` setting affects the automatic + creation of indices only. It does not affect the creation of data streams. **Optimistic + concurrency control** Index operations can be made conditional and only be performed + if the last modification to the document was assigned the sequence number and + primary term specified by the `if_seq_no` and `if_primary_term` parameters. If + a mismatch is detected, the operation will result in a `VersionConflictException` + and a status code of `409`. **Routing** By default, shard placement — or routing + — is controlled by using a hash of the document's ID value. For more explicit + control, the value fed into the hash function used by the router can be directly + specified on a per-operation basis using the `routing` parameter. 
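A small sketch of the per-operation `routing` parameter just mentioned, using the Python client with invented values; the same routing value has to be supplied on reads:

```
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # hypothetical address

# The routing value, not the document ID, decides which shard receives the write.
es.index(
    index="my-index-000001",
    id="2",
    routing="user1",
    document={"user": {"id": "user1"}, "message": "routed write"},
)

# Reading the document back requires the same routing value.
doc = es.get(index="my-index-000001", id="2", routing="user1")
```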
When setting + up explicit mapping, you can also use the `_routing` field to direct the index + operation to extract the routing value from the document itself. This does come + at the (very minimal) cost of an additional document parsing pass. If the `_routing` + mapping is defined and set to be required, the index operation will fail if no + routing value is provided or extracted. NOTE: Data streams do not support custom + routing unless they were created with the `allow_custom_routing` setting enabled + in the template. **Distributed** The index operation is directed to the primary + shard based on its route and performed on the actual node containing this shard. + After the primary shard completes the operation, if needed, the update is distributed + to applicable replicas. **Active shards** To improve the resiliency of writes + to the system, indexing operations can be configured to wait for a certain number + of active shard copies before proceeding with the operation. If the requisite + number of active shard copies are not available, then the write operation must + wait and retry, until either the requisite shard copies have started or a timeout + occurs. By default, write operations only wait for the primary shards to be active + before proceeding (that is to say `wait_for_active_shards` is `1`). This default + can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. + To alter this behavior per operation, use the `wait_for_active_shards request` + parameter. Valid values are all or any positive integer up to the total number + of configured copies per shard in the index (which is `number_of_replicas`+1). + Specifying a negative value or a number greater than the number of shard copies + will throw an error. For example, suppose you have a cluster of three nodes, + A, B, and C and you create an index index with the number of replicas set to + 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt + an indexing operation, by default the operation will only ensure the primary + copy of each shard is available before proceeding. This means that even if B + and C went down and A hosted the primary shard copies, the indexing operation + would still proceed with only one copy of the data. If `wait_for_active_shards` + is set on the request to `3` (and all three nodes are up), the indexing operation + will require 3 active shard copies before proceeding. This requirement should + be met because there are 3 active nodes in the cluster, each one holding a copy + of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, + which is the same in this situation), the indexing operation will not proceed + as you do not have all 4 copies of each shard active in the index. The operation + will timeout unless a new node is brought up in the cluster to host the fourth + copy of the shard. It is important to note that this setting greatly reduces + the chances of the write operation not writing to the requisite number of shard + copies, but it does not completely eliminate the possibility, because this check + occurs before the write operation starts. After the write operation is underway, + it is still possible for replication to fail on any number of shard copies but + still succeed on the primary. The `_shards` section of the API response reveals + the number of shard copies on which replication succeeded and failed. 
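The optimistic concurrency control described earlier in this docstring can be sketched like this from the Python client (index name, ID, and document contents are illustrative):

```
from elasticsearch import Elasticsearch, ConflictError

es = Elasticsearch("http://localhost:9200")  # hypothetical address

# Read the current sequence number and primary term of the document.
doc = es.get(index="my-index-000001", id="1")
seq_no, primary_term = doc["_seq_no"], doc["_primary_term"]

try:
    # The write only succeeds if nothing modified the document in between.
    es.index(
        index="my-index-000001",
        id="1",
        document={"message": "updated under optimistic concurrency control"},
        if_seq_no=seq_no,
        if_primary_term=primary_term,
    )
except ConflictError:
    # 409: another writer got there first; re-read and retry as appropriate.
    pass
```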
**No operation + (noop) updates** When updating a document by using this API, a new version of + the document is always created even if the document hasn't changed. If this isn't + acceptable use the `_update` API with `detect_noop` set to `true`. The `detect_noop` + option isn't available on this API because it doesn’t fetch the old source and + isn't able to compare it against the new source. There isn't a definitive rule + for when noop updates aren't acceptable. It's a combination of lots of factors + like how frequently your data source sends updates that are actually noops and + how many queries per second Elasticsearch runs on the shard receiving the updates. + **Versioning** Each indexed document is given a version number. By default, internal + versioning is used that starts at 1 and increments with each update, deletes + included. Optionally, the version number can be set to an external value (for + example, if maintained in a database). To enable this functionality, `version_type` + should be set to `external`. The value provided must be a numeric, long value + greater than or equal to 0, and less than around `9.2e+18`. NOTE: Versioning + is completely real time, and is not affected by the near real time aspects of + search operations. If no version is provided, the operation runs without any + version checks. When using the external version type, the system checks to see + if the version number passed to the index request is greater than the version + of the currently stored document. If true, the document will be indexed and the + new version number used. If the value provided is less than or equal to the stored + document's version number, a version conflict will occur and the index operation + will fail. For example: ``` PUT my-index-000001/_doc/1?version=2&version_type=external + { "user": { "id": "elkbee" } } In this example, the operation will succeed since + the supplied version of 2 is higher than the current document version of 1. If + the document was already updated and its version was set to 2 or higher, the + indexing command will fail and result in a conflict (409 HTTP status code). A + nice side effect is that there is no need to maintain strict ordering of async + indexing operations run as a result of changes to a source database, as long + as version numbers from the source database are used. Even the simple case of + updating the Elasticsearch index using data from a database is simplified if + external versioning is used, as only the latest version will be used if the index + operations arrive out of order. + + ``_ + + :param index: The name of the data stream or index to target. If the target doesn't + exist and matches the name or wildcard (`*`) pattern of an index template + with a `data_stream` definition, this request creates the data stream. If + the target doesn't exist and doesn't match a data stream template, this request + creates the index. You can check for existing targets with the resolve index + API. :param document: - :param id: Unique identifier for the document. + :param id: A unique identifier for the document. To automatically generate a + document ID, use the `POST //_doc/` request format and omit this + parameter. :param if_primary_term: Only perform the operation if the document has this primary term. :param if_seq_no: Only perform the operation if the document has this sequence number. 
- :param op_type: Set to create to only index the document if it does not already + :param op_type: Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, - the indexing operation will fail. Same as using the `/_create` endpoint. - Valid values: `index`, `create`. If document id is specified, it defaults - to `index`. Otherwise, it defaults to `create`. - :param pipeline: ID of the pipeline to use to preprocess incoming documents. + the indexing operation will fail. The behavior is the same as using the `/_create` + endpoint. If a document ID is specified, this paramater defaults to `index`. + Otherwise, it defaults to `create`. If the request targets a data stream, + an `op_type` of `create` is required. + :param pipeline: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. :param refresh: If `true`, Elasticsearch refreshes the affected shards to make - this operation visible to search, if `wait_for` then wait for a refresh to - make this operation visible to search, if `false` do nothing with refreshes. - Valid values: `true`, `false`, `wait_for`. + this operation visible to search. If `wait_for`, it waits for a refresh to + make this operation visible to search. If `false`, it does nothing with refreshes. :param require_alias: If `true`, the destination must be an index alias. - :param routing: Custom value used to route operations to a specific shard. - :param timeout: Period the request waits for the following operations: automatic - index creation, dynamic mapping updates, waiting for active shards. - :param version: Explicit version number for concurrency control. The specified - version must match the current version of the document for the request to - succeed. - :param version_type: Specific version type: `external`, `external_gte`. + :param routing: A custom value that is used to route operations to a specific + shard. + :param timeout: The period the request waits for the following operations: automatic + index creation, dynamic mapping updates, waiting for active shards. This + parameter is useful for situations where the primary shard assigned to perform + the operation might not be available when the operation runs. Some reasons + for this might be that the primary shard is currently recovering from a gateway + or undergoing relocation. By default, the operation will wait on the primary + shard to become available for at least 1 minute before failing and responding + with an error. The actual wait time could be longer, particularly when multiple + waits occur. + :param version: An explicit version number for concurrency control. It must be + a non-negative long number. + :param version_type: The version type. :param wait_for_active_shards: The number of shard copies that must be active - before proceeding with the operation. Set to all or any positive integer - up to the total number of shards in the index (`number_of_replicas+1`). + before proceeding with the operation. You can set it to `all` or any positive + integer up to the total number of shards in the index (`number_of_replicas+1`). + The default value of `1` means it waits for each primary shard to be active. 
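As a companion to the external-versioning discussion above, a hedged sketch of the equivalent client calls; the document, IDs, and version numbers are made up:

```
from elasticsearch import Elasticsearch, ConflictError

es = Elasticsearch("http://localhost:9200")  # hypothetical address

# Equivalent to PUT my-index-000001/_doc/1?version=2&version_type=external
es.index(
    index="my-index-000001",
    id="1",
    document={"user": {"id": "elkbee"}},
    version=2,
    version_type="external",
)

try:
    # Replaying an older version from the source database is rejected with a 409.
    es.index(
        index="my-index-000001",
        id="1",
        document={"user": {"id": "elkbee"}},
        version=1,
        version_type="external",
    )
except ConflictError:
    print("stale external version, ignoring")
```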
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -2593,7 +2900,7 @@ async def info( """ Get cluster info. Get basic build, version, and cluster information. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/" @@ -2658,7 +2965,7 @@ async def knn_search( The kNN search API supports restricting the search using a filter. The search will return the top k documents that also match the filter query. - ``_ + ``_ :param index: A comma-separated list of index names to search; use `_all` or to perform the operation on all indices @@ -2762,7 +3069,7 @@ async def mget( IDs in the request body. To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail. - ``_ + ``_ :param index: Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index. @@ -2889,7 +3196,7 @@ async def msearch( Each newline character may be preceded by a carriage return `\\r`. When sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`. - ``_ + ``_ :param searches: :param index: Comma-separated list of data streams, indices, and index aliases @@ -3021,7 +3328,7 @@ async def msearch_template( """ Run multiple templated searches. - ``_ + ``_ :param search_templates: :param index: Comma-separated list of data streams, indices, and aliases to search. @@ -3120,7 +3427,7 @@ async def mtermvectors( with all the fetched termvectors. Each element has the structure provided by the termvectors API. - ``_ + ``_ :param index: Name of the index that contains the documents. :param docs: Array of existing or artificial documents. @@ -3240,7 +3547,7 @@ async def open_point_in_time( A point in time must be opened explicitly before being used in search requests. The `keep_alive` parameter tells Elasticsearch how long it should persist. - ``_ + ``_ :param index: A comma-separated list of index names to open point in time; use `_all` or empty string to perform the operation on all indices @@ -3328,7 +3635,7 @@ async def put_script( Create or update a script or search template. Creates or updates a stored script or search template. - ``_ + ``_ :param id: Identifier for the stored script or search template. Must be unique within the cluster. @@ -3414,7 +3721,7 @@ async def rank_eval( Evaluate ranked search results. Evaluate the quality of ranked search results over a set of typical search queries. - ``_ + ``_ :param requests: A set of typical search requests, together with their provided ratings. @@ -3506,33 +3813,191 @@ async def reindex( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Reindex documents. Copies documents from a source to a destination. The source - can be any existing index, alias, or data stream. The destination must differ - from the source. For example, you cannot reindex a data stream into itself. - - ``_ + Reindex documents. Copy documents from a source to a destination. You can copy + all documents to the destination index or reindex a subset of the documents. + The source can be any existing index, alias, or data stream. The destination + must differ from the source. For example, you cannot reindex a data stream into + itself. IMPORTANT: Reindex requires `_source` to be enabled for all documents + in the source. The destination should be configured as wanted before calling + the reindex API. Reindex does not copy the settings from the source or its associated + template. 
Mappings, shard counts, and replicas, for example, must be configured + ahead of time. If the Elasticsearch security features are enabled, you must have + the following security privileges: * The `read` index privilege for the source + data stream, index, or alias. * The `write` index privilege for the destination + data stream, index, or index alias. * To automatically create a data stream or + index with a reindex API request, you must have the `auto_configure`, `create_index`, + or `manage` index privilege for the destination data stream, index, or alias. + * If reindexing from a remote cluster, the `source.remote.user` must have the + `monitor` cluster privilege and the `read` index privilege for the source data + stream, index, or alias. If reindexing from a remote cluster, you must explicitly + allow the remote host in the `reindex.remote.whitelist` setting. Automatic data + stream creation requires a matching index template with data stream enabled. + The `dest` element can be configured like the index API to control optimistic + concurrency control. Omitting `version_type` or setting it to `internal` causes + Elasticsearch to blindly dump documents into the destination, overwriting any + that happen to have the same ID. Setting `version_type` to `external` causes + Elasticsearch to preserve the `version` from the source, create any documents + that are missing, and update any documents that have an older version in the + destination than they do in the source. Setting `op_type` to `create` causes + the reindex API to create only missing documents in the destination. All existing + documents will cause a version conflict. IMPORTANT: Because data streams are + append-only, any reindex request to a destination data stream must have an `op_type` + of `create`. A reindex can only add new documents to a destination data stream. + It cannot update existing documents in a destination data stream. By default, + version conflicts abort the reindex process. To continue reindexing if there + are conflicts, set the `conflicts` request body property to `proceed`. In this + case, the response includes a count of the version conflicts that were encountered. + Note that the handling of other error types is unaffected by the `conflicts` + property. Additionally, if you opt to count version conflicts, the operation + could attempt to reindex more documents from the source than `max_docs` until + it has successfully indexed `max_docs` documents into the target or it has gone + through every document in the source query. NOTE: The reindex API makes no effort + to handle ID collisions. The last document written will "win" but the order isn't + usually predictable so it is not a good idea to rely on this behavior. Instead, + make sure that IDs are unique by using a script. **Running reindex asynchronously** + If the request contains `wait_for_completion=false`, Elasticsearch performs some + preflight checks, launches the request, and returns a task you can use to cancel + or get the status of the task. Elasticsearch creates a record of this task as + a document at `_tasks/`. **Reindex from multiple sources** If you have + many sources to reindex it is generally better to reindex them one at a time + rather than using a glob pattern to pick up multiple sources. That way you can + resume the process if there are any errors by removing the partially completed + source and starting over. 
It also makes parallelizing the process fairly simple: + split the list of sources to reindex and run each list in parallel. For example, + you can use a bash script like this: ``` for index in i1 i2 i3 i4 i5; do curl + -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{ "source": + { "index": "'$index'" }, "dest": { "index": "'$index'-reindexed" } }' done ``` + **Throttling** Set `requests_per_second` to any positive decimal number (`1.4`, + `6`, `1000`, for example) to throttle the rate at which reindex issues batches + of index operations. Requests are throttled by padding each batch with a wait + time. To turn off throttling, set `requests_per_second` to `-1`. The throttling + is done by waiting between batches so that the scroll that reindex uses internally + can be given a timeout that takes into account the padding. The padding time + is the difference between the batch size divided by the `requests_per_second` + and the time spent writing. By default the batch size is `1000`, so if `requests_per_second` + is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time + = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` Since the + batch is issued as a single bulk request, large batch sizes cause Elasticsearch + to create many requests and then wait for a while before starting the next set. + This is "bursty" instead of "smooth". **Slicing** Reindex supports sliced scroll + to parallelize the reindexing process. This parallelization can improve efficiency + and provide a convenient way to break the request down into smaller parts. NOTE: + Reindexing from remote clusters does not support manual or automatic slicing. + You can slice a reindex request manually by providing a slice ID and total number + of slices to each request. You can also let reindex automatically parallelize + by using sliced scroll to slice on `_id`. The `slices` parameter specifies the + number of slices to use. Adding `slices` to the reindex request just automates + the manual process, creating sub-requests which means it has some quirks: * You + can see these requests in the tasks API. These sub-requests are "child" tasks + of the task for the request with slices. * Fetching the status of the task for + the request with `slices` only contains the status of completed slices. * These + sub-requests are individually addressable for things like cancellation and rethrottling. + * Rethrottling the request with `slices` will rethrottle the unfinished sub-request + proportionally. * Canceling the request with `slices` will cancel each sub-request. + * Due to the nature of `slices`, each sub-request won't get a perfectly even + portion of the documents. All documents will be addressed, but some slices may + be larger than others. Expect larger slices to have a more even distribution. + * Parameters like `requests_per_second` and `max_docs` on a request with `slices` + are distributed proportionally to each sub-request. Combine that with the previous + point about distribution being uneven and you should conclude that using `max_docs` + with `slices` might not result in exactly `max_docs` documents being reindexed. + * Each sub-request gets a slightly different snapshot of the source, though these + are all taken at approximately the same time. If slicing automatically, setting + `slices` to `auto` will choose a reasonable number for most indices. If slicing + manually or otherwise tuning automatic slicing, use the following guidelines. 
+ Query performance is most efficient when the number of slices is equal to the + number of shards in the index. If that number is large (for example, `500`), + choose a lower number as too many slices will hurt performance. Setting slices + higher than the number of shards generally does not improve efficiency and adds + overhead. Indexing performance scales linearly across available resources with + the number of slices. Whether query or indexing performance dominates the runtime + depends on the documents being reindexed and cluster resources. **Modify documents + during reindexing** Like `_update_by_query`, reindex operations support a script + that modifies the document. Unlike `_update_by_query`, the script is allowed + to modify the document's metadata. Just as in `_update_by_query`, you can set + `ctx.op` to change the operation that is run on the destination. For example, + set `ctx.op` to `noop` if your script decides that the document doesn’t have + to be indexed in the destination. This "no operation" will be reported in the + `noop` counter in the response body. Set `ctx.op` to `delete` if your script + decides that the document must be deleted from the destination. The deletion + will be reported in the `deleted` counter in the response body. Setting `ctx.op` + to anything else will return an error, as will setting any other field in `ctx`. + Think of the possibilities! Just be careful; you are able to change: * `_id` + * `_index` * `_version` * `_routing` Setting `_version` to `null` or clearing + it from the `ctx` map is just like not sending the version in an indexing request. + It will cause the document to be overwritten in the destination regardless of + the version on the target or the version type you use in the reindex API. **Reindex + from remote** Reindex supports reindexing from a remote Elasticsearch cluster. + The `host` parameter must contain a scheme, host, port, and optional path. The + `username` and `password` parameters are optional and when they are present the + reindex operation will connect to the remote Elasticsearch node using basic authentication. + Be sure to use HTTPS when using basic authentication or the password will be + sent in plain text. There is a range of settings available to configure the + behavior of the HTTPS connection. When using Elastic Cloud, it is also possible + to authenticate against the remote cluster through the use of a valid API key. + Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting. + It can be set to a comma-delimited list of allowed remote host and port combinations. + Scheme is ignored; only the host and port are used. For example: ``` reindex.remote.whitelist: + [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*] ``` The list of + allowed hosts must be configured on any nodes that will coordinate the reindex. + This feature should work with remote clusters of any version of Elasticsearch. + This should enable you to upgrade from any version of Elasticsearch to the current + version by reindexing from a cluster of the old version. WARNING: Elasticsearch + does not support forward compatibility across major versions. For example, you + cannot reindex from a 7.x cluster into a 6.x cluster. To enable queries sent + to older versions of Elasticsearch, the `query` parameter is sent directly to + the remote host without validation or modification. NOTE: Reindexing from remote + clusters does not support manual or automatic slicing.
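A sketch of the "Modify documents during reindexing" pattern described above (illustrative only, not part of this patch; the `archived` field name is hypothetical):
```
# Illustrative only: skip documents the script decides not to copy.
# Setting ctx.op to "noop" adds them to the "noop" counter in the response.
await client.reindex(
    source={"index": "src-index"},
    dest={"index": "dest-index"},
    script={
        "lang": "painless",
        "source": "if (ctx._source.archived == true) { ctx.op = 'noop' }",
    },
)
```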
Reindexing from a remote + server uses an on-heap buffer that defaults to a maximum size of 100mb. If the + remote index includes very large documents, you'll need to use a smaller batch + size. It is also possible to set the socket read timeout on the remote connection + with the `socket_timeout` field and the connection timeout with the `connect_timeout` + field. Both default to 30 seconds. **Configuring SSL parameters** Reindex from + remote supports configurable SSL settings. These must be specified in the `elasticsearch.yml` + file, with the exception of the secure settings, which you add in the Elasticsearch + keystore. It is not possible to configure SSL in the body of the reindex request. + + ``_ :param dest: The destination you are copying to. :param source: The source you are copying from. - :param conflicts: Set to proceed to continue reindexing even if there are conflicts. - :param max_docs: The maximum number of documents to reindex. + :param conflicts: Indicates whether to continue reindexing even when there are + conflicts. + :param max_docs: The maximum number of documents to reindex. By default, all + documents are reindexed. If it is a value less than or equal to `scroll_size`, + a scroll will not be used to retrieve the results for the operation. If `conflicts` + is set to `proceed`, the reindex operation could attempt to reindex more + documents from the source than `max_docs` until it has successfully indexed + `max_docs` documents into the target or it has gone through every document + in the source query. :param refresh: If `true`, the request refreshes affected shards to make this operation visible to search. :param requests_per_second: The throttle for this request in sub-requests per - second. Defaults to no throttle. + second. By default, there is no throttle. :param require_alias: If `true`, the destination must be an index alias. :param script: The script to run to update the document source or metadata when reindexing. - :param scroll: Specifies how long a consistent view of the index should be maintained - for scrolled search. + :param scroll: The period of time that a consistent view of the index should + be maintained for scrolled search. :param size: - :param slices: The number of slices this task should be divided into. Defaults - to 1 slice, meaning the task isn’t sliced into subtasks. - :param timeout: Period each indexing waits for automatic index creation, dynamic - mapping updates, and waiting for active shards. + :param slices: The number of slices this task should be divided into. It defaults + to one slice, which means the task isn't sliced into subtasks. Reindex supports + sliced scroll to parallelize the reindexing process. This parallelization + can improve efficiency and provide a convenient way to break the request + down into smaller parts. NOTE: Reindexing from remote clusters does not support + manual or automatic slicing. If set to `auto`, Elasticsearch chooses the + number of slices to use. This setting will use one slice per shard, up to + a certain limit. If there are multiple sources, it will choose the number + of slices based on the index or backing index with the smallest number of + shards. + :param timeout: The period each indexing waits for automatic index creation, + dynamic mapping updates, and waiting for active shards. By default, Elasticsearch + waits for at least one minute before failing. The actual wait time could + be longer, particularly when multiple waits occur.
:param wait_for_active_shards: The number of shard copies that must be active - before proceeding with the operation. Set to `all` or any positive integer - up to the total number of shards in the index (`number_of_replicas+1`). + before proceeding with the operation. Set it to `all` or any positive integer + up to the total number of shards in the index (`number_of_replicas+1`). The + default value is one, which means it waits for each primary shard to be active. :param wait_for_completion: If `true`, the request blocks until the operation is complete. """ @@ -3605,13 +4070,17 @@ async def reindex_rethrottle( ) -> ObjectApiResponse[t.Any]: """ Throttle a reindex operation. Change the number of requests per second for a - particular reindex operation. + particular reindex operation. For example: ``` POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 + ``` Rethrottling that speeds up the query takes effect immediately. Rethrottling + that slows down the query will take effect after completing the current batch. + This behavior prevents scroll timeouts. - ``_ + ``_ - :param task_id: Identifier for the task. + :param task_id: The task identifier, which can be found by using the tasks API. :param requests_per_second: The throttle for this request in sub-requests per - second. + second. It can be either `-1` to turn off throttling or any decimal number + like `1.7` or `12` to throttle to that level. """ if task_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_id'") @@ -3658,7 +4127,7 @@ async def render_search_template( """ Render a search template. Render a search template as a search request body. - ``_ + ``_ :param id: ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required. @@ -3727,7 +4196,7 @@ async def scripts_painless_execute( """ Run a script. Runs a script and returns a result. - ``_ + ``_ :param context: The context that the script should run in. :param context_setup: Additional parameters for the `context`. @@ -3800,7 +4269,7 @@ async def scroll( of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests. - ``_ + ``_ :param scroll_id: Scroll ID of the search. :param rest_total_hits_as_int: If true, the API response’s hit.total property @@ -3992,7 +4461,7 @@ async def search( can provide search queries using the `q` query string parameter or the request body. If both are specified, only the query parameter is used. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams and indices, omit this @@ -4422,7 +4891,7 @@ async def search_mvt( """ Search a vector tile. Search a vector tile for geospatial values. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, or aliases to search :param field: Field containing geospatial data to return @@ -4580,7 +5049,7 @@ async def search_shards( optimizations with routing and shard preferences. When filtered aliases are used, the filter is returned as part of the indices section. - ``_ + ``_ :param index: Returns the indices and shards that a search request would be executed against. @@ -4684,7 +5153,7 @@ async def search_template( """ Run a search with a search template. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (*). 
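A sketch of the rethrottle call documented above (illustrative only, not part of this patch; the task identifier is the placeholder used in the docstring example):
```
# Illustrative only: remove the throttle from a running reindex task.
await client.reindex_rethrottle(
    task_id="r1A2WoRbTwKZ516z6NEs5A:36619",  # placeholder task ID
    requests_per_second=-1,                  # -1 disables throttling
)
```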
@@ -4824,7 +5293,7 @@ async def terms_enum( are actually deleted. Until that happens, the terms enum API will return terms from these documents. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and index aliases to search. Wildcard (*) expressions are supported. @@ -4923,7 +5392,7 @@ async def termvectors( Get term vector information. Get information and statistics about terms in the fields of a particular document. - ``_ + ``_ :param index: Name of the index that contains the document. :param id: Unique identifier of the document. @@ -5063,46 +5532,60 @@ async def update( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update a document. Updates a document by running a script or passing a partial - document. - - ``_ - - :param index: The name of the index - :param id: Document ID - :param detect_noop: Set to false to disable setting 'result' in the response - to 'noop' if no change to the document occurred. - :param doc: A partial update to an existing document. - :param doc_as_upsert: Set to true to use the contents of 'doc' as the value of - 'upsert' + Update a document. Update a document by running a script or passing a partial + document. If the Elasticsearch security features are enabled, you must have the + `index` or `write` index privilege for the target index or index alias. The script + can update, delete, or skip modifying the document. The API also supports passing + a partial document, which is merged into the existing document. To fully replace + an existing document, use the index API. This operation: * Gets the document + (collocated with the shard) from the index. * Runs the specified script. * Indexes + the result. The document must still be reindexed, but using this API removes + some network roundtrips and reduces chances of version conflicts between the + GET and the index operation. The `_source` field must be enabled to use this + API. In addition to `_source`, you can access the following variables through + the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the + current timestamp). + + ``_ + + :param index: The name of the target index. By default, the index is created + automatically if it doesn't exist. + :param id: A unique identifier for the document to be updated. + :param detect_noop: If `true`, the `result` in the response is set to `noop` + (no operation) when there are no changes to the document. + :param doc: A partial update to an existing document. If both `doc` and `script` + are specified, `doc` is ignored. + :param doc_as_upsert: If `true`, use the contents of 'doc' as the value of 'upsert'. + NOTE: Using ingest pipelines with `doc_as_upsert` is not supported. :param if_primary_term: Only perform the operation if the document has this primary term. :param if_seq_no: Only perform the operation if the document has this sequence number. :param lang: The script language. :param refresh: If 'true', Elasticsearch refreshes the affected shards to make - this operation visible to search, if 'wait_for' then wait for a refresh to - make this operation visible to search, if 'false' do nothing with refreshes. - :param require_alias: If true, the destination must be an index alias. - :param retry_on_conflict: Specify how many times should the operation be retried + this operation visible to search. If 'wait_for', it waits for a refresh to + make this operation visible to search. If 'false', it does nothing with refreshes. 
+ :param require_alias: If `true`, the destination must be an index alias. + :param retry_on_conflict: The number of times the operation should be retried when a conflict occurs. - :param routing: Custom value used to route operations to a specific shard. - :param script: Script to execute to update the document. - :param scripted_upsert: Set to true to execute the script whether or not the - document exists. - :param source: Set to false to disable source retrieval. You can also specify - a comma-separated list of the fields you want to retrieve. - :param source_excludes: Specify the source fields you want to exclude. - :param source_includes: Specify the source fields you want to retrieve. - :param timeout: Period to wait for dynamic mapping updates and active shards. - This guarantees Elasticsearch waits for at least the timeout before failing. - The actual wait time could be longer, particularly when multiple waits occur. + :param routing: A custom value used to route operations to a specific shard. + :param script: The script to run to update the document. + :param scripted_upsert: If `true`, run the script whether or not the document + exists. + :param source: If `false`, turn off source retrieval. You can also specify a + comma-separated list of the fields you want to retrieve. + :param source_excludes: The source fields you want to exclude. + :param source_includes: The source fields you want to retrieve. + :param timeout: The period to wait for the following operations: dynamic mapping + updates and waiting for active shards. Elasticsearch waits for at least the + timeout period before failing. The actual wait time could be longer, particularly + when multiple waits occur. :param upsert: If the document does not already exist, the contents of 'upsert' - are inserted as a new document. If the document exists, the 'script' is executed. - :param wait_for_active_shards: The number of shard copies that must be active - before proceeding with the operations. Set to 'all' or any positive integer - up to the total number of shards in the index (number_of_replicas+1). Defaults - to 1 meaning the primary shard. + are inserted as a new document. If the document exists, the 'script' is run. + :param wait_for_active_shards: The number of copies of each shard that must be + active before proceeding with the operation. Set to 'all' or any positive + integer up to the total number of shards in the index (`number_of_replicas`+1). + The default value of `1` means it waits for each primary shard to be active. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -5232,7 +5715,7 @@ async def update_by_query( is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this @@ -5431,7 +5914,7 @@ async def update_by_query_rethrottle( takes effect immediately but rethrotting that slows down the query takes effect after completing the current batch to prevent scroll timeouts. - ``_ + ``_ :param task_id: The ID for the task. 
:param requests_per_second: The throttle for this request in sub-requests per diff --git a/elasticsearch/_async/client/async_search.py b/elasticsearch/_async/client/async_search.py index 8e2bbecf9..c2c3f9526 100644 --- a/elasticsearch/_async/client/async_search.py +++ b/elasticsearch/_async/client/async_search.py @@ -42,7 +42,7 @@ async def delete( the authenticated user that submitted the original search request; users that have the `cancel_task` cluster privilege. - ``_ + ``_ :param id: A unique identifier for the async search. """ @@ -90,7 +90,7 @@ async def get( the results of a specific async search is restricted to the user or API key that submitted it. - ``_ + ``_ :param id: A unique identifier for the async search. :param keep_alive: Specifies how long the async search should be available in @@ -154,7 +154,7 @@ async def status( security features are enabled, use of this API is restricted to the `monitoring_user` role. - ``_ + ``_ :param id: A unique identifier for the async search. :param keep_alive: Specifies how long the async search needs to be available. @@ -336,7 +336,7 @@ async def submit( can be set by changing the `search.max_async_search_response_size` cluster level setting. - ``_ + ``_ :param index: A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices diff --git a/elasticsearch/_async/client/autoscaling.py b/elasticsearch/_async/client/autoscaling.py index 7c1b1f01c..82e0e6d8c 100644 --- a/elasticsearch/_async/client/autoscaling.py +++ b/elasticsearch/_async/client/autoscaling.py @@ -42,7 +42,7 @@ async def delete_autoscaling_policy( by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. - ``_ + ``_ :param name: the name of the autoscaling policy :param master_timeout: Period to wait for a connection to the master node. If @@ -102,7 +102,7 @@ async def get_autoscaling_capacity( capacity was required. This information is provided for diagnosis only. Do not use this information to make autoscaling decisions. - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and @@ -147,7 +147,7 @@ async def get_autoscaling_policy( Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. - ``_ + ``_ :param name: the name of the autoscaling policy :param master_timeout: Period to wait for a connection to the master node. If @@ -200,7 +200,7 @@ async def put_autoscaling_policy( use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. - ``_ + ``_ :param name: the name of the autoscaling policy :param policy: diff --git a/elasticsearch/_async/client/cat.py b/elasticsearch/_async/client/cat.py index c99745002..f51f75373 100644 --- a/elasticsearch/_async/client/cat.py +++ b/elasticsearch/_async/client/cat.py @@ -57,18 +57,20 @@ async def aliases( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get aliases. Retrieves the cluster’s index aliases, including filter and routing - information. The API does not return data stream aliases. CAT APIs are only intended + Get aliases. Get the cluster's index aliases, including filter and routing information. + This API does not return data stream aliases. IMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. 
They are not intended for use by applications. For application consumption, use the aliases API. - ``_ + ``_ :param name: A comma-separated list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. - :param expand_wildcards: Whether to expand wildcard expression to concrete indices - that are open, closed or both. + :param expand_wildcards: The type of index that wildcard patterns can match. + If the request can target data streams, this argument determines whether + wildcard expressions match hidden data streams. It supports comma-separated + values, such as `open,hidden`. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: List of columns to appear in the response. Supports simple wildcards. @@ -78,7 +80,10 @@ async def aliases( the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. + :param master_timeout: The period to wait for a connection to the master node. + If the master node is not available before the timeout expires, the request + fails and returns an error. To indicated that the request should never timeout, + you can set it to `-1`. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -147,13 +152,14 @@ async def allocation( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Provides a snapshot of the number of shards allocated to each data node and their - disk space. IMPORTANT: cat APIs are only intended for human consumption using - the command line or Kibana console. They are not intended for use by applications. + Get shard allocation information. Get a snapshot of the number of shards allocated + to each data node and their disk space. IMPORTANT: CAT APIs are only intended + for human consumption using the command line or Kibana console. They are not + intended for use by applications. - ``_ + ``_ - :param node_id: Comma-separated list of node identifiers or names used to limit + :param node_id: A comma-separated list of node identifiers or names used to limit the returned information. :param bytes: The unit used to display byte values. :param format: Specifies the format to return the columnar data in, can be set @@ -231,17 +237,17 @@ async def component_templates( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get component templates. Returns information about component templates in a cluster. + Get component templates. Get information about component templates in a cluster. Component templates are building blocks for constructing index templates that - specify index mappings, settings, and aliases. CAT APIs are only intended for - human consumption using the command line or Kibana console. They are not intended - for use by applications. For application consumption, use the get component template - API. + specify index mappings, settings, and aliases. IMPORTANT: CAT APIs are only intended + for human consumption using the command line or Kibana console. They are not + intended for use by applications. For application consumption, use the get component + template API. 
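To make the recurring "human versus application consumption" note concrete, a hypothetical sketch (not part of this patch) contrasting a cat API with the JSON API it points applications to:
```
# Illustrative only: cat output is for people at a terminal,
# while applications should use the corresponding JSON API.
human_table = await client.cat.component_templates(name="logs-*", v=True)
app_response = await client.cluster.get_component_template(name="logs-*")
```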
- ``_ + ``_ - :param name: The name of the component template. Accepts wildcard expressions. - If omitted, all component templates are returned. + :param name: The name of the component template. It accepts wildcard expressions. + If it is omitted, all component templates are returned. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: List of columns to appear in the response. Supports simple wildcards. @@ -251,7 +257,7 @@ async def component_templates( the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. + :param master_timeout: The period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -313,17 +319,17 @@ async def count( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get a document count. Provides quick access to a document count for a data stream, + Get a document count. Get quick access to a document count for a data stream, an index, or an entire cluster. The document count only includes live documents, - not deleted documents which have not yet been removed by the merge process. CAT - APIs are only intended for human consumption using the command line or Kibana + not deleted documents which have not yet been removed by the merge process. IMPORTANT: + CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the count API. - ``_ + ``_ - :param index: Comma-separated list of data streams, indices, and aliases used - to limit the request. Supports wildcards (`*`). To target all data streams + :param index: A comma-separated list of data streams, indices, and aliases used + to limit the request. It supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -390,12 +396,13 @@ async def fielddata( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns the amount of heap memory currently used by the field data cache on every - data node in the cluster. IMPORTANT: cat APIs are only intended for human consumption - using the command line or Kibana console. They are not intended for use by applications. - For application consumption, use the nodes stats API. + Get field data cache information. Get the amount of heap memory currently used + by the field data cache on every data node in the cluster. IMPORTANT: cat APIs + are only intended for human consumption using the command line or Kibana console. + They are not intended for use by applications. For application consumption, use + the nodes stats API. - ``_ + ``_ :param fields: Comma-separated list of fields used to limit returned information. To retrieve all fields, omit this parameter. 
@@ -467,19 +474,19 @@ async def health( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns the health status of a cluster, similar to the cluster health API. IMPORTANT: - cat APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. For application consumption, - use the cluster health API. This API is often used to check malfunctioning clusters. - To help you track cluster health alongside log files and alerting systems, the - API returns timestamps in two formats: `HH:MM:SS`, which is human-readable but - includes no date information; `Unix epoch time`, which is machine-sortable and - includes date information. The latter format is useful for cluster recoveries - that take multiple days. You can use the cat health API to verify cluster health - across multiple nodes. You also can use the API to track the recovery of a large - cluster over a longer period of time. - - ``_ + Get the cluster health status. IMPORTANT: CAT APIs are only intended for human + consumption using the command line or Kibana console. They are not intended for + use by applications. For application consumption, use the cluster health API. + This API is often used to check malfunctioning clusters. To help you track cluster + health alongside log files and alerting systems, the API returns timestamps in + two formats: `HH:MM:SS`, which is human-readable but includes no date information; + `Unix epoch time`, which is machine-sortable and includes date information. The + latter format is useful for cluster recoveries that take multiple days. You can + use the cat health API to verify cluster health across multiple nodes. You also + can use the API to track the recovery of a large cluster over a longer period + of time. + + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -531,9 +538,9 @@ async def health( @_rewrite_parameters() async def help(self) -> TextApiResponse: """ - Get CAT help. Returns help for the CAT APIs. + Get CAT help. Get help for the CAT APIs. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_cat" @@ -582,7 +589,7 @@ async def indices( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get index information. Returns high-level information about indices in a cluster, + Get index information. Get high-level information about indices in a cluster, including backing indices for data streams. Use this request to get the following information for each index in a cluster: - shard count - document count - deleted document count - primary store size - total store size of all shards, including @@ -593,7 +600,7 @@ async def indices( using the command line or Kibana console. They are not intended for use by applications. For application consumption, use an index endpoint. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -684,12 +691,12 @@ async def master( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about the master node, including the ID, bound IP address, - and name. IMPORTANT: cat APIs are only intended for human consumption using the - command line or Kibana console. They are not intended for use by applications. - For application consumption, use the nodes info API. 
+ Get master node information. Get information about the master node, including + the ID, bound IP address, and name. IMPORTANT: cat APIs are only intended for + human consumption using the command line or Kibana console. They are not intended + for use by applications. For application consumption, use the nodes info API. - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -858,13 +865,13 @@ async def ml_data_frame_analytics( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get data frame analytics jobs. Returns configuration and usage information about - data frame analytics jobs. CAT APIs are only intended for human consumption using - the Kibana console or command line. They are not intended for use by applications. + Get data frame analytics jobs. Get configuration and usage information about + data frame analytics jobs. IMPORTANT: CAT APIs are only intended for human consumption + using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get data frame analytics jobs statistics API. - ``_ + ``_ :param id: The ID of the data frame analytics to fetch :param allow_no_match: Whether to ignore if a wildcard expression matches no @@ -1020,14 +1027,15 @@ async def ml_datafeeds( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get datafeeds. Returns configuration and usage information about datafeeds. This + Get datafeeds. Get configuration and usage information about datafeeds. This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` - cluster privileges to use this API. CAT APIs are only intended for human consumption - using the Kibana console or command line. They are not intended for use by applications. - For application consumption, use the get datafeed statistics API. + cluster privileges to use this API. IMPORTANT: CAT APIs are only intended for + human consumption using the Kibana console or command line. They are not intended + for use by applications. For application consumption, use the get datafeed statistics + API. - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. @@ -1381,15 +1389,15 @@ async def ml_jobs( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get anomaly detection jobs. Returns configuration and usage information for anomaly + Get anomaly detection jobs. Get configuration and usage information for anomaly detection jobs. This API returns a maximum of 10,000 jobs. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, - or `manage` cluster privileges to use this API. CAT APIs are only intended for - human consumption using the Kibana console or command line. They are not intended - for use by applications. For application consumption, use the get anomaly detection - job statistics API. + or `manage` cluster privileges to use this API. IMPORTANT: CAT APIs are only + intended for human consumption using the Kibana console or command line. They + are not intended for use by applications. For application consumption, use the + get anomaly detection job statistics API. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. 
:param allow_no_match: Specifies what to do when the request: * Contains wildcard @@ -1565,12 +1573,12 @@ async def ml_trained_models( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get trained models. Returns configuration and usage information about inference - trained models. CAT APIs are only intended for human consumption using the Kibana - console or command line. They are not intended for use by applications. For application - consumption, use the get trained models statistics API. + Get trained models. Get configuration and usage information about inference trained + models. IMPORTANT: CAT APIs are only intended for human consumption using the + Kibana console or command line. They are not intended for use by applications. + For application consumption, use the get trained models statistics API. - ``_ + ``_ :param model_id: A unique identifier for the trained model. :param allow_no_match: Specifies what to do when the request: contains wildcard @@ -1656,12 +1664,12 @@ async def nodeattrs( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about custom node attributes. IMPORTANT: cat APIs are only - intended for human consumption using the command line or Kibana console. They - are not intended for use by applications. For application consumption, use the - nodes info API. + Get node attribute information. Get information about custom node attributes. + IMPORTANT: cat APIs are only intended for human consumption using the command + line or Kibana console. They are not intended for use by applications. For application + consumption, use the nodes info API. - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -1737,12 +1745,12 @@ async def nodes( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about the nodes in a cluster. IMPORTANT: cat APIs are only - intended for human consumption using the command line or Kibana console. They - are not intended for use by applications. For application consumption, use the - nodes info API. + Get node information. Get information about the nodes in a cluster. IMPORTANT: + cat APIs are only intended for human consumption using the command line or Kibana + console. They are not intended for use by applications. For application consumption, + use the nodes info API. - ``_ + ``_ :param bytes: The unit used to display byte values. :param format: Specifies the format to return the columnar data in, can be set @@ -1822,12 +1830,12 @@ async def pending_tasks( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns cluster-level changes that have not yet been executed. IMPORTANT: cat - APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. For application consumption, - use the pending cluster tasks API. + Get pending task information. Get information about cluster-level changes that + have not yet taken effect. IMPORTANT: cat APIs are only intended for human consumption + using the command line or Kibana console. They are not intended for use by applications. + For application consumption, use the pending cluster tasks API. - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. 
@@ -1900,12 +1908,12 @@ async def plugins( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns a list of plugins running on each node of a cluster. IMPORTANT: cat APIs - are only intended for human consumption using the command line or Kibana console. - They are not intended for use by applications. For application consumption, use - the nodes info API. + Get plugin information. Get a list of plugins running on each node of a cluster. + IMPORTANT: cat APIs are only intended for human consumption using the command + line or Kibana console. They are not intended for use by applications. For application + consumption, use the nodes info API. - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -1984,16 +1992,16 @@ async def recovery( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about ongoing and completed shard recoveries. Shard recovery - is the process of initializing a shard copy, such as restoring a primary shard - from a snapshot or syncing a replica shard from a primary shard. When a shard - recovery completes, the recovered shard is available for search and indexing. - For data streams, the API returns information about the stream’s backing indices. - IMPORTANT: cat APIs are only intended for human consumption using the command - line or Kibana console. They are not intended for use by applications. For application - consumption, use the index recovery API. + Get shard recovery information. Get information about ongoing and completed shard + recoveries. Shard recovery is the process of initializing a shard copy, such + as restoring a primary shard from a snapshot or syncing a replica shard from + a primary shard. When a shard recovery completes, the recovered shard is available + for search and indexing. For data streams, the API returns information about + the stream’s backing indices. IMPORTANT: cat APIs are only intended for human + consumption using the command line or Kibana console. They are not intended for + use by applications. For application consumption, use the index recovery API. - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -2074,12 +2082,12 @@ async def repositories( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns the snapshot repositories for a cluster. IMPORTANT: cat APIs are only - intended for human consumption using the command line or Kibana console. They - are not intended for use by applications. For application consumption, use the - get snapshot repository API. + Get snapshot repository information. Get a list of snapshot repositories for + a cluster. IMPORTANT: cat APIs are only intended for human consumption using + the command line or Kibana console. They are not intended for use by applications. + For application consumption, use the get snapshot repository API. - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -2152,13 +2160,13 @@ async def segments( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns low-level information about the Lucene segments in index shards. For - data streams, the API returns information about the backing indices. 
IMPORTANT: - cat APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. For application consumption, - use the index segments API. + Get segment information. Get low-level information about the Lucene segments + in index shards. For data streams, the API returns information about the backing + indices. IMPORTANT: cat APIs are only intended for human consumption using the + command line or Kibana console. They are not intended for use by applications. + For application consumption, use the index segments API. - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -2244,12 +2252,12 @@ async def shards( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about the shards in a cluster. For data streams, the API - returns information about the backing indices. IMPORTANT: cat APIs are only intended - for human consumption using the command line or Kibana console. They are not - intended for use by applications. + Get shard information. Get information about the shards in a cluster. For data + streams, the API returns information about the backing indices. IMPORTANT: cat + APIs are only intended for human consumption using the command line or Kibana + console. They are not intended for use by applications. - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -2330,13 +2338,13 @@ async def snapshots( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about the snapshots stored in one or more repositories. A - snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: - cat APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. For application consumption, - use the get snapshot API. + Get snapshot information. Get information about the snapshots stored in one or + more repositories. A snapshot is a backup of an index or running Elasticsearch + cluster. IMPORTANT: cat APIs are only intended for human consumption using the + command line or Kibana console. They are not intended for use by applications. + For application consumption, use the get snapshot API. - ``_ + ``_ :param repository: A comma-separated list of snapshot repositories used to limit the request. Accepts wildcard expressions. `_all` returns all repositories. @@ -2422,12 +2430,12 @@ async def tasks( wait_for_completion: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about tasks currently executing in the cluster. IMPORTANT: - cat APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. For application consumption, - use the task management API. + Get task information. Get information about tasks currently running in the cluster. + IMPORTANT: cat APIs are only intended for human consumption using the command + line or Kibana console. They are not intended for use by applications. For application + consumption, use the task management API. - ``_ + ``_ :param actions: The task action names, which are used to limit the response. 
:param detailed: If `true`, the response includes detailed information about @@ -2513,13 +2521,13 @@ async def templates( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about index templates in a cluster. You can use index templates - to apply index settings and field mappings to new indices at creation. IMPORTANT: - cat APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. For application consumption, - use the get index template API. + Get index template information. Get information about the index templates in + a cluster. You can use index templates to apply index settings and field mappings + to new indices at creation. IMPORTANT: cat APIs are only intended for human consumption + using the command line or Kibana console. They are not intended for use by applications. + For application consumption, use the get index template API. - ``_ + ``_ :param name: The name of the template to return. Accepts wildcard expressions. If omitted, all templates are returned. @@ -2599,13 +2607,13 @@ async def thread_pool( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns thread pool statistics for each node in a cluster. Returned information - includes all built-in thread pools and custom thread pools. IMPORTANT: cat APIs - are only intended for human consumption using the command line or Kibana console. - They are not intended for use by applications. For application consumption, use - the nodes info API. + Get thread pool statistics. Get thread pool statistics for each node in a cluster. + Returned information includes all built-in thread pools and custom thread pools. + IMPORTANT: cat APIs are only intended for human consumption using the command + line or Kibana console. They are not intended for use by applications. For application + consumption, use the nodes info API. - ``_ + ``_ :param thread_pool_patterns: A comma-separated list of thread pool names used to limit the request. Accepts wildcard expressions. @@ -2853,12 +2861,12 @@ async def transforms( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get transforms. Returns configuration and usage information about transforms. + Get transform information. Get configuration and usage information about transforms. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get transform statistics API. - ``_ + ``_ :param transform_id: A transform identifier or a wildcard expression. If you do not specify one of these options, the API returns information for all diff --git a/elasticsearch/_async/client/ccr.py b/elasticsearch/_async/client/ccr.py index b7d24b26d..5dc4ae038 100644 --- a/elasticsearch/_async/client/ccr.py +++ b/elasticsearch/_async/client/ccr.py @@ -40,7 +40,7 @@ async def delete_auto_follow_pattern( Delete auto-follow patterns. Delete a collection of cross-cluster replication auto-follow patterns. - ``_ + ``_ :param name: The name of the auto follow pattern. :param master_timeout: Period to wait for a connection to the master node. @@ -122,7 +122,7 @@ async def follow( cross-cluster replication starts replicating operations from the leader index to the follower index. - ``_ + ``_ :param index: The name of the follower index. 
:param leader_index: The name of the index in the leader cluster to follow. @@ -249,7 +249,7 @@ async def follow_info( index names, replication options, and whether the follower indices are active or paused. - ``_ + ``_ :param index: A comma-separated list of index patterns; use `_all` to perform the operation on all indices @@ -296,7 +296,7 @@ async def follow_stats( shard-level stats about the "following tasks" associated with each shard for the specified indices. - ``_ + ``_ :param index: A comma-separated list of index patterns; use `_all` to perform the operation on all indices @@ -370,7 +370,7 @@ async def forget_follower( API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked. - ``_ + ``_ :param index: the name of the leader index for which specified follower retention leases should be removed @@ -431,7 +431,7 @@ async def get_auto_follow_pattern( """ Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. - ``_ + ``_ :param name: Specifies the auto-follow pattern collection that you want to retrieve. If you do not specify a name, the API returns information for all collections. @@ -486,7 +486,7 @@ async def pause_auto_follow_pattern( patterns. Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim. - ``_ + ``_ :param name: The name of the auto follow pattern that should pause discovering new indices to follow. @@ -534,7 +534,7 @@ async def pause_follow( resume following with the resume follower API. You can pause and resume a follower index to change the configuration of the following task. - ``_ + ``_ :param index: The name of the follower index that should pause following its leader index. @@ -620,7 +620,7 @@ async def put_auto_follow_pattern( that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns. - ``_ + ``_ :param name: The name of the collection of auto-follow patterns. :param remote_cluster: The remote cluster containing the leader indices to match @@ -752,7 +752,7 @@ async def resume_auto_follow_pattern( Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim. - ``_ + ``_ :param name: The name of the auto follow pattern to resume discovering new indices to follow. @@ -825,7 +825,7 @@ async def resume_follow( to failures during following tasks. When this API returns, the follower index will resume fetching operations from the leader index. - ``_ + ``_ :param index: The name of the follow index to resume following. :param master_timeout: Period to wait for a connection to the master node. @@ -913,7 +913,7 @@ async def stats( Get cross-cluster replication stats. This API returns stats about auto-following and the same shard-level stats as the get follower stats API. - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. :param timeout: Period to wait for a response. If no response is received before @@ -964,7 +964,7 @@ async def unfollow( regular index to a follower index. Converting a follower index to a regular index is an irreversible operation. - ``_ + ``_ :param index: The name of the follower index that should be turned into a regular index. 
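A hypothetical cross-cluster replication sketch (not part of this patch), assuming a remote cluster alias `leader-cluster` is already configured:
```
# Illustrative only: create a follower index, then pause and resume it.
await client.ccr.follow(
    index="follower-index",
    leader_index="leader-index",
    remote_cluster="leader-cluster",
)
await client.ccr.pause_follow(index="follower-index")
await client.ccr.resume_follow(index="follower-index")
```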
diff --git a/elasticsearch/_async/client/cluster.py b/elasticsearch/_async/client/cluster.py index 7722dd7fc..4fb033e73 100644 --- a/elasticsearch/_async/client/cluster.py +++ b/elasticsearch/_async/client/cluster.py @@ -53,7 +53,7 @@ async def allocation_explain( or why a shard continues to remain on its current node when you might expect otherwise. - ``_ + ``_ :param current_node: Specifies the node ID or the name of the node to only explain a shard that is currently located on the specified node. @@ -126,7 +126,7 @@ async def delete_component_template( Delete component templates. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. - ``_ + ``_ :param name: Comma-separated list or wildcard expression of component template names used to limit the request. @@ -178,7 +178,7 @@ async def delete_voting_config_exclusions( Clear cluster voting config exclusions. Remove master-eligible nodes from the voting configuration exclusion list. - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. :param wait_for_removal: Specifies whether to wait for all excluded nodes to @@ -229,7 +229,7 @@ async def exists_component_template( Check component templates. Returns information about whether a particular component template exists. - ``_ + ``_ :param name: Comma-separated list of component template names used to limit the request. Wildcard (*) expressions are supported. @@ -284,7 +284,7 @@ async def get_component_template( """ Get component templates. Get information about component templates. - ``_ + ``_ :param name: Comma-separated list of component template names used to limit the request. Wildcard (`*`) expressions are supported. @@ -348,7 +348,7 @@ async def get_settings( Get cluster-wide settings. By default, it returns only settings that have been explicitly defined. - ``_ + ``_ :param flat_settings: If `true`, returns settings in flat format. :param include_defaults: If `true`, returns default cluster settings from the @@ -439,7 +439,7 @@ async def health( high watermark health level. The cluster status is controlled by the worst index status. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported. To target @@ -543,7 +543,7 @@ async def info( """ Get cluster info. Returns basic information about the cluster. - ``_ + ``_ :param target: Limits the information returned to the specific target. Supports a comma-separated list, such as http,ingest. @@ -592,7 +592,7 @@ async def pending_tasks( index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API. - ``_ + ``_ :param local: If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. @@ -667,7 +667,7 @@ async def post_voting_config_exclusions( master-ineligible nodes or when removing fewer than half of the master-eligible nodes. - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. :param node_ids: A comma-separated list of the persistent ids of the nodes to @@ -746,7 +746,7 @@ async def put_component_template( template to a data stream or index. To be applied, a component template must be included in an index template's `composed_of` list. - ``_ + ``_ :param name: Name of the component template to create. 
Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; @@ -854,7 +854,7 @@ async def put_settings( settings can clear unexpectedly, resulting in a potentially undesired cluster configuration. - ``_ + ``_ :param flat_settings: Return settings in flat format (default: false) :param master_timeout: Explicit operation timeout for connection to master node @@ -910,7 +910,7 @@ async def remote_info( This API returns connection and endpoint information keyed by the configured remote cluster alias. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_remote/info" @@ -973,7 +973,7 @@ async def reroute( API with the `?retry_failed` URI query parameter, which will attempt a single retry round for these shards. - ``_ + ``_ :param commands: Defines the commands to perform. :param dry_run: If true, then the request simulates the operation. It will calculate @@ -1081,7 +1081,7 @@ async def state( external monitoring tools. Instead, obtain the information you require using other more stable cluster APIs. - ``_ + ``_ :param metric: Limit the information returned to the specified metrics :param index: A comma-separated list of index names; use `_all` or empty string @@ -1167,7 +1167,7 @@ async def stats( usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins). - ``_ + ``_ :param node_id: Comma-separated list of node filters used to limit returned information. Defaults to all nodes in the cluster. diff --git a/elasticsearch/_async/client/connector.py b/elasticsearch/_async/client/connector.py index 9cf131642..e83cbaa53 100644 --- a/elasticsearch/_async/client/connector.py +++ b/elasticsearch/_async/client/connector.py @@ -46,7 +46,7 @@ async def check_in( Check in a connector. Update the `last_seen` field in the connector and set it to the current timestamp. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be checked in """ @@ -91,7 +91,7 @@ async def delete( ingest pipelines, or data indices associated with the connector. These need to be removed manually. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be deleted :param delete_sync_jobs: A flag indicating if associated sync jobs should be @@ -136,7 +136,7 @@ async def get( """ Get a connector. Get the details about a connector. - ``_ + ``_ :param connector_id: The unique identifier of the connector """ @@ -232,7 +232,7 @@ async def last_sync( Update the connector last sync stats. Update the fields related to the last sync of a connector. This action is used for analytics and monitoring. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param last_access_control_sync_error: @@ -327,7 +327,7 @@ async def list( """ Get all connectors. Get information about all connectors. - ``_ + ``_ :param connector_name: A comma-separated list of connector names to fetch connector documents for @@ -406,7 +406,7 @@ async def post( a managed service on Elastic Cloud. Self-managed connectors (Connector clients) are self-managed on your infrastructure. - ``_ + ``_ :param description: :param index_name: @@ -485,7 +485,7 @@ async def put( """ Create or update a connector. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be created or updated. ID is auto-generated if not provided. @@ -558,7 +558,7 @@ async def sync_job_cancel( connector service is then responsible for setting the status of connector sync jobs to cancelled. 
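A minimal sketch of the sync job cancellation flow described above (illustrative only, not part of this patch; the job identifier is a placeholder):
```
# Illustrative only: request cancellation; the connector service is
# responsible for later setting the job status to "cancelled".
await client.connector.sync_job_cancel(
    connector_sync_job_id="my-sync-job-id",
)
```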
- ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job """ @@ -607,7 +607,7 @@ async def sync_job_check_in( on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. - ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job to be checked in. @@ -665,7 +665,7 @@ async def sync_job_claim( service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. - ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job. :param worker_hostname: The host name of the current system that will run the @@ -723,7 +723,7 @@ async def sync_job_delete( Delete a connector sync job. Remove a connector sync job and its associated data. This is a destructive action that is not recoverable. - ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job to be deleted @@ -774,7 +774,7 @@ async def sync_job_error( you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. - ``_ + ``_ :param connector_sync_job_id: The unique identifier for the connector sync job. :param error: The error for the connector sync job error field. @@ -825,7 +825,7 @@ async def sync_job_get( """ Get a connector sync job. - ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job """ @@ -895,7 +895,7 @@ async def sync_job_list( Get all connector sync jobs. Get information about all stored connector sync jobs listed by their creation date in ascending order. - ``_ + ``_ :param connector_id: A connector id to fetch connector sync jobs for :param from_: Starting offset (default: 0) @@ -958,7 +958,7 @@ async def sync_job_post( Create a connector sync job. Create a connector sync job document in the internal index and initialize its counters and timestamps with default values. - ``_ + ``_ :param id: The id of the associated connector :param job_type: @@ -1031,7 +1031,7 @@ async def sync_job_update_stats( service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. - ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job. :param deleted_document_count: The number of documents the sync job deleted. @@ -1111,7 +1111,7 @@ async def update_active_filtering( Activate the connector draft filter. Activates the valid draft filtering for a connector. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated """ @@ -1161,7 +1161,7 @@ async def update_api_key_id( secret ID is required only for Elastic managed (native) connectors. Self-managed connectors (connector clients) do not use this field. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param api_key_id: @@ -1217,7 +1217,7 @@ async def update_configuration( Update the connector configuration. Update the configuration field in the connector document. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param configuration: @@ -1274,7 +1274,7 @@ async def update_error( to error. Otherwise, if the error is reset to null, the connector status is updated to connected. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param error: @@ -1334,7 +1334,7 @@ async def update_features( on your own infrastructure. 
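The connector methods above can be combined roughly as follows; the connector id, target index, and service type are made-up values, and this and the later sketches assume a `client` created as in the previous example.

```python
from elasticsearch import AsyncElasticsearch


async def create_connector_and_sync(client: AsyncElasticsearch) -> None:
    # Create (or update) a connector document; all identifiers here are hypothetical.
    await client.connector.put(
        connector_id="my-connector",
        index_name="search-demo",
        name="Demo connector",
        service_type="google_drive",
    )
    # Queue a full sync job for the connector.
    await client.connector.sync_job_post(id="my-connector", job_type="full")
```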
This service runs automatically on Elastic Cloud for Elastic managed connectors. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated. :param features: @@ -1392,7 +1392,7 @@ async def update_filtering( is activated once validated by the running Elastic connector service. The filtering property is used to configure sync rules (both basic and advanced) for a connector. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param advanced_snippet: @@ -1450,7 +1450,7 @@ async def update_filtering_validation( Update the connector draft filtering validation. Update the draft filtering validation info for a connector. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param validation: @@ -1504,7 +1504,7 @@ async def update_index_name( Update the connector index name. Update the `index_name` field of a connector, specifying the index where the data ingested by the connector is stored. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param index_name: @@ -1558,7 +1558,7 @@ async def update_name( """ Update the connector name and description. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param description: @@ -1612,7 +1612,7 @@ async def update_native( """ Update the connector is_native flag. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param is_native: @@ -1666,7 +1666,7 @@ async def update_pipeline( Update the connector pipeline. When you create a new connector, the configuration of an ingest pipeline is populated with default settings. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param pipeline: @@ -1719,7 +1719,7 @@ async def update_scheduling( """ Update the connector scheduling. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param scheduling: @@ -1772,7 +1772,7 @@ async def update_service_type( """ Update the connector service type. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param service_type: @@ -1832,7 +1832,7 @@ async def update_status( """ Update the connector status. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param status: diff --git a/elasticsearch/_async/client/dangling_indices.py b/elasticsearch/_async/client/dangling_indices.py index 59f5e3267..4f0fe7c82 100644 --- a/elasticsearch/_async/client/dangling_indices.py +++ b/elasticsearch/_async/client/dangling_indices.py @@ -44,7 +44,7 @@ async def delete_dangling_index( For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. - ``_ + ``_ :param index_uuid: The UUID of the index to delete. Use the get dangling indices API to find the UUID. @@ -103,7 +103,7 @@ async def import_dangling_index( For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. - ``_ + ``_ :param index_uuid: The UUID of the index to import. Use the get dangling indices API to locate the UUID. @@ -162,7 +162,7 @@ async def list_dangling_indices( indices while an Elasticsearch node is offline. Use this API to list dangling indices, which you can then import or delete. 
- ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_dangling" diff --git a/elasticsearch/_async/client/enrich.py b/elasticsearch/_async/client/enrich.py index f34e874c8..73ba3c227 100644 --- a/elasticsearch/_async/client/enrich.py +++ b/elasticsearch/_async/client/enrich.py @@ -39,7 +39,7 @@ async def delete_policy( """ Delete an enrich policy. Deletes an existing enrich policy and its enrich index. - ``_ + ``_ :param name: Enrich policy to delete. :param master_timeout: Period to wait for a connection to the master node. @@ -84,7 +84,7 @@ async def execute_policy( """ Run an enrich policy. Create the enrich index for an existing enrich policy. - ``_ + ``_ :param name: Enrich policy to execute. :param master_timeout: Period to wait for a connection to the master node. @@ -132,7 +132,7 @@ async def get_policy( """ Get an enrich policy. Returns information about an enrich policy. - ``_ + ``_ :param name: Comma-separated list of enrich policy names used to limit the request. To return information for all enrich policies, omit this parameter. @@ -186,7 +186,7 @@ async def put_policy( """ Create an enrich policy. Creates an enrich policy. - ``_ + ``_ :param name: Name of the enrich policy to create or update. :param geo_match: Matches enrich data to incoming documents based on a `geo_shape` @@ -244,7 +244,7 @@ async def stats( Get enrich stats. Returns enrich coordinator statistics and information about enrich policies that are currently executing. - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. """ diff --git a/elasticsearch/_async/client/eql.py b/elasticsearch/_async/client/eql.py index 47af75be0..17c896e12 100644 --- a/elasticsearch/_async/client/eql.py +++ b/elasticsearch/_async/client/eql.py @@ -39,7 +39,7 @@ async def delete( Delete an async EQL search. Delete an async EQL search or a stored synchronous EQL search. The API also deletes results for the search. - ``_ + ``_ :param id: Identifier for the search to delete. A search ID is provided in the EQL search API's response for an async search. A search ID is also provided @@ -86,7 +86,7 @@ async def get( Get async EQL search results. Get the current status and available results for an async EQL search or a stored synchronous EQL search. - ``_ + ``_ :param id: Identifier for the search. :param keep_alive: Period for which the search and its results are stored on @@ -137,7 +137,7 @@ async def get_status( Get the async EQL status. Get the current status for an async EQL search or a stored synchronous EQL search without returning results. - ``_ + ``_ :param id: Identifier for the search. """ @@ -233,7 +233,7 @@ async def search( query. EQL assumes each document in a data stream or index corresponds to an event. - ``_ + ``_ :param index: The name of the index to scope the operation :param query: EQL query you wish to run. diff --git a/elasticsearch/_async/client/esql.py b/elasticsearch/_async/client/esql.py index 764f96658..7da230d9d 100644 --- a/elasticsearch/_async/client/esql.py +++ b/elasticsearch/_async/client/esql.py @@ -78,7 +78,7 @@ async def async_query( The API accepts the same parameters and request body as the synchronous query API, along with additional async related properties. - ``_ + ``_ :param query: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. 
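A possible use of the enrich APIs shown above: define a match policy and build its enrich index. The policy name, source index, and field names are assumptions.

```python
from elasticsearch import AsyncElasticsearch


async def setup_enrich_policy(client: AsyncElasticsearch) -> None:
    # Define a policy that enriches documents by matching on a hypothetical "email" field.
    await client.enrich.put_policy(
        name="users-policy",
        match={
            "indices": ["users"],
            "match_field": "email",
            "enrich_fields": ["first_name", "last_name"],
        },
    )
    # Build the enrich index for the policy so an enrich processor can use it.
    await client.enrich.execute_policy(name="users-policy")
```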
@@ -189,7 +189,7 @@ async def async_query_delete( authenticated user that submitted the original query request * Users with the `cancel_task` cluster privilege - ``_ + ``_ :param id: The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the @@ -240,7 +240,7 @@ async def async_query_get( features are enabled, only the user who first submitted the ES|QL query can retrieve the results using this API. - ``_ + ``_ :param id: The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the @@ -334,7 +334,7 @@ async def query( Run an ES|QL query. Get search results for an ES|QL (Elasticsearch query language) query. - ``_ + ``_ :param query: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. diff --git a/elasticsearch/_async/client/features.py b/elasticsearch/_async/client/features.py index f1d79ec34..d3391f777 100644 --- a/elasticsearch/_async/client/features.py +++ b/elasticsearch/_async/client/features.py @@ -48,7 +48,7 @@ async def get_features( this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node. - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. """ @@ -102,7 +102,7 @@ async def reset_features( on the master node if you have any doubts about which plugins are installed on individual nodes. - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. """ diff --git a/elasticsearch/_async/client/fleet.py b/elasticsearch/_async/client/fleet.py index eb05f0352..ba6637b1d 100644 --- a/elasticsearch/_async/client/fleet.py +++ b/elasticsearch/_async/client/fleet.py @@ -49,7 +49,7 @@ async def global_checkpoints( Returns the current global checkpoints for an index. This API is design for internal use by the fleet server project. - ``_ + ``_ :param index: A single index or index alias that resolves to a single index. :param checkpoints: A comma separated list of previous global checkpoints. When diff --git a/elasticsearch/_async/client/graph.py b/elasticsearch/_async/client/graph.py index df8f3fdbe..e713aa26b 100644 --- a/elasticsearch/_async/client/graph.py +++ b/elasticsearch/_async/client/graph.py @@ -54,7 +54,7 @@ async def explore( from one more vertices of interest. You can exclude vertices that have already been returned. - ``_ + ``_ :param index: Name of the index. :param connections: Specifies or more fields from which you want to extract terms diff --git a/elasticsearch/_async/client/ilm.py b/elasticsearch/_async/client/ilm.py index 53c1d959f..f4adf9473 100644 --- a/elasticsearch/_async/client/ilm.py +++ b/elasticsearch/_async/client/ilm.py @@ -42,7 +42,7 @@ async def delete_lifecycle( If the policy is being used to manage any indices, the request fails and returns an error. - ``_ + ``_ :param name: Identifier for the policy. :param master_timeout: Period to wait for a connection to the master node. If @@ -98,7 +98,7 @@ async def explain_lifecycle( lifecycle state, provides the definition of the running phase, and information about any failures. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to target. Supports wildcards (`*`). To target all data streams and indices, use `*` @@ -156,7 +156,7 @@ async def get_lifecycle( """ Get lifecycle policies. 
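The EQL and ES|QL search endpoints above could be called along these lines; the index name and queries are illustrative.

```python
from elasticsearch import AsyncElasticsearch


async def run_queries(client: AsyncElasticsearch) -> None:
    # EQL: find events where a process named cmd.exe was observed.
    eql_resp = await client.eql.search(
        index="my-logs",
        query='process where process.name == "cmd.exe"',
    )
    print(len(eql_resp["hits"]["events"]))

    # ES|QL: return the first ten rows from the same (hypothetical) index.
    esql_resp = await client.esql.query(query="FROM my-logs | LIMIT 10")
    print(esql_resp["columns"])
```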
- ``_ + ``_ :param name: Identifier for the policy. :param master_timeout: Period to wait for a connection to the master node. If @@ -207,7 +207,7 @@ async def get_status( """ Get the ILM status. Get the current index lifecycle management status. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ilm/status" @@ -259,7 +259,7 @@ async def migrate_to_data_tiers( stop ILM and get ILM status APIs to wait until the reported operation mode is `STOPPED`. - ``_ + ``_ :param dry_run: If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration. This provides @@ -333,7 +333,7 @@ async def move_to_step( specified in the ILM policy are considered valid. An index cannot move to a step that is not part of its policy. - ``_ + ``_ :param index: The name of the index whose lifecycle step is to change :param current_step: The step that the index is expected to be in. @@ -398,7 +398,7 @@ async def put_lifecycle( and the policy version is incremented. NOTE: Only the latest version of the policy is stored, you cannot revert to previous versions. - ``_ + ``_ :param name: Identifier for the policy. :param master_timeout: Period to wait for a connection to the master node. If @@ -458,7 +458,7 @@ async def remove_policy( Remove policies from an index. Remove the assigned lifecycle policies from an index or a data stream's backing indices. It also stops managing the indices. - ``_ + ``_ :param index: The name of the index to remove policy on """ @@ -501,7 +501,7 @@ async def retry( and runs the step. Use the explain lifecycle state API to determine whether an index is in the ERROR step. - ``_ + ``_ :param index: The name of the indices (comma-separated) whose failed lifecycle step is to be retry @@ -545,7 +545,7 @@ async def start( stopped. ILM is started automatically when the cluster is formed. Restarting ILM is necessary only when it has been stopped using the stop ILM API. - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and @@ -597,7 +597,7 @@ async def stop( might continue to run until in-progress operations complete and the plugin can be safely stopped. Use the get ILM status API to check whether ILM is running. - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py index 1b0cb6332..0725a88e5 100644 --- a/elasticsearch/_async/client/indices.py +++ b/elasticsearch/_async/client/indices.py @@ -58,7 +58,7 @@ async def add_block( Add an index block. Limits the operations allowed on an index by blocking specific operation types. - ``_ + ``_ :param index: A comma separated list of indices to add a block to :param block: The block to add (one of read, write, read_only or metadata) @@ -150,7 +150,7 @@ async def analyze( of tokens gets generated, an error occurs. The `_analyze` endpoint without a specified index will always use `10000` as its limit. - ``_ + ``_ :param index: Index used to derive the analyzer. If specified, the `analyzer` or field parameter overrides this value. If no index is specified or the @@ -255,7 +255,7 @@ async def clear_cache( `query`, or `request` parameters. To clear the cache only of specific fields, use the `fields` parameter. 
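Under the same assumptions, the ILM `put_lifecycle` call described above might look like this; the policy name and phase definitions are examples only.

```python
from elasticsearch import AsyncElasticsearch


async def create_ilm_policy(client: AsyncElasticsearch) -> None:
    # Roll indices over after seven days and delete them after thirty.
    await client.ilm.put_lifecycle(
        name="demo-policy",
        policy={
            "phases": {
                "hot": {"actions": {"rollover": {"max_age": "7d"}}},
                "delete": {"min_age": "30d", "actions": {"delete": {}}},
            }
        },
    )
```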
- ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -377,7 +377,7 @@ async def clone( a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well. - ``_ + ``_ :param index: Name of the source index to clone. :param target: Name of the target index to create. @@ -482,7 +482,7 @@ async def close( can cause problems in managed environments. Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`. - ``_ + ``_ :param index: Comma-separated list or wildcard expression of index names used to limit the request. @@ -582,7 +582,7 @@ async def create( setting `index.write.wait_for_active_shards`. Note that changing this setting will also affect the `wait_for_active_shards` value on all subsequent write operations. - ``_ + ``_ :param index: Name of the index you wish to create. :param aliases: Aliases for the index. @@ -656,7 +656,7 @@ async def create_data_stream( Create a data stream. Creates a data stream. You must have a matching index template with data stream enabled. - ``_ + ``_ :param name: Name of the data stream, which must meet the following criteria: Lowercase only; Cannot include `\\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, @@ -717,7 +717,7 @@ async def data_streams_stats( """ Get data stream stats. Retrieves statistics for one or more data streams. - ``_ + ``_ :param name: Comma-separated list of data streams used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams in a @@ -782,7 +782,7 @@ async def delete( delete the index, you must roll over the data stream so a new write index is created. You can then use the delete index API to delete the previous write index. - ``_ + ``_ :param index: Comma-separated list of indices to delete. You cannot specify index aliases. By default, this parameter does not support wildcards (`*`) or `_all`. @@ -852,7 +852,7 @@ async def delete_alias( """ Delete an alias. Removes a data stream or index from an alias. - ``_ + ``_ :param index: Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). @@ -917,7 +917,7 @@ async def delete_data_lifecycle( Delete data stream lifecycles. Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle. - ``_ + ``_ :param name: A comma-separated list of data streams of which the data stream lifecycle will be deleted; use `*` to get all data streams @@ -977,7 +977,7 @@ async def delete_data_stream( """ Delete data streams. Deletes one or more data streams and their backing indices. - ``_ + ``_ :param name: Comma-separated list of data streams to delete. Wildcard (`*`) expressions are supported. @@ -1032,7 +1032,7 @@ async def delete_index_template( then there is no wildcard support and the provided names should match completely with existing templates. - ``_ + ``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. @@ -1084,7 +1084,7 @@ async def delete_template( """ Delete a legacy index template. - ``_ + ``_ :param name: The name of the legacy index template to delete. Wildcard (`*`) expressions are supported. @@ -1156,7 +1156,7 @@ async def disk_usage( The stored size of the `_id` field is likely underestimated while the `_source` field is overestimated. 
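For the index and data stream creation APIs above, a rough sketch follows; the index names, mappings, and the matching template are hypothetical.

```python
from elasticsearch import AsyncElasticsearch


async def create_index_and_data_stream(client: AsyncElasticsearch) -> None:
    # Plain index with explicit settings and mappings.
    await client.indices.create(
        index="demo-index",
        settings={"number_of_shards": 1, "number_of_replicas": 1},
        mappings={"properties": {"title": {"type": "text"}, "created": {"type": "date"}}},
    )

    # A data stream requires a matching index template with `data_stream` enabled.
    await client.indices.put_index_template(
        name="logs-demo-template",
        index_patterns=["logs-demo-*"],
        data_stream={},
        template={"mappings": {"properties": {"@timestamp": {"type": "date"}}}},
    )
    await client.indices.create_data_stream(name="logs-demo-default")
```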
- ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. It’s recommended to execute this API with a single @@ -1237,7 +1237,7 @@ async def downsample( are supported. Neither field nor document level security can be defined on the source index. The source index must be read only (`index.blocks.write: true`). - ``_ + ``_ :param index: Name of the time series index to downsample. :param target_index: Name of the index to create. @@ -1305,7 +1305,7 @@ async def exists( """ Check indices. Check if one or more indices, index aliases, or data streams exist. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases. Supports wildcards (`*`). @@ -1383,7 +1383,7 @@ async def exists_alias( """ Check aliases. Checks if one or more data stream or index aliases exist. - ``_ + ``_ :param name: Comma-separated list of aliases to check. Supports wildcards (`*`). :param index: Comma-separated list of data streams or indices used to limit the @@ -1453,7 +1453,7 @@ async def exists_index_template( """ Check index templates. Check whether index templates exist. - ``_ + ``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. @@ -1506,7 +1506,7 @@ async def exists_template( templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. - ``_ + ``_ :param name: A comma-separated list of index template names used to limit the request. Wildcard (`*`) expressions are supported. @@ -1563,7 +1563,7 @@ async def explain_data_lifecycle( creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. - ``_ + ``_ :param index: The name of the index to explain :param include_defaults: indicates if the API should return the default values @@ -1631,7 +1631,7 @@ async def field_usage_stats( in the index. A given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times. - ``_ + ``_ :param index: Comma-separated list or wildcard expression of index names used to limit the request. @@ -1725,7 +1725,7 @@ async def flush( documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to flush. Supports wildcards (`*`). To flush all data streams and indices, omit this @@ -1850,7 +1850,7 @@ async def forcemerge( searches. For example: ``` POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1 ``` - ``_ + ``_ :param index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices @@ -1944,7 +1944,7 @@ async def get( Get index information. Get information about one or more indices. For data streams, the API returns information about the stream’s backing indices. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard expressions (*) are supported. @@ -2033,7 +2033,7 @@ async def get_alias( """ Get aliases. Retrieves information for one or more data stream or index aliases. - ``_ + ``_ :param index: Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). 
To target all data streams and indices, @@ -2116,7 +2116,7 @@ async def get_data_lifecycle( Get data stream lifecycles. Retrieves the data stream lifecycle configuration of one or more data streams. - ``_ + ``_ :param name: Comma-separated list of data streams to limit the request. Supports wildcards (`*`). To target all data streams, omit this parameter or use `*` @@ -2171,7 +2171,7 @@ async def get_data_lifecycle_stats( Get data stream lifecycle stats. Get statistics about the data streams that are managed by a data stream lifecycle. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_lifecycle/stats" @@ -2218,7 +2218,7 @@ async def get_data_stream( """ Get data streams. Retrieves information about one or more data streams. - ``_ + ``_ :param name: Comma-separated list of data stream names used to limit the request. Wildcard (`*`) expressions are supported. If omitted, all data streams are @@ -2296,7 +2296,7 @@ async def get_field_mapping( This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields. - ``_ + ``_ :param fields: Comma-separated list or wildcard expression of fields used to limit returned information. Supports wildcards (`*`). @@ -2373,7 +2373,7 @@ async def get_index_template( """ Get index templates. Get information about one or more index templates. - ``_ + ``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. @@ -2447,7 +2447,7 @@ async def get_mapping( Get mapping definitions. For data streams, the API retrieves mappings for the stream’s backing indices. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -2532,7 +2532,7 @@ async def get_settings( Get index settings. Get setting information for one or more indices. For data streams, it returns setting information for the stream's backing indices. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -2621,7 +2621,7 @@ async def get_template( This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. - ``_ + ``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (`*`) expressions are supported. To return all index templates, @@ -2687,7 +2687,7 @@ async def migrate_to_data_stream( with the same name. The indices for the alias become hidden backing indices for the stream. The write index for the alias becomes the write index for the stream. - ``_ + ``_ :param name: Name of the index alias to convert to a data stream. :param master_timeout: Period to wait for a connection to the master node. If @@ -2740,7 +2740,7 @@ async def modify_data_stream( Update data streams. Performs one or more data stream modification actions in a single atomic operation. - ``_ + ``_ :param actions: Actions to perform. """ @@ -2820,7 +2820,7 @@ async def open( setting on index creation applies to the `_open` and `_close` index actions as well. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). By default, you must explicitly @@ -2906,7 +2906,7 @@ async def promote_data_stream( a matching index template is created. 
This will affect the lifecycle management of the data stream and interfere with the data stream size and retention. - ``_ + ``_ :param name: The name of the data stream :param master_timeout: Period to wait for a connection to the master node. If @@ -2968,7 +2968,7 @@ async def put_alias( """ Create or update an alias. Adds a data stream or index to an alias. - ``_ + ``_ :param index: Comma-separated list of data streams or indices to add. Supports wildcards (`*`). Wildcard patterns that match both data streams and indices @@ -3070,7 +3070,7 @@ async def put_data_lifecycle( Update data stream lifecycles. Update the data stream lifecycle of the specified data streams. - ``_ + ``_ :param name: Comma-separated list of data streams used to limit the request. Supports wildcards (`*`). To target all data streams use `*` or `_all`. @@ -3189,7 +3189,7 @@ async def put_index_template( default new `dynamic_templates` entries are appended onto the end. If an entry already exists with the same key, then it is overwritten by the new definition. - ``_ + ``_ :param name: Index or template name :param allow_auto_create: This setting overrides the value of the `action.auto_create_index` @@ -3373,7 +3373,7 @@ async def put_mapping( invalidate data already indexed under the old field name. Instead, add an alias field to create an alternate field name. - ``_ + ``_ :param index: A comma-separated list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. @@ -3516,7 +3516,7 @@ async def put_settings( existing data. To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it. - ``_ + ``_ :param settings: :param index: Comma-separated list of data streams, indices, and aliases used @@ -3637,7 +3637,7 @@ async def put_template( and higher orders overriding them. NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order. - ``_ + ``_ :param name: The name of the template :param aliases: Aliases for the index. @@ -3738,7 +3738,7 @@ async def recovery( onto a different node then the information about the original recovery will not be shown in the recovery API. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -3812,7 +3812,7 @@ async def refresh( query parameter option. This option ensures the indexing operation waits for a periodic refresh before running the search. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -3896,7 +3896,7 @@ async def reload_search_analyzers( a shard replica--before using this API. This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future. - ``_ + ``_ :param index: A comma-separated list of index names to reload analyzers for :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves @@ -3991,7 +3991,7 @@ async def resolve_cluster( errors will be shown.) * A remote cluster is an older version that does not support the feature you want to use in your search. - ``_ + ``_ :param name: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified @@ -4065,7 +4065,7 @@ async def resolve_index( Resolve indices. 
Resolve the names and/or index patterns for indices, aliases, and data streams. Multiple patterns and remote clusters are supported. - ``_ + ``_ :param name: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified @@ -4164,7 +4164,7 @@ async def rollover( If you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`. If you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`. - ``_ + ``_ :param alias: Name of the data stream or index alias to roll over. :param new_index: Name of the index to create. Supports date math. Data streams @@ -4271,7 +4271,7 @@ async def segments( shards. For data streams, the API returns information about the stream's backing indices. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -4357,7 +4357,7 @@ async def shard_stores( information only for primary shards that are unassigned or have one or more unassigned replica shards. - ``_ + ``_ :param index: List of data streams, indices, and aliases used to limit the request. :param allow_no_indices: If false, the request returns an error if any wildcard @@ -4460,7 +4460,7 @@ async def shrink( must have sufficient free disk space to accommodate a second copy of the existing index. - ``_ + ``_ :param index: Name of the source index to shrink. :param target: Name of the target index to create. @@ -4536,7 +4536,7 @@ async def simulate_index_template( Simulate an index. Get the index configuration that would be applied to the specified index from an existing index template. - ``_ + ``_ :param name: Name of the index to simulate :param include_defaults: If true, returns all relevant default configurations @@ -4614,7 +4614,7 @@ async def simulate_template( Simulate an index template. Get the index configuration that would be applied by a particular index template. - ``_ + ``_ :param name: Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit this parameter and specify the template @@ -4769,7 +4769,7 @@ async def split( in the source index. * The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index. - ``_ + ``_ :param index: Name of the source index to split. :param target: Name of the target index to create. @@ -4868,7 +4868,7 @@ async def stats( cleared. Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed. - ``_ + ``_ :param index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices @@ -4972,7 +4972,7 @@ async def unfreeze( Unfreeze an index. When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again. - ``_ + ``_ :param index: Identifier for the index. :param allow_no_indices: If `false`, the request returns an error if any wildcard @@ -5046,7 +5046,7 @@ async def update_aliases( """ Create or update an alias. Adds a data stream or index to an alias. - ``_ + ``_ :param actions: Actions to perform. :param master_timeout: Period to wait for a connection to the master node. If @@ -5121,7 +5121,7 @@ async def validate_query( """ Validate a query. Validates a query without running it. 
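A few of the index maintenance calls covered above, chained together as a rough sketch; the index name, new field, and write alias are assumptions.

```python
from elasticsearch import AsyncElasticsearch


async def maintain_index(client: AsyncElasticsearch) -> None:
    # Add a new keyword field to an existing index.
    await client.indices.put_mapping(index="demo-index", properties={"tags": {"type": "keyword"}})

    # Make recent writes searchable.
    await client.indices.refresh(index="demo-index")

    # Roll over a write alias to a new backing index (assumes "demo-write" already exists).
    await client.indices.rollover(alias="demo-write")
```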
- ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py index 47bba65d2..0b124e281 100644 --- a/elasticsearch/_async/client/inference.py +++ b/elasticsearch/_async/client/inference.py @@ -46,7 +46,7 @@ async def delete( """ Delete an inference endpoint - ``_ + ``_ :param inference_id: The inference Id :param task_type: The task type @@ -111,7 +111,7 @@ async def get( """ Get an inference endpoint - ``_ + ``_ :param task_type: The task type :param inference_id: The inference Id @@ -174,7 +174,7 @@ async def inference( """ Perform inference on the service - ``_ + ``_ :param inference_id: The inference Id :param input: Inference input. Either a string or an array of strings. @@ -271,7 +271,7 @@ async def put( to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. - ``_ + ``_ :param inference_id: The inference Id :param inference_config: @@ -350,7 +350,7 @@ async def update( or if you want to use non-NLP models, use the machine learning trained model APIs. - ``_ + ``_ :param inference_id: The unique identifier of the inference endpoint. :param inference_config: diff --git a/elasticsearch/_async/client/ingest.py b/elasticsearch/_async/client/ingest.py index 92fbd8c93..c8dc21c50 100644 --- a/elasticsearch/_async/client/ingest.py +++ b/elasticsearch/_async/client/ingest.py @@ -41,7 +41,7 @@ async def delete_geoip_database( Delete GeoIP database configurations. Delete one or more IP geolocation database configurations. - ``_ + ``_ :param id: A comma-separated list of geoip database configurations to delete :param master_timeout: Period to wait for a connection to the master node. If @@ -92,7 +92,7 @@ async def delete_ip_location_database( """ Delete IP geolocation database configurations. - ``_ + ``_ :param id: A comma-separated list of IP location database configurations. :param master_timeout: The period to wait for a connection to the master node. @@ -145,7 +145,7 @@ async def delete_pipeline( """ Delete pipelines. Delete one or more ingest pipelines. - ``_ + ``_ :param id: Pipeline ID or wildcard expression of pipeline IDs used to limit the request. To delete all ingest pipelines in a cluster, use a value of `*`. @@ -195,7 +195,7 @@ async def geo_ip_stats( Get GeoIP statistics. Get download statistics for GeoIP2 databases that are used with the GeoIP processor. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ingest/geoip/stats" @@ -232,7 +232,7 @@ async def get_geoip_database( Get GeoIP database configurations. Get information about one or more IP geolocation database configurations. - ``_ + ``_ :param id: Comma-separated list of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit @@ -278,7 +278,7 @@ async def get_ip_location_database( """ Get IP geolocation database configurations. - ``_ + ``_ :param id: Comma-separated list of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit @@ -332,7 +332,7 @@ async def get_pipeline( Get pipelines. Get information about one or more ingest pipelines. This API returns a local reference of the pipeline. - ``_ + ``_ :param id: Comma-separated list of pipeline IDs to retrieve. Wildcard (`*`) expressions are supported. 
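One way the inference endpoint management APIs above might be exercised; the endpoint id, service name, and service settings are assumptions and may differ per deployment.

```python
from elasticsearch import AsyncElasticsearch


async def create_inference_endpoint(client: AsyncElasticsearch) -> None:
    # Create a sparse-embedding endpoint; the service configuration shown is only an example.
    await client.inference.put(
        inference_id="my-sparse-endpoint",
        task_type="sparse_embedding",
        inference_config={
            "service": "elser",
            "service_settings": {"num_allocations": 1, "num_threads": 1},
        },
    )
    # List the configured endpoints.
    endpoints = await client.inference.get()
    print(endpoints)
```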
To get all ingest pipelines, omit this parameter or use `*`. @@ -386,7 +386,7 @@ async def processor_grok( as the grok pattern you expect will match. A grok pattern is like a regular expression that supports aliased expressions that can be reused. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ingest/processor/grok" @@ -430,7 +430,7 @@ async def put_geoip_database( Create or update a GeoIP database configuration. Refer to the create or update IP geolocation database configuration API. - ``_ + ``_ :param id: ID of the database configuration to create or update. :param maxmind: The configuration necessary to identify which IP geolocation @@ -502,7 +502,7 @@ async def put_ip_location_database( """ Create or update an IP geolocation database configuration. - ``_ + ``_ :param id: The database configuration identifier. :param configuration: @@ -584,7 +584,7 @@ async def put_pipeline( """ Create or update a pipeline. Changes made using this API take effect immediately. - ``_ + ``_ :param id: ID of the ingest pipeline to create or update. :param deprecated: Marks this ingest pipeline as deprecated. When a deprecated @@ -678,7 +678,7 @@ async def simulate( You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request. - ``_ + ``_ :param docs: Sample documents to test in the pipeline. :param id: Pipeline to test. If you don’t specify a `pipeline` in the request diff --git a/elasticsearch/_async/client/license.py b/elasticsearch/_async/client/license.py index 41eeb0aa9..e5ffac7a5 100644 --- a/elasticsearch/_async/client/license.py +++ b/elasticsearch/_async/client/license.py @@ -41,7 +41,7 @@ async def delete( to Basic. If the operator privileges feature is enabled, only operator users can use this API. - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. :param timeout: Period to wait for a response. If no response is received before @@ -90,7 +90,7 @@ async def get( Not Found` response. If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request. - ``_ + ``_ :param accept_enterprise: If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum @@ -136,7 +136,7 @@ async def get_basic_status( """ Get the basic license status. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_license/basic_status" @@ -171,7 +171,7 @@ async def get_trial_status( """ Get the trial status. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_license/trial_status" @@ -221,7 +221,7 @@ async def post( TLS on the transport networking layer before you install the license. If the operator privileges feature is enabled, only operator users can use this API. - ``_ + ``_ :param acknowledge: Specifies whether you acknowledge the license changes. :param license: @@ -290,7 +290,7 @@ async def post_start_basic( parameter set to `true`. To check the status of your basic license, use the get basic license API. - ``_ + ``_ :param acknowledge: whether the user has acknowledged acknowledge messages (default: false) @@ -345,7 +345,7 @@ async def post_start_trial( however, request an extended trial at https://www.elastic.co/trialextension. To check the status of your trial, use the get trial status API. 
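The ingest pipeline APIs above could be exercised as follows; the pipeline id, processor, and sample document are made up.

```python
from elasticsearch import AsyncElasticsearch


async def create_and_test_pipeline(client: AsyncElasticsearch) -> None:
    # Create a pipeline that stamps documents with their ingest time.
    await client.ingest.put_pipeline(
        id="demo-pipeline",
        processors=[{"set": {"field": "ingested_at", "value": "{{_ingest.timestamp}}"}}],
    )
    # Dry-run the pipeline against a sample document without indexing anything.
    result = await client.ingest.simulate(
        id="demo-pipeline",
        docs=[{"_source": {"message": "hello"}}],
    )
    print(result["docs"][0]["doc"]["_source"])
```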
- ``_ + ``_ :param acknowledge: whether the user has acknowledged acknowledge messages (default: false) diff --git a/elasticsearch/_async/client/logstash.py b/elasticsearch/_async/client/logstash.py index a98c85368..308588e32 100644 --- a/elasticsearch/_async/client/logstash.py +++ b/elasticsearch/_async/client/logstash.py @@ -40,7 +40,7 @@ async def delete_pipeline( Management. If the request succeeds, you receive an empty response with an appropriate status code. - ``_ + ``_ :param id: An identifier for the pipeline. """ @@ -80,7 +80,7 @@ async def get_pipeline( """ Get Logstash pipelines. Get pipelines that are used for Logstash Central Management. - ``_ + ``_ :param id: A comma-separated list of pipeline identifiers. """ @@ -128,7 +128,7 @@ async def put_pipeline( Create or update a Logstash pipeline. Create a pipeline that is used for Logstash Central Management. If the specified pipeline exists, it is replaced. - ``_ + ``_ :param id: An identifier for the pipeline. :param pipeline: diff --git a/elasticsearch/_async/client/migration.py b/elasticsearch/_async/client/migration.py index c1eef1d4e..4bf4cead0 100644 --- a/elasticsearch/_async/client/migration.py +++ b/elasticsearch/_async/client/migration.py @@ -41,7 +41,7 @@ async def deprecations( in the next major version. TIP: This APIs is designed for indirect use by the Upgrade Assistant. You are strongly recommended to use the Upgrade Assistant. - ``_ + ``_ :param index: Comma-separate list of data streams or indices to check. Wildcard (*) expressions are supported. @@ -88,7 +88,7 @@ async def get_feature_upgrade_status( in progress. TIP: This API is designed for indirect use by the Upgrade Assistant. You are strongly recommended to use the Upgrade Assistant. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_migration/system_features" @@ -127,7 +127,7 @@ async def post_feature_upgrade( unavailable during the migration process. TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_migration/system_features" diff --git a/elasticsearch/_async/client/ml.py b/elasticsearch/_async/client/ml.py index e4b2ec65e..813fe128f 100644 --- a/elasticsearch/_async/client/ml.py +++ b/elasticsearch/_async/client/ml.py @@ -42,7 +42,7 @@ async def clear_trained_model_deployment_cache( may be cached on that individual node. Calling this API clears the caches without restarting the deployment. - ``_ + ``_ :param model_id: The unique identifier of the trained model. """ @@ -102,7 +102,7 @@ async def close_job( force parameters as the close job request. When a datafeed that has a specified end date stops, it automatically closes its associated job. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection @@ -164,7 +164,7 @@ async def delete_calendar( Delete a calendar. Removes all scheduled events from a calendar, then deletes it. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. """ @@ -205,7 +205,7 @@ async def delete_calendar_event( """ Delete events from a calendar. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param event_id: Identifier for the scheduled event. You can obtain this identifier @@ -253,7 +253,7 @@ async def delete_calendar_job( """ Delete anomaly jobs from a calendar. 
- ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param job_id: An identifier for the anomaly detection jobs. It can be a job @@ -302,7 +302,7 @@ async def delete_data_frame_analytics( """ Delete a data frame analytics job. - ``_ + ``_ :param id: Identifier for the data frame analytics job. :param force: If `true`, it deletes a job that is not stopped; this method is @@ -350,7 +350,7 @@ async def delete_datafeed( """ Delete a datafeed. - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -408,7 +408,7 @@ async def delete_expired_data( expired data for all anomaly detection jobs by using _all, by specifying * as the , or by omitting the . - ``_ + ``_ :param job_id: Identifier for an anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. @@ -469,7 +469,7 @@ async def delete_filter( delete the filter. You must update or delete the job before you can delete the filter. - ``_ + ``_ :param filter_id: A string that uniquely identifies a filter. """ @@ -515,7 +515,7 @@ async def delete_forecast( in the forecast jobs API. The delete forecast API enables you to delete one or more forecasts before they expire. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param forecast_id: A comma-separated list of forecast identifiers. If you do @@ -587,7 +587,7 @@ async def delete_job( delete datafeed API with the same timeout and force parameters as the delete job request. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param delete_user_annotations: Specifies whether annotations that have been @@ -643,7 +643,7 @@ async def delete_model_snapshot( that snapshot, first revert to a different one. To identify the active model snapshot, refer to the `model_snapshot_id` in the results from the get jobs API. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: Identifier for the model snapshot. @@ -692,7 +692,7 @@ async def delete_trained_model( Delete an unreferenced trained model. The request deletes a trained inference model that is not referenced by an ingest pipeline. - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param force: Forcefully deletes a trained model that is referenced by ingest @@ -743,7 +743,7 @@ async def delete_trained_model_alias( to a trained model. If the model alias is missing or refers to a model other than the one identified by the `model_id`, this API returns an error. - ``_ + ``_ :param model_id: The trained model ID to which the model alias refers. :param model_alias: The model alias to delete. @@ -800,7 +800,7 @@ async def estimate_model_memory( an anomaly detection job model. It is based on analysis configuration details for the job and cardinality estimates for the fields it references. - ``_ + ``_ :param analysis_config: For a list of the properties that you can specify in the `analysis_config` component of the body of this API. @@ -868,7 +868,7 @@ async def evaluate_data_frame( for use on indexes created by data frame analytics. Evaluation requires both a ground truth field and an analytics result field to be present. - ``_ + ``_ :param evaluation: Defines the type of evaluation you want to perform. :param index: Defines the `index` in which the evaluation will be performed. @@ -948,7 +948,7 @@ async def explain_data_frame_analytics( setting later on. 
If you have object fields or fields that are excluded via source filtering, they are not included in the explanation. - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -1055,7 +1055,7 @@ async def flush_job( and persists the model state to disk and the job must be opened again before analyzing further data. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param advance_time: Refer to the description for the `advance_time` query parameter. @@ -1126,7 +1126,7 @@ async def forecast( for a job that has an `over_field_name` in its configuration. Forcasts predict future behavior based on historical data. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. The job must be open when you create a forecast; otherwise, an error occurs. @@ -1209,7 +1209,7 @@ async def get_buckets( Get anomaly detection job results for buckets. The API presents a chronological view of the records, grouped by bucket. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param timestamp: The timestamp of a single bucket result. If you do not specify @@ -1304,7 +1304,7 @@ async def get_calendar_events( """ Get info about events in calendars. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids @@ -1370,7 +1370,7 @@ async def get_calendars( """ Get calendar configuration info. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids @@ -1443,7 +1443,7 @@ async def get_categories( """ Get anomaly detection job results for categories. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param category_id: Identifier for the category, which is unique in the job. @@ -1527,7 +1527,7 @@ async def get_data_frame_analytics( multiple data frame analytics jobs in a single API request by using a comma-separated list of data frame analytics jobs or a wildcard expression. - ``_ + ``_ :param id: Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame @@ -1599,7 +1599,7 @@ async def get_data_frame_analytics_stats( """ Get data frame analytics jobs usage info. - ``_ + ``_ :param id: Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame @@ -1669,7 +1669,7 @@ async def get_datafeed_stats( the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds. - ``_ + ``_ :param datafeed_id: Identifier for the datafeed. It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the @@ -1729,7 +1729,7 @@ async def get_datafeeds( `*` as the ``, or by omitting the ``. This API returns a maximum of 10,000 datafeeds. - ``_ + ``_ :param datafeed_id: Identifier for the datafeed. It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the @@ -1792,7 +1792,7 @@ async def get_filters( """ Get filters. You can get a single filter or all filters. - ``_ + ``_ :param filter_id: A string that uniquely identifies a filter. :param from_: Skips the specified number of filters. 
@@ -1856,7 +1856,7 @@ async def get_influencers( that have contributed to, or are to blame for, the anomalies. Influencer results are available only if an `influencer_field_name` is specified in the job configuration. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param desc: If true, the results are sorted in descending order. @@ -1937,7 +1937,7 @@ async def get_job_stats( """ Get anomaly detection jobs usage info. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, a comma-separated list of jobs, or a wildcard expression. If @@ -1998,7 +1998,7 @@ async def get_jobs( detection jobs by using `_all`, by specifying `*` as the ``, or by omitting the ``. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. If you do not specify one of these @@ -2061,7 +2061,7 @@ async def get_memory_stats( jobs and trained models are using memory, on each node, both within the JVM heap, and natively, outside of the JVM. - ``_ + ``_ :param node_id: The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or `ml:true` @@ -2116,7 +2116,7 @@ async def get_model_snapshot_upgrade_stats( """ Get anomaly detection job model snapshot upgrade usage info. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: A numerical character string that uniquely identifies the @@ -2187,7 +2187,7 @@ async def get_model_snapshots( """ Get model snapshots info. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: A numerical character string that uniquely identifies the @@ -2300,7 +2300,7 @@ async def get_overall_buckets( its default), the `overall_score` is the maximum `overall_score` of the overall buckets that have a span equal to the jobs' largest bucket span. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, a comma-separated list of jobs or groups, or a wildcard expression. @@ -2405,7 +2405,7 @@ async def get_records( found in each bucket, which relates to the number of time series being modeled and the number of detectors. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param desc: Refer to the description for the `desc` query parameter. @@ -2501,7 +2501,7 @@ async def get_trained_models( """ Get trained model configuration info. - ``_ + ``_ :param model_id: The unique identifier of the trained model or a model alias. You can get information for multiple trained models in a single API request @@ -2589,7 +2589,7 @@ async def get_trained_models_stats( models in a single API request by using a comma-separated list of model IDs or a wildcard expression. - ``_ + ``_ :param model_id: The unique identifier of the trained model or a model alias. It can be a comma-separated list or a wildcard expression. @@ -2652,7 +2652,7 @@ async def infer_trained_model( """ Evaluate a trained model. - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param docs: An array of objects to pass to the model for inference. The objects @@ -2714,7 +2714,7 @@ async def info( what those defaults are. It also provides information about the maximum size of machine learning jobs that could run in the current cluster configuration. 
- ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ml/info" @@ -2759,7 +2759,7 @@ async def open_job( job is ready to resume its analysis from where it left off, once new data is received. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param timeout: Refer to the description for the `timeout` query parameter. @@ -2813,7 +2813,7 @@ async def post_calendar_events( """ Add scheduled events to the calendar. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param events: A list of one of more scheduled events. The event’s start and @@ -2871,7 +2871,7 @@ async def post_data( data can be accepted from only a single connection at a time. It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. The job must have a state of open to receive and process the data. @@ -2935,7 +2935,7 @@ async def preview_data_frame_analytics( Preview features used by data frame analytics. Previews the extracted features used by a data frame analytics config. - ``_ + ``_ :param id: Identifier for the data frame analytics job. :param config: A data frame analytics config as described in create data frame @@ -3005,7 +3005,7 @@ async def preview_datafeed( that accurately reflects the behavior of the datafeed, use the appropriate credentials. You can also use secondary authorization headers to supply the credentials. - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -3081,7 +3081,7 @@ async def put_calendar( """ Create a calendar. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param description: A description of the calendar. @@ -3135,7 +3135,7 @@ async def put_calendar_job( """ Add anomaly detection job to calendar. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param job_id: An identifier for the anomaly detection jobs. It can be a job @@ -3216,7 +3216,7 @@ async def put_data_frame_analytics( parameters, hyperparameter optimization occurs. It determines a value for each of the undefined parameters. - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -3400,7 +3400,7 @@ async def put_datafeed( directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index. - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -3559,7 +3559,7 @@ async def put_filter( more anomaly detection jobs. Specifically, filters are referenced in the `custom_rules` property of detector configuration objects. - ``_ + ``_ :param filter_id: A string that uniquely identifies a filter. :param description: A description of the filter. @@ -3658,7 +3658,7 @@ async def put_job( have read index privileges on the source index. If you include a `datafeed_config` but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`. - ``_ + ``_ :param job_id: The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and @@ -3863,7 +3863,7 @@ async def put_trained_model( Create a trained model. 
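A compact sketch of the anomaly detection job APIs above; the job id, detector, and time field are illustrative.

```python
from elasticsearch import AsyncElasticsearch


async def create_and_open_job(client: AsyncElasticsearch) -> None:
    # Define a job that models the mean of a hypothetical "response_time" field.
    await client.ml.put_job(
        job_id="demo-job",
        analysis_config={
            "bucket_span": "15m",
            "detectors": [{"function": "mean", "field_name": "response_time"}],
        },
        data_description={"time_field": "@timestamp"},
    )
    # Open the job so it can start receiving data.
    await client.ml.open_job(job_id="demo-job")
```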
Enable you to supply a trained model that is not created by data frame analytics. - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param compressed_definition: The compressed (GZipped and Base64 encoded) inference @@ -3977,7 +3977,7 @@ async def put_trained_model_alias( common between the old and new trained models for the model alias, the API returns a warning. - ``_ + ``_ :param model_id: The identifier for the trained model that the alias refers to. :param model_alias: The alias to create or update. This value cannot end in numbers. @@ -4035,7 +4035,7 @@ async def put_trained_model_definition_part( """ Create part of a trained model definition. - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param part: The definition part number. When the definition is loaded for inference @@ -4114,7 +4114,7 @@ async def put_trained_model_vocabulary( processing (NLP) models. The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition. - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param vocabulary: The model vocabulary, which must not be empty. @@ -4172,7 +4172,7 @@ async def reset_job( job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma separated list. - ``_ + ``_ :param job_id: The ID of the job to reset. :param delete_user_annotations: Specifies whether annotations that have been @@ -4232,7 +4232,7 @@ async def revert_model_snapshot( For example, you might consider reverting to a saved snapshot after Black Friday or a critical system failure. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: You can specify `empty` as the . Reverting to @@ -4302,7 +4302,7 @@ async def set_upgrade_mode( the current value for the upgrade_mode setting by using the get machine learning info API. - ``_ + ``_ :param enabled: When `true`, it enables `upgrade_mode` which temporarily halts all job and datafeed tasks and prohibits new job and datafeed tasks from @@ -4357,7 +4357,7 @@ async def start_data_frame_analytics( exists, it is used as is. You can therefore set up the destination index in advance with custom settings and mappings. - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -4419,7 +4419,7 @@ async def start_datafeed( headers when you created or updated the datafeed, those credentials are used instead. - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -4489,7 +4489,7 @@ async def start_trained_model_deployment( Start a trained model deployment. It allocates the model to every machine learning node. - ``_ + ``_ :param model_id: The unique identifier of the trained model. Currently, only PyTorch models are supported. @@ -4573,7 +4573,7 @@ async def stop_data_frame_analytics( Stop data frame analytics jobs. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -4639,7 +4639,7 @@ async def stop_datafeed( Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. 
A datafeed can be started and stopped multiple times throughout its lifecycle. - ``_ + ``_ :param datafeed_id: Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a @@ -4701,7 +4701,7 @@ async def stop_trained_model_deployment( """ Stop a trained model deployment. - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param allow_no_match: Specifies what to do when the request: contains wildcard @@ -4766,7 +4766,7 @@ async def update_data_frame_analytics( """ Update a data frame analytics job. - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -4878,7 +4878,7 @@ async def update_datafeed( query using those same roles. If you provide secondary authorization headers, those credentials are used instead. - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -5042,7 +5042,7 @@ async def update_filter( Update a filter. Updates the description of a filter, adds items, or removes items from the list. - ``_ + ``_ :param filter_id: A string that uniquely identifies a filter. :param add_items: The items to add to the filter. @@ -5133,7 +5133,7 @@ async def update_job( Update an anomaly detection job. Updates certain properties of an anomaly detection job. - ``_ + ``_ :param job_id: Identifier for the job. :param allow_lazy_open: Advanced configuration option. Specifies whether this @@ -5261,7 +5261,7 @@ async def update_model_snapshot( """ Update a snapshot. Updates certain properties of a snapshot. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: Identifier for the model snapshot. @@ -5322,7 +5322,7 @@ async def update_trained_model_deployment( """ Update a trained model deployment. - ``_ + ``_ :param model_id: The unique identifier of the trained model. Currently, only PyTorch models are supported. @@ -5388,7 +5388,7 @@ async def upgrade_job_snapshot( a time and the upgraded snapshot cannot be the current snapshot of the anomaly detection job. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: A numerical character string that uniquely identifies the @@ -5464,7 +5464,7 @@ async def validate( """ Validate an anomaly detection job. - ``_ + ``_ :param analysis_config: :param analysis_limits: @@ -5534,7 +5534,7 @@ async def validate_detector( """ Validate an anomaly detection job. - ``_ + ``_ :param detector: """ diff --git a/elasticsearch/_async/client/monitoring.py b/elasticsearch/_async/client/monitoring.py index 8c2d962fd..2439d73d7 100644 --- a/elasticsearch/_async/client/monitoring.py +++ b/elasticsearch/_async/client/monitoring.py @@ -45,7 +45,7 @@ async def bulk( Send monitoring data. This API is used by the monitoring features to send monitoring data. - ``_ + ``_ :param interval: Collection interval (e.g., '10s' or '10000ms') of the payload :param operations: diff --git a/elasticsearch/_async/client/nodes.py b/elasticsearch/_async/client/nodes.py index 02fce0788..9dfdb9b67 100644 --- a/elasticsearch/_async/client/nodes.py +++ b/elasticsearch/_async/client/nodes.py @@ -47,7 +47,7 @@ async def clear_repositories_metering_archive( Clear the archived repositories metering. Clear the archived repositories metering information in the cluster. 
- ``_ + ``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. @@ -100,7 +100,7 @@ async def get_repositories_metering_info( over a period of time. Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts. - ``_ + ``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). @@ -154,7 +154,7 @@ async def hot_threads( node in the cluster. The output is plain text with a breakdown of the top hot threads for each node. - ``_ + ``_ :param node_id: List of node IDs or names used to limit returned information. :param ignore_idle_threads: If true, known idle threads (e.g. waiting in a socket @@ -224,7 +224,7 @@ async def info( Get node information. By default, the API returns all attributes and core settings for cluster nodes. - ``_ + ``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. @@ -299,7 +299,7 @@ async def reload_secure_settings( by locally accessing the API and passing the node-specific Elasticsearch keystore password. - ``_ + ``_ :param node_id: The names of particular nodes in the cluster to target. :param secure_settings_password: The password for the Elasticsearch keystore. @@ -370,7 +370,7 @@ async def stats( Get node statistics. Get statistics for nodes in a cluster. By default, all stats are returned. You can limit the returned information by using metrics. - ``_ + ``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. @@ -482,7 +482,7 @@ async def usage( """ Get feature usage information. - ``_ + ``_ :param node_id: A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting diff --git a/elasticsearch/_async/client/query_rules.py b/elasticsearch/_async/client/query_rules.py index b98a1d762..3380840c5 100644 --- a/elasticsearch/_async/client/query_rules.py +++ b/elasticsearch/_async/client/query_rules.py @@ -41,7 +41,7 @@ async def delete_rule( action that is only recoverable by re-adding the same rule with the create or update query rule API. - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset containing the rule to delete @@ -90,7 +90,7 @@ async def delete_ruleset( Delete a query ruleset. Remove a query ruleset and its associated data. This is a destructive action that is not recoverable. - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset to delete """ @@ -131,7 +131,7 @@ async def get_rule( """ Get a query rule. Get details about a query rule within a query ruleset. - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset containing the rule to retrieve @@ -179,7 +179,7 @@ async def get_ruleset( """ Get a query ruleset. Get details about a query ruleset. - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset """ @@ -222,7 +222,7 @@ async def list_rulesets( """ Get all query rulesets. Get summarized information about the query rulesets. - ``_ + ``_ :param from_: The offset from the first result to fetch. :param size: The maximum number of results to retrieve. @@ -281,7 +281,7 @@ async def put_rule( than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. 
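For context, the rule creation described above maps to `query_rules.put_rule()` on the client. The sketch below pins two documents for an exact match on the user query; the ruleset, rule, and document IDs are placeholders, for illustration only:

```
import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    # Placeholder connection details and identifiers.
    client = AsyncElasticsearch("https://localhost:9200", api_key="<api-key>")
    try:
        # Pin two documents whenever the user's query is exactly "puggles".
        await client.query_rules.put_rule(
            ruleset_id="my-ruleset",
            rule_id="my-rule",
            type="pinned",
            criteria=[
                {"type": "exact", "metadata": "user_query", "values": ["puggles"]}
            ],
            actions={"ids": ["doc-1", "doc-2"]},
        )
    finally:
        await client.close()


asyncio.run(main())
```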
- ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset containing the rule to be created or updated. @@ -366,7 +366,7 @@ async def put_ruleset( rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset to be created or updated. @@ -420,7 +420,7 @@ async def test( Test a query ruleset. Evaluate match criteria against a query ruleset to identify the rules that would match that criteria. - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset to be created or updated diff --git a/elasticsearch/_async/client/rollup.py b/elasticsearch/_async/client/rollup.py index 8fe54394f..fcf4dda78 100644 --- a/elasticsearch/_async/client/rollup.py +++ b/elasticsearch/_async/client/rollup.py @@ -58,7 +58,7 @@ async def delete_job( index. For example: ``` POST my_rollup_index/_delete_by_query { "query": { "term": { "_rollup.id": "the_rollup_job_id" } } } ``` - ``_ + ``_ :param id: Identifier for the job. """ @@ -103,7 +103,7 @@ async def get_jobs( any details about it. For details about a historical rollup job, the rollup capabilities API may be more useful. - ``_ + ``_ :param id: Identifier for the rollup job. If it is `_all` or omitted, the API returns all rollup jobs. @@ -156,7 +156,7 @@ async def get_rollup_caps( the first question, what fields were rolled up, what aggregations can be performed, and where does the data live? - ``_ + ``_ :param id: Index, indices or index-pattern to return rollup capabilities for. `_all` may be used to fetch rollup capabilities from all jobs. @@ -206,7 +206,7 @@ async def get_rollup_index_caps( via a pattern)? * What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job? - ``_ + ``_ :param index: Data stream or index to check for rollup capabilities. Wildcard (`*`) expressions are supported. @@ -278,7 +278,7 @@ async def put_job( and what metrics to collect for each group. Jobs are created in a `STOPPED` state. You can start them with the start rollup jobs API. - ``_ + ``_ :param id: Identifier for the rollup job. This can be any alphanumeric string and uniquely identifies the data that is associated with the rollup job. @@ -413,7 +413,7 @@ async def rollup_search( During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used. - ``_ + ``_ :param index: A comma-separated list of data streams and indices used to limit the request. This parameter has the following rules: * At least one data @@ -487,7 +487,7 @@ async def start_job( Start rollup jobs. If you try to start a job that does not exist, an exception occurs. If you try to start a job that is already started, nothing happens. - ``_ + ``_ :param id: Identifier for the rollup job. """ @@ -537,7 +537,7 @@ async def stop_job( moved to STOPPED or the specified time has elapsed. If the specified time elapses without the job moving to STOPPED, a timeout exception occurs. - ``_ + ``_ :param id: Identifier for the rollup job. 
:param timeout: If `wait_for_completion` is `true`, the API blocks for (at maximum) diff --git a/elasticsearch/_async/client/search_application.py b/elasticsearch/_async/client/search_application.py index 7ef00087f..72e1ca23e 100644 --- a/elasticsearch/_async/client/search_application.py +++ b/elasticsearch/_async/client/search_application.py @@ -46,7 +46,7 @@ async def delete( Delete a search application. Remove a search application and its associated alias. Indices attached to the search application are not removed. - ``_ + ``_ :param name: The name of the search application to delete """ @@ -88,7 +88,7 @@ async def delete_behavioral_analytics( Delete a behavioral analytics collection. The associated data stream is also deleted. - ``_ + ``_ :param name: The name of the analytics collection to be deleted """ @@ -129,7 +129,7 @@ async def get( """ Get search application details. - ``_ + ``_ :param name: The name of the search application """ @@ -170,7 +170,7 @@ async def get_behavioral_analytics( """ Get behavioral analytics collections. - ``_ + ``_ :param name: A list of analytics collections to limit the returned information """ @@ -218,7 +218,7 @@ async def list( """ Get search applications. Get information about search applications. - ``_ + ``_ :param from_: Starting offset. :param q: Query in the Lucene query string syntax. @@ -271,7 +271,7 @@ async def post_behavioral_analytics_event( """ Create a behavioral analytics collection event. - ``_ + ``_ :param collection_name: The name of the behavioral analytics collection. :param event_type: The analytics event type. @@ -335,7 +335,7 @@ async def put( """ Create or update a search application. - ``_ + ``_ :param name: The name of the search application to be created or updated. :param search_application: @@ -389,7 +389,7 @@ async def put_behavioral_analytics( """ Create a behavioral analytics collection. - ``_ + ``_ :param name: The name of the analytics collection to be created or updated. """ @@ -441,7 +441,7 @@ async def render_query( generated and run by calling the search application search API. You must have `read` privileges on the backing alias of the search application. - ``_ + ``_ :param name: The name of the search application to render teh query for. :param params: @@ -503,7 +503,7 @@ async def search( the search application or default template. Unspecified template parameters are assigned their default values if applicable. - ``_ + ``_ :param name: The name of the search application to be searched. :param params: Query parameters specific to this request, which will override diff --git a/elasticsearch/_async/client/searchable_snapshots.py b/elasticsearch/_async/client/searchable_snapshots.py index 7985c936b..ac3975751 100644 --- a/elasticsearch/_async/client/searchable_snapshots.py +++ b/elasticsearch/_async/client/searchable_snapshots.py @@ -47,7 +47,7 @@ async def cache_stats( Get cache statistics. Get statistics about the shared cache for partially mounted indices. - ``_ + ``_ :param node_id: The names of the nodes in the cluster to target. :param master_timeout: @@ -105,7 +105,7 @@ async def clear_cache( Clear the cache. Clear indices and data streams from the shared cache for partially mounted indices. - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases to clear from the cache. It supports wildcards (`*`). @@ -180,7 +180,7 @@ async def mount( this API for snapshots managed by index lifecycle management (ILM). Manually mounting ILM-managed snapshots can interfere with ILM processes. 
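For context, mounting a snapshot as a searchable snapshot corresponds to `searchable_snapshots.mount()` on the client. A minimal sketch follows; the repository, snapshot, and index names are placeholders:

```
import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    # Placeholder connection details and snapshot coordinates.
    client = AsyncElasticsearch("https://localhost:9200", api_key="<api-key>")
    try:
        await client.searchable_snapshots.mount(
            repository="my_repository",
            snapshot="my_snapshot",
            index="my_index",
            renamed_index="my_index_mounted",
            storage="shared_cache",  # partially mounted; omit for a fully mounted index
            wait_for_completion=True,
        )
    finally:
        await client.close()


asyncio.run(main())
```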
- ``_ + ``_ :param repository: The name of the repository containing the snapshot of the index to mount. @@ -265,7 +265,7 @@ async def stats( """ Get searchable snapshot statistics. - ``_ + ``_ :param index: A comma-separated list of data streams and indices to retrieve statistics for. diff --git a/elasticsearch/_async/client/security.py b/elasticsearch/_async/client/security.py index c87daecfd..5be7612a3 100644 --- a/elasticsearch/_async/client/security.py +++ b/elasticsearch/_async/client/security.py @@ -60,7 +60,7 @@ async def activate_user_profile( the document if it was disabled. Any updates do not change existing content for either the `labels` or `data` fields. - ``_ + ``_ :param grant_type: The type of grant. :param access_token: The user's Elasticsearch access token or JWT. Both `access` @@ -124,7 +124,7 @@ async def authenticate( and information about the realms that authenticated and authorized the user. If the user cannot be authenticated, this API returns a 401 status code. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/_authenticate" @@ -168,7 +168,7 @@ async def bulk_delete_role( manage roles, rather than using file-based role management. The bulk delete roles API cannot delete roles that are defined in roles files. - ``_ + ``_ :param names: An array of role names to delete :param refresh: If `true` (the default) then refresh the affected shards to make @@ -226,7 +226,7 @@ async def bulk_put_role( way to manage roles, rather than using file-based role management. The bulk create or update roles API cannot update roles that are defined in roles files. - ``_ + ``_ :param roles: A dictionary of role name to RoleDescriptor objects to add or update :param refresh: If `true` (the default) then refresh the affected shards to make @@ -298,7 +298,7 @@ async def bulk_update_api_keys( the requested changes and did not require an update, and error details for any failed update. - ``_ + ``_ :param ids: The API key identifiers. :param expiration: Expiration time for the API keys. By default, API keys never @@ -373,7 +373,7 @@ async def change_password( Change passwords. Change the passwords of users in the native realm and built-in users. - ``_ + ``_ :param username: The user whose password you want to change. If you do not specify this parameter, the password is changed for the current user. @@ -436,7 +436,7 @@ async def clear_api_key_cache( Clear the API key cache. Evict a subset of all entries from the API key cache. The cache is also automatically cleared on state changes of the security index. - ``_ + ``_ :param ids: Comma-separated list of API key IDs to evict from the API key cache. To evict all API keys, use `*`. Does not support other wildcard patterns. @@ -479,9 +479,10 @@ async def clear_cached_privileges( cache. The cache is also automatically cleared for applications that have their privileges updated. - ``_ + ``_ - :param application: A comma-separated list of application names + :param application: A comma-separated list of applications. To clear all applications, + use an asterism (`*`). It does not support other wildcard patterns. """ if application in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'application'") @@ -519,12 +520,19 @@ async def clear_cached_realms( ) -> ObjectApiResponse[t.Any]: """ Clear the user cache. Evict users from the user cache. You can completely clear - the cache or evict specific users. + the cache or evict specific users. 
User credentials are cached in memory on each + node to avoid connecting to a remote authentication service or hitting the disk + for every incoming request. There are realm settings that you can use to configure + the user cache. For more information, refer to the documentation about controlling + the user cache. - ``_ + ``_ - :param realms: Comma-separated list of realms to clear - :param usernames: Comma-separated list of usernames to clear from the cache + :param realms: A comma-separated list of realms. To clear all realms, use an + asterisk (`*`). It does not support other wildcard patterns. + :param usernames: A comma-separated list of the users to clear from the cache. + If you do not specify this parameter, the API evicts all users from the user + cache. """ if realms in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'realms'") @@ -564,9 +572,11 @@ async def clear_cached_roles( """ Clear the roles cache. Evict roles from the native role cache. - ``_ + ``_ - :param name: Role name + :param name: A comma-separated list of roles to evict from the role cache. To + evict all roles, use an asterisk (`*`). It does not support other wildcard + patterns. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -605,13 +615,20 @@ async def clear_cached_service_tokens( ) -> ObjectApiResponse[t.Any]: """ Clear service account token caches. Evict a subset of all entries from the service - account token caches. + account token caches. Two separate caches exist for service account tokens: one + cache for tokens backed by the `service_tokens` file, and another for tokens + backed by the `.security` index. This API clears matching entries from both caches. + The cache for service account tokens backed by the `.security` index is cleared + automatically on state changes of the security index. The cache for tokens backed + by the `service_tokens` file is cleared automatically on file changes. - ``_ + ``_ - :param namespace: An identifier for the namespace - :param service: An identifier for the service name - :param name: A comma-separated list of service token names + :param namespace: The namespace, which is a top-level grouping of service accounts. + :param service: The name of the service, which must be unique within its namespace. + :param name: A comma-separated list of token names to evict from the service + account token caches. Use a wildcard (`*`) to evict all tokens that belong + to a service account. It does not support other wildcard patterns. """ if namespace in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'namespace'") @@ -665,30 +682,40 @@ async def create_api_key( ) -> ObjectApiResponse[t.Any]: """ Create an API key. Create an API key for access without requiring basic authentication. - A successful request returns a JSON structure that contains the API key, its - unique id, and its name. If applicable, it also returns expiration information - for the API key in milliseconds. NOTE: By default, API keys never expire. You - can specify expiration information when you create the API keys. + IMPORTANT: If the credential that is used to authenticate this request is an + API key, the derived API key cannot have any privileges. If you specify privileges, + the API returns an error. A successful request returns a JSON structure that + contains the API key, its unique id, and its name. If applicable, it also returns + expiration information for the API key in milliseconds. NOTE: By default, API + keys never expire. 
You can specify expiration information when you create the + API keys. The API keys are created by the Elasticsearch API key service, which + is automatically enabled. To configure or turn off the API key service, refer + to API key service setting documentation. + + ``_ - ``_ - - :param expiration: Expiration time for the API key. By default, API keys never - expire. + :param expiration: The expiration time for the API key. By default, API keys + never expire. :param metadata: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. - :param name: Specifies the name for this API key. + :param name: A name for the API key. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - :param role_descriptors: An array of role descriptors for this API key. This - parameter is optional. When it is not specified or is an empty array, then - the API key will have a point in time snapshot of permissions of the authenticated - user. If you supply role descriptors then the resultant permissions would - be an intersection of API keys permissions and authenticated user’s permissions - thereby limiting the access scope for API keys. The structure of role descriptor - is the same as the request for create role API. For more details, see create - or update roles API. + :param role_descriptors: An array of role descriptors for this API key. When + it is not specified or it is an empty array, the API key will have a point + in time snapshot of permissions of the authenticated user. If you supply + role descriptors, the resultant permissions are an intersection of API keys + permissions and the authenticated user's permissions thereby limiting the + access scope for API keys. The structure of role descriptor is the same as + the request for the create role API. For more details, refer to the create + or update roles API. NOTE: Due to the way in which this permission intersection + is calculated, it is not possible to create an API key that is a child of + another API key, unless the derived key is created without any privileges. + In this case, you must explicitly specify a role descriptor with no privileges. + The derived API key can be used for authentication; it will not have authority + to call Elasticsearch APIs. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/api_key" @@ -757,7 +784,7 @@ async def create_cross_cluster_api_key( API key API. Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error. - ``_ + ``_ :param access: The access to be granted to this API key. The access is composed of permissions for cross-cluster search and cross-cluster replication. At @@ -825,13 +852,21 @@ async def create_service_token( ) -> ObjectApiResponse[t.Any]: """ Create a service account token. Create a service accounts token for access without - requiring basic authentication. - - ``_ - - :param namespace: An identifier for the namespace - :param service: An identifier for the service name - :param name: An identifier for the token name + requiring basic authentication. NOTE: Service account tokens never expire. You + must actively delete them if they are no longer needed. 
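For context, creating and cleaning up such a token maps to `security.create_service_token()` and `security.delete_service_token()`. The sketch below uses the built-in `elastic/fleet-server` service account; the token name and connection details are placeholders:

```
import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    # Placeholder connection details.
    client = AsyncElasticsearch("https://localhost:9200", api_key="<api-key>")
    try:
        # The response carries the bearer value that the service authenticates with.
        created = await client.security.create_service_token(
            namespace="elastic", service="fleet-server", name="my-token"
        )
        print(created["token"]["value"])

        # Service account tokens never expire, so remove them once they are unused.
        await client.security.delete_service_token(
            namespace="elastic", service="fleet-server", name="my-token"
        )
    finally:
        await client.close()


asyncio.run(main())
```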
+ + ``_ + + :param namespace: The name of the namespace, which is a top-level grouping of + service accounts. + :param service: The name of the service. + :param name: The name for the service account token. If omitted, a random name + will be generated. Token names must be at least one and no more than 256 + characters. They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes + (`-`), and underscores (`_`), but cannot begin with an underscore. NOTE: + Token names must be unique in the context of the associated service account. + They must also be globally unique with their fully qualified names, which + are comprised of the service account principal and token name, such as `//`. :param refresh: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. @@ -909,7 +944,7 @@ async def delegate_pki( the TLS authentication and this API translates that authentication into an Elasticsearch access token. - ``_ + ``_ :param x509_certificate_chain: The X509Certificate chain, which is represented as an ordered string array. Each string in the array is a base64-encoded @@ -963,12 +998,16 @@ async def delete_privileges( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete application privileges. + Delete application privileges. To use this API, you must have one of the following + privileges: * The `manage_security` cluster privilege (or a greater privilege + such as `all`). * The "Manage Application Privileges" global privilege for the + application being referenced in the request. - ``_ + ``_ - :param application: Application name - :param name: Privilege name + :param application: The name of the application. Application privileges are always + associated with exactly one application. + :param name: The name of the privilege. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. @@ -1019,11 +1058,14 @@ async def delete_role( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete roles. Delete roles in the native realm. + Delete roles. Delete roles in the native realm. The role management APIs are + generally the preferred way to manage roles, rather than using file-based role + management. The delete roles API cannot remove roles that are defined in roles + files. - ``_ + ``_ - :param name: Role name + :param name: The name of the role. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. @@ -1067,11 +1109,16 @@ async def delete_role_mapping( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete role mappings. + Delete role mappings. Role mappings define which roles are assigned to each user. + The role mapping APIs are generally the preferred way to manage role mappings + rather than using role mapping files. The delete role mappings API cannot remove + role mappings that are defined in role mapping files. - ``_ + ``_ - :param name: Role-mapping name + :param name: The distinct name that identifies the role mapping. The name is + used solely as an identifier to facilitate interaction via the API; it does + not affect the behavior of the mapping in any way. 
:param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. @@ -1120,11 +1167,11 @@ async def delete_service_token( Delete service account tokens. Delete service account tokens for a service in a specified namespace. - ``_ + ``_ - :param namespace: An identifier for the namespace - :param service: An identifier for the service name - :param name: An identifier for the token name + :param namespace: The namespace, which is a top-level grouping of service accounts. + :param service: The service name. + :param name: The name of the service account token. :param refresh: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. @@ -1178,9 +1225,9 @@ async def delete_user( """ Delete users. Delete users from the native realm. - ``_ + ``_ - :param username: username + :param username: An identifier for the user. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. @@ -1224,11 +1271,12 @@ async def disable_user( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Disable users. Disable users in the native realm. + Disable users. Disable users in the native realm. By default, when you create + users, they are enabled. You can use this API to revoke a user's access to Elasticsearch. - ``_ + ``_ - :param username: The username of the user to disable + :param username: An identifier for the user. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. @@ -1282,7 +1330,7 @@ async def disable_user_profile( API to disable a user profile so it’s not visible in these searches. To re-enable a disabled user profile, use the enable user profile API . - ``_ + ``_ :param uid: Unique identifier for the user profile. :param refresh: If 'true', Elasticsearch refreshes the affected shards to make @@ -1328,11 +1376,12 @@ async def enable_user( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Enable users. Enable users in the native realm. + Enable users. Enable users in the native realm. By default, when you create users, + they are enabled. - ``_ + ``_ - :param username: The username of the user to enable + :param username: An identifier for the user. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. @@ -1385,7 +1434,7 @@ async def enable_user_profile( profile searches. If you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again. - ``_ + ``_ :param uid: A unique identifier for the user profile. :param refresh: If 'true', Elasticsearch refreshes the affected shards to make @@ -1428,9 +1477,12 @@ async def enroll_kibana( ) -> ObjectApiResponse[t.Any]: """ Enroll Kibana. Enable a Kibana instance to configure itself for communication - with a secured Elasticsearch cluster. 
+ with a secured Elasticsearch cluster. NOTE: This API is currently intended for + internal use only by Kibana. Kibana uses this API internally to configure itself + for communications with an Elasticsearch cluster that already has security features + enabled. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/enroll/kibana" @@ -1464,9 +1516,13 @@ async def enroll_node( ) -> ObjectApiResponse[t.Any]: """ Enroll a node. Enroll a new node to allow it to join an existing cluster with - security features enabled. + security features enabled. The response contains all the necessary information + for the joining node to bootstrap discovery and security related settings so + that it can successfully join the cluster. The response contains key and certificate + material that allows the caller to generate valid signed certificates for the + HTTP layer of all nodes in the cluster. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/enroll/node" @@ -1513,7 +1569,7 @@ async def get_api_key( privileges (including `manage_security`), this API returns all API keys regardless of ownership. - ``_ + ``_ :param active_only: A boolean flag that can be used to query API keys that are currently active. An API key is considered active if it is neither invalidated, @@ -1588,7 +1644,7 @@ async def get_builtin_privileges( Get builtin privileges. Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/privilege/_builtin" @@ -1623,12 +1679,18 @@ async def get_privileges( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get application privileges. + Get application privileges. To use this API, you must have one of the following + privileges: * The `read_security` cluster privilege (or a greater privilege such + as `manage_security` or `all`). * The "Manage Application Privileges" global + privilege for the application being referenced in the request. - ``_ + ``_ - :param application: Application name - :param name: Privilege name + :param application: The name of the application. Application privileges are always + associated with exactly one application. If you do not specify this parameter, + the API returns information about all privileges for all applications. + :param name: The name of the privilege. If you do not specify this parameter, + the API returns information about all privileges for the requested application. """ __path_parts: t.Dict[str, str] if application not in SKIP_IN_PATH and name not in SKIP_IN_PATH: @@ -1674,7 +1736,7 @@ async def get_role( the preferred way to manage roles, rather than using file-based role management. The get roles API cannot retrieve roles that are defined in roles files. - ``_ + ``_ :param name: The name of the role. You can specify multiple roles as a comma-separated list. If you do not specify this parameter, the API returns information about @@ -1722,7 +1784,7 @@ async def get_role_mapping( rather than using role mapping files. The get role mappings API cannot retrieve role mappings that are defined in role mapping files. - ``_ + ``_ :param name: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does @@ -1769,14 +1831,15 @@ async def get_service_accounts( ) -> ObjectApiResponse[t.Any]: """ Get service accounts. Get a list of service accounts that match the provided - path parameters. + path parameters. 
NOTE: Currently, only the `elastic/fleet-server` service account + is available. - ``_ + ``_ - :param namespace: Name of the namespace. Omit this parameter to retrieve information - about all service accounts. If you omit this parameter, you must also omit - the `service` parameter. - :param service: Name of the service name. Omit this parameter to retrieve information + :param namespace: The name of the namespace. Omit this parameter to retrieve + information about all service accounts. If you omit this parameter, you must + also omit the `service` parameter. + :param service: The service name. Omit this parameter to retrieve information about all service accounts that belong to the specified `namespace`. """ __path_parts: t.Dict[str, str] @@ -1820,12 +1883,19 @@ async def get_service_credentials( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get service account credentials. + Get service account credentials. To use this API, you must have at least the + `read_security` cluster privilege (or a greater privilege such as `manage_service_account` + or `manage_security`). The response includes service account tokens that were + created with the create service account tokens API as well as file-backed tokens + from all nodes of the cluster. NOTE: For tokens backed by the `service_tokens` + file, the API collects them from all nodes of the cluster. Tokens with the same + name from different nodes are assumed to be the same token and are only counted + once towards the total number of service tokens. - ``_ + ``_ - :param namespace: Name of the namespace. - :param service: Name of the service name. + :param namespace: The name of the namespace. + :param service: The service name. """ if namespace in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'namespace'") @@ -1867,9 +1937,11 @@ async def get_settings( ) -> ObjectApiResponse[t.Any]: """ Get security index settings. Get the user-configurable settings for the security - internal index (`.security` and associated indices). + internal index (`.security` and associated indices). Only a subset of the index + settings — those that are user-configurable—will be shown. This includes: * `index.auto_expand_replicas` + * `index.number_of_replicas` - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and @@ -1932,15 +2004,39 @@ async def get_token( ) -> ObjectApiResponse[t.Any]: """ Get a token. Create a bearer token for access without requiring basic authentication. - - ``_ - - :param grant_type: - :param kerberos_ticket: - :param password: - :param refresh_token: - :param scope: - :param username: + The tokens are created by the Elasticsearch Token Service, which is automatically + enabled when you configure TLS on the HTTP interface. Alternatively, you can + explicitly enable the `xpack.security.authc.token.enabled` setting. When you + are running in production mode, a bootstrap check prevents you from enabling + the token service unless you also enable TLS on the HTTP interface. The get token + API takes the same parameters as a typical OAuth 2.0 token API except for the + use of a JSON request body. A successful get token API call returns a JSON structure + that contains the access token, the amount of time (seconds) that the token expires + in, the type, and the scope if available. 
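For context, the password grant described above maps to `security.get_token()` on the client. A minimal sketch follows; the cluster URL, username, and password are placeholders:

```
import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    # Placeholder connection details and user credentials.
    client = AsyncElasticsearch(
        "https://localhost:9200", basic_auth=("elastic", "<password>")
    )
    try:
        # Exchange a username and password for a bearer access token plus a refresh token.
        token = await client.security.get_token(
            grant_type="password", username="elastic", password="<password>"
        )
        print(token["access_token"], token["expires_in"])
    finally:
        await client.close()


asyncio.run(main())
```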
The tokens returned by the get token + API have a finite period of time for which they are valid and after that time + period, they can no longer be used. That time period is defined by the `xpack.security.authc.token.timeout` + setting. If you want to invalidate a token immediately, you can do so by using + the invalidate token API. + + ``_ + + :param grant_type: The type of grant. Supported grant types are: `password`, + `_kerberos`, `client_credentials`, and `refresh_token`. + :param kerberos_ticket: The base64 encoded kerberos ticket. If you specify the + `_kerberos` grant type, this parameter is required. This parameter is not + valid with any other supported grant type. + :param password: The user's password. If you specify the `password` grant type, + this parameter is required. This parameter is not valid with any other supported + grant type. + :param refresh_token: The string that was returned when you created the token, + which enables you to extend its life. If you specify the `refresh_token` + grant type, this parameter is required. This parameter is not valid with + any other supported grant type. + :param scope: The scope of the token. Currently tokens are only issued for a + scope of FULL regardless of the value sent with the request. + :param username: The username that identifies the user. If you specify the `password` + grant type, this parameter is required. This parameter is not valid with + any other supported grant type. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/oauth2/token" @@ -1992,13 +2088,13 @@ async def get_user( """ Get users. Get information about users in the native realm and built-in users. - ``_ + ``_ :param username: An identifier for the user. You can specify multiple usernames as a comma-separated list. If you omit this parameter, the API retrieves information about all users. - :param with_profile_uid: If true will return the User Profile ID for a user, - if any. + :param with_profile_uid: Determines whether to retrieve the user profile UID, + if it exists, for the users. """ __path_parts: t.Dict[str, str] if username not in SKIP_IN_PATH: @@ -2041,9 +2137,12 @@ async def get_user_privileges( username: t.Optional[t.Union[None, str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get user privileges. + Get user privileges. Get the security privileges for the logged in user. All + users can use this API, but only to determine their own privileges. To check + the privileges of other users, you must use the run as feature. To check whether + a user has a specific list of privileges, use the has privileges API. - ``_ + ``_ :param application: The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, @@ -2097,7 +2196,7 @@ async def get_user_profile( applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. - ``_ + ``_ :param uid: A unique identifier for the user profile. :param data: A comma-separated list of filters for the `data` field of the profile @@ -2162,28 +2261,30 @@ async def grant_api_key( Grant an API key. Create an API key on behalf of another user. This API is similar to the create API keys API, however it creates the API key for a user that is different than the user that runs the API. The caller must have authentication - credentials (either an access token, or a username and password) for the user - on whose behalf the API key will be created. 
It is not possible to use this API - to create an API key without that user’s credentials. The user, for whom the - authentication credentials is provided, can optionally "run as" (impersonate) - another user. In this case, the API key will be created on behalf of the impersonated - user. This API is intended be used by applications that need to create and manage - API keys for end users, but cannot guarantee that those users have permission - to create API keys on their own behalf. A successful grant API key API call returns - a JSON structure that contains the API key, its unique id, and its name. If applicable, + credentials for the user on whose behalf the API key will be created. It is not + possible to use this API to create an API key without that user's credentials. + The supported user authentication credential types are: * username and password + * Elasticsearch access tokens * JWTs The user, for whom the authentication credentials + is provided, can optionally "run as" (impersonate) another user. In this case, + the API key will be created on behalf of the impersonated user. This API is intended + be used by applications that need to create and manage API keys for end users, + but cannot guarantee that those users have permission to create API keys on their + own behalf. The API keys are created by the Elasticsearch API key service, which + is automatically enabled. A successful grant API key API call returns a JSON + structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. By default, API keys never expire. You can specify expiration information when you create the API keys. - ``_ + ``_ - :param api_key: Defines the API key. + :param api_key: The API key. :param grant_type: The type of grant. Supported grant types are: `access_token`, `password`. - :param access_token: The user’s access token. If you specify the `access_token` + :param access_token: The user's access token. If you specify the `access_token` grant type, this parameter is required. It is not valid with other grant types. - :param password: The user’s password. If you specify the `password` grant type, + :param password: The user's password. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. :param run_as: The name of the user to be impersonated. :param username: The user name that identifies the user. If you specify the `password` @@ -2315,9 +2416,10 @@ async def has_privileges( ) -> ObjectApiResponse[t.Any]: """ Check user privileges. Determine whether the specified user has a specified list - of privileges. + of privileges. All users can use this API, but only to determine their own privileges. + To check the privileges of other users, you must use the run as feature. - ``_ + ``_ :param user: Username :param application: @@ -2381,7 +2483,7 @@ async def has_privileges_user_profile( applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. - ``_ + ``_ :param privileges: An object containing all the privileges to be checked. :param uids: A list of profile IDs. The privileges are checked for associated @@ -2442,29 +2544,33 @@ async def invalidate_api_key( key or grant API key APIs. 
Invalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically - deleted. The `manage_api_key` privilege allows deleting any API keys. The `manage_own_api_key` - only allows deleting API keys that are owned by the user. In addition, with the - `manage_own_api_key` privilege, an invalidation request must be issued in one - of the three formats: - Set the parameter `owner=true`. - Or, set both `username` - and `realm_name` to match the user’s identity. - Or, if the request is issued - by an API key, that is to say an API key invalidates itself, specify its ID in - the `ids` field. - - ``_ + deleted. To use this API, you must have at least the `manage_security`, `manage_api_key`, + or `manage_own_api_key` cluster privileges. The `manage_security` privilege allows + deleting any API key, including both REST and cross cluster API keys. The `manage_api_key` + privilege allows deleting any REST API key, but not cross cluster API keys. The + `manage_own_api_key` only allows deleting REST API keys that are owned by the + user. In addition, with the `manage_own_api_key` privilege, an invalidation request + must be issued in one of the three formats: - Set the parameter `owner=true`. + - Or, set both `username` and `realm_name` to match the user's identity. - Or, + if the request is issued by an API key, that is to say an API key invalidates + itself, specify its ID in the `ids` field. + + ``_ :param id: :param ids: A list of API key ids. This parameter cannot be used with any of `name`, `realm_name`, or `username`. :param name: An API key name. This parameter cannot be used with any of `ids`, `realm_name` or `username`. - :param owner: Can be used to query API keys owned by the currently authenticated - user. The `realm_name` or `username` parameters cannot be specified when - this parameter is set to `true` as they are assumed to be the currently authenticated - ones. + :param owner: Query API keys owned by the currently authenticated user. The `realm_name` + or `username` parameters cannot be specified when this parameter is set to + `true` as they are assumed to be the currently authenticated ones. NOTE: + At least one of `ids`, `name`, `username`, and `realm_name` must be specified + if `owner` is `false`. :param realm_name: The name of an authentication realm. This parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`. :param username: The username of a user. This parameter cannot be used with either - `ids` or `name`, or when `owner` flag is set to `true`. + `ids` or `name` or when `owner` flag is set to `true`. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/api_key" @@ -2524,14 +2630,21 @@ async def invalidate_token( longer be used. The time period is defined by the `xpack.security.authc.token.timeout` setting. The refresh tokens returned by the get token API are only valid for 24 hours. They can also be used exactly once. If you want to invalidate one or - more access or refresh tokens immediately, use this invalidate token API. + more access or refresh tokens immediately, use this invalidate token API. NOTE: + While all parameters are optional, at least one of them is required. More specifically, + either one of `token` or `refresh_token` parameters is required. If none of these + two are specified, then `realm_name` and/or `username` need to be specified. 
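For context, the parameter constraints described above translate into calls such as the following; the token value, username, and realm name are placeholders:

```
import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    # Placeholder connection details and token values.
    client = AsyncElasticsearch("https://localhost:9200", api_key="<api-key>")
    try:
        # Invalidate a single access token...
        await client.security.invalidate_token(token="<access-token>")
        # ...or every token issued to one user in a given realm.
        await client.security.invalidate_token(username="myuser", realm_name="native1")
    finally:
        await client.close()


asyncio.run(main())
```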
- ``_ + ``_ - :param realm_name: - :param refresh_token: - :param token: - :param username: + :param realm_name: The name of an authentication realm. This parameter cannot + be used with either `refresh_token` or `token`. + :param refresh_token: A refresh token. This parameter cannot be used if any of + `refresh_token`, `realm_name`, or `username` are used. + :param token: An access token. This parameter cannot be used if any of `refresh_token`, + `realm_name`, or `username` are used. + :param username: The username of a user. This parameter cannot be used with either + `refresh_token` or `token`. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/oauth2/token" @@ -2589,7 +2702,7 @@ async def oidc_authenticate( are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. - ``_ + ``_ :param nonce: Associate a client session with an ID token and mitigate replay attacks. This value needs to be the same as the one that was provided to @@ -2670,7 +2783,7 @@ async def oidc_logout( Connect based authentication, but can also be used by other, custom web applications or other clients. - ``_ + ``_ :param access_token: The access token to be invalidated. :param refresh_token: The refresh token to be invalidated. @@ -2733,7 +2846,7 @@ async def oidc_prepare_authentication( Connect based authentication, but can also be used by other, custom web applications or other clients. - ``_ + ``_ :param iss: In the case of a third party initiated single sign on, this is the issuer identifier for the OP that the RP is to send the authentication request @@ -2808,9 +2921,22 @@ async def put_privileges( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update application privileges. - - ``_ + Create or update application privileges. To use this API, you must have one of + the following privileges: * The `manage_security` cluster privilege (or a greater + privilege such as `all`). * The "Manage Application Privileges" global privilege + for the application being referenced in the request. Application names are formed + from a prefix, with an optional suffix that conform to the following rules: * + The prefix must begin with a lowercase ASCII letter. * The prefix must contain + only ASCII letters or digits. * The prefix must be at least 3 characters long. + * If the suffix exists, it must begin with either a dash `-` or `_`. * The suffix + cannot contain any of the following characters: `\\`, `/`, `*`, `?`, `"`, `<`, + `>`, `|`, `,`, `*`. * No part of the name can contain whitespace. Privilege names + must begin with a lowercase ASCII letter and must contain only ASCII letters + and digits along with the characters `_`, `-`, and `.`. Action names can contain + any number of printable ASCII characters and must contain at least one of the + following characters: `/`, `*`, `:`. + + ``_ :param privileges: :param refresh: If `true` (the default) then refresh the affected shards to make @@ -2959,7 +3085,7 @@ async def put_role( The create or update roles API cannot update roles that are defined in roles files. File-based role management is not available in Elastic Serverless. - ``_ + ``_ :param name: The name of the role. :param applications: A list of application privilege entries. @@ -2976,7 +3102,10 @@ async def put_role( this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. 
:param remote_cluster: A list of remote cluster permissions entries. - :param remote_indices: A list of remote indices permissions entries. + :param remote_indices: A list of remote indices permissions entries. NOTE: Remote + indices are effective for remote clusters configured with the API key based + model. They have no effect for remote clusters configured with the certificate + based model. :param run_as: A list of users that the owners of this role can impersonate. *Note*: in Serverless, the run-as feature is disabled. For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will @@ -3071,21 +3200,45 @@ async def put_role_mapping( that are granted to those users. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role - mapping files. This API does not create roles. Rather, it maps users to existing - roles. Roles can be created by using the create or update roles API or roles - files. + mapping files. NOTE: This API does not create roles. Rather, it maps users to + existing roles. Roles can be created by using the create or update roles API + or roles files. **Role templates** The most common use for role mappings is to + create a mapping from a known value on the user to a fixed role name. For example, + all users in the `cn=admin,dc=example,dc=com` LDAP group should be given the + superuser role in Elasticsearch. The `roles` field is used for this purpose. + For more complex needs, it is possible to use Mustache templates to dynamically + determine the names of the roles that should be granted to the user. The `role_templates` + field is used for this purpose. NOTE: To use role templates successfully, the + relevant scripting feature must be enabled. Otherwise, all attempts to create + a role mapping with role templates fail. All of the user fields that are available + in the role mapping rules are also available in the role templates. Thus it is + possible to assign a user to a role that reflects their username, their groups, + or the name of the realm to which they authenticated. By default a template is + evaluated to produce a single string that is the name of the role which should + be assigned to the user. If the format of the template is set to "json" then + the template is expected to produce a JSON string or an array of JSON strings + for the role names. + + ``_ - ``_ - - :param name: Role-mapping name - :param enabled: - :param metadata: + :param name: The distinct name that identifies the role mapping. The name is + used solely as an identifier to facilitate interaction via the API; it does + not affect the behavior of the mapping in any way. + :param enabled: Mappings that have `enabled` set to `false` are ignored when + role mapping is performed. + :param metadata: Additional metadata that helps define which roles are assigned + to each user. Within the metadata object, keys beginning with `_` are reserved + for system usage. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. 
- :param role_templates: - :param roles: - :param rules: + :param role_templates: A list of Mustache templates that will be evaluated to + determine the roles names that should granted to the users that match the + role mapping rules. Exactly one of `roles` or `role_templates` must be specified. + :param roles: A list of role names that are granted to the users that match the + role mapping rules. Exactly one of `roles` or `role_templates` must be specified. + :param rules: The rules that determine which users should be matched by the mapping. + A rule is a logical condition that is expressed by using a JSON DSL. :param run_as: """ if name in SKIP_IN_PATH: @@ -3160,23 +3313,38 @@ async def put_user( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update users. A password is required for adding a new user but is optional - when updating an existing user. To change a user’s password without updating - any other fields, use the change password API. - - ``_ - - :param username: The username of the User - :param email: - :param enabled: - :param full_name: - :param metadata: - :param password: - :param password_hash: - :param refresh: If `true` (the default) then refresh the affected shards to make - this operation visible to search, if `wait_for` then wait for a refresh to - make this operation visible to search, if `false` then do nothing with refreshes. - :param roles: + Create or update users. Add and update users in the native realm. A password + is required for adding a new user but is optional when updating an existing user. + To change a user's password without updating any other fields, use the change + password API. + + ``_ + + :param username: An identifier for the user. NOTE: Usernames must be at least + 1 and no more than 507 characters. They can contain alphanumeric characters + (a-z, A-Z, 0-9), spaces, punctuation, and printable symbols in the Basic + Latin (ASCII) block. Leading or trailing whitespace is not allowed. + :param email: The email of the user. + :param enabled: Specifies whether the user is enabled. + :param full_name: The full name of the user. + :param metadata: Arbitrary metadata that you want to associate with the user. + :param password: The user's password. Passwords must be at least 6 characters + long. When adding a user, one of `password` or `password_hash` is required. + When updating an existing user, the password is optional, so that other fields + on the user (such as their roles) may be updated without modifying the user's + password + :param password_hash: A hash of the user's password. This must be produced using + the same hashing algorithm as has been configured for password storage. For + more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` + setting in the user cache and password hash algorithm documentation. Using + this parameter allows the client to pre-hash the password for performance + and/or confidentiality reasons. The `password` parameter and the `password_hash` + parameter cannot be used in the same request. + :param refresh: Valid values are `true`, `false`, and `wait_for`. These values + have the same meaning as in the index API, but the default value for this + API is true. + :param roles: A set of roles the user has. The roles determine the user's access + permissions. To create a user without any roles, specify an empty list (`[]`). 
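        A minimal, illustrative sketch of calling `put_user` with the synchronous Python
        client, assuming a reachable cluster; the host, API key, and user details below are
        placeholders, and `AsyncElasticsearch` accepts the same keyword arguments as awaitables.

            from elasticsearch import Elasticsearch

            client = Elasticsearch("https://localhost:9200", api_key="<api-key>")  # placeholder

            # Create (or update) a native-realm user. `password` is required only when
            # the user does not exist yet; `roles=[]` would create a user with no roles.
            resp = client.security.put_user(
                username="jacknich",
                password="l0ng-r4nd0m-p@ssw0rd",
                roles=["admin", "other_role1"],
                full_name="Jack Nicholson",
                email="jacknich@example.com",
                metadata={"intelligence": 7},
            )
            print(resp["created"])  # True on first creation, False when updating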
""" if username in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'username'") @@ -3260,9 +3428,14 @@ async def query_api_keys( ) -> ObjectApiResponse[t.Any]: """ Find API keys with a query. Get a paginated list of API keys and their information. - You can optionally filter the results with a query. + You can optionally filter the results with a query. To use this API, you must + have at least the `manage_own_api_key` or the `read_security` cluster privileges. + If you have only the `manage_own_api_key` privilege, this API returns only the + API keys that you own. If you have the `read_security`, `manage_api_key`, or + greater privileges (including `manage_security`), this API returns all API keys + regardless of ownership. - ``_ + ``_ :param aggregations: Any aggregations to run over the corpus of returned API keys. Aggregations and queries work together. Aggregations are computed only @@ -3276,30 +3449,39 @@ async def query_api_keys( `terms`, `range`, `date_range`, `missing`, `cardinality`, `value_count`, `composite`, `filter`, and `filters`. Additionally, aggregations only run over the same subset of fields that query works with. - :param from_: Starting document offset. By default, you cannot page through more - than 10,000 hits using the from and size parameters. To page through more - hits, use the `search_after` parameter. + :param from_: The starting document offset. It must not be negative. By default, + you cannot page through more than 10,000 hits using the `from` and `size` + parameters. To page through more hits, use the `search_after` parameter. :param query: A query to filter which API keys to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following public information associated with an API key: `id`, `type`, `name`, `creation`, `expiration`, `invalidated`, `invalidation`, - `username`, `realm`, and `metadata`. - :param search_after: Search after definition - :param size: The number of hits to return. By default, you cannot page through - more than 10,000 hits using the `from` and `size` parameters. To page through - more hits, use the `search_after` parameter. - :param sort: Other than `id`, all public fields of an API key are eligible for - sorting. In addition, sort can also be applied to the `_doc` field to sort - by index order. + `username`, `realm`, and `metadata`. NOTE: The queryable string values associated + with API keys are internally mapped as keywords. Consequently, if no `analyzer` + parameter is specified for a `match` query, then the provided match query + string is interpreted as a single keyword value. Such a match query is hence + equivalent to a `term` query. + :param search_after: The search after definition. + :param size: The number of hits to return. It must not be negative. The `size` + parameter can be set to `0`, in which case no API key matches are returned, + only the aggregation results. By default, you cannot page through more than + 10,000 hits using the `from` and `size` parameters. To page through more + hits, use the `search_after` parameter. + :param sort: The sort definition. Other than `id`, all public fields of an API + key are eligible for sorting. In addition, sort can also be applied to the + `_doc` field to sort by index order. 
:param typed_keys: Determines whether aggregation names are prefixed by their respective types in the response. :param with_limited_by: Return the snapshot of the owner user's role descriptors associated with the API key. An API key's actual permission is the intersection - of its assigned role descriptors and the owner user's role descriptors. - :param with_profile_uid: Determines whether to also retrieve the profile uid, - for the API key owner principal, if it exists. + of its assigned role descriptors and the owner user's role descriptors (effectively + limited by it). An API key cannot retrieve any API key’s limited-by role + descriptors (including itself) unless it has `manage_api_key` or higher privileges. + :param with_profile_uid: Determines whether to also retrieve the profile UID + for the API key owner principal. If it exists, the profile UID is returned + under the `profile_uid` response field for each API key. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/_query/api_key" @@ -3386,26 +3568,30 @@ async def query_role( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Find roles with a query. Get roles in a paginated manner. You can optionally - filter the results with a query. + Find roles with a query. Get roles in a paginated manner. The role management + APIs are generally the preferred way to manage roles, rather than using file-based + role management. The query roles API does not retrieve roles that are defined + in roles files, nor built-in ones. You can optionally filter the results with + a query. Also, the results can be paginated and sorted. - ``_ + ``_ - :param from_: Starting document offset. By default, you cannot page through more - than 10,000 hits using the from and size parameters. To page through more - hits, use the `search_after` parameter. + :param from_: The starting document offset. It must not be negative. By default, + you cannot page through more than 10,000 hits using the `from` and `size` + parameters. To page through more hits, use the `search_after` parameter. :param query: A query to filter which roles to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following information associated with roles: `name`, `description`, - `metadata`, `applications.application`, `applications.privileges`, `applications.resources`. - :param search_after: Search after definition - :param size: The number of hits to return. By default, you cannot page through - more than 10,000 hits using the `from` and `size` parameters. To page through - more hits, use the `search_after` parameter. - :param sort: All public fields of a role are eligible for sorting. In addition, - sort can also be applied to the `_doc` field to sort by index order. + `metadata`, `applications.application`, `applications.privileges`, and `applications.resources`. + :param search_after: The search after definition. + :param size: The number of hits to return. It must not be negative. By default, + you cannot page through more than 10,000 hits using the `from` and `size` + parameters. To page through more hits, use the `search_after` parameter. + :param sort: The sort definition. You can sort on `username`, `roles`, or `enabled`. + In addition, sort can also be applied to the `_doc` field to sort by index + order. 
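        As a hedged example, a brief sketch of listing roles with `query_role`, assuming a
        reachable cluster and the `read_security` cluster privilege; the host, API key, and
        the role-name prefix are placeholders.

            from elasticsearch import Elasticsearch

            client = Elasticsearch("https://localhost:9200", api_key="<api-key>")  # placeholder

            # Return up to 50 roles whose name starts with "app-". Without a query,
            # the call behaves like a match_all and returns every queryable role.
            resp = client.security.query_role(
                query={"prefix": {"name": "app-"}},
                size=50,
            )
            for role in resp["roles"]:
                print(role["name"])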
""" __path_parts: t.Dict[str, str] = {} __path = "/_security/_query/role" @@ -3473,27 +3659,30 @@ async def query_user( ) -> ObjectApiResponse[t.Any]: """ Find users with a query. Get information for users in a paginated manner. You - can optionally filter the results with a query. + can optionally filter the results with a query. NOTE: As opposed to the get user + API, built-in users are excluded from the result. This API is only for native + users. - ``_ + ``_ - :param from_: Starting document offset. By default, you cannot page through more - than 10,000 hits using the from and size parameters. To page through more - hits, use the `search_after` parameter. + :param from_: The starting document offset. It must not be negative. By default, + you cannot page through more than 10,000 hits using the `from` and `size` + parameters. To page through more hits, use the `search_after` parameter. :param query: A query to filter which users to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following information associated with user: `username`, - `roles`, `enabled` - :param search_after: Search after definition - :param size: The number of hits to return. By default, you cannot page through - more than 10,000 hits using the `from` and `size` parameters. To page through - more hits, use the `search_after` parameter. - :param sort: Fields eligible for sorting are: username, roles, enabled In addition, - sort can also be applied to the `_doc` field to sort by index order. - :param with_profile_uid: If true will return the User Profile ID for the users - in the query result, if any. + `roles`, `enabled`, `full_name`, and `email`. + :param search_after: The search after definition + :param size: The number of hits to return. It must not be negative. By default, + you cannot page through more than 10,000 hits using the `from` and `size` + parameters. To page through more hits, use the `search_after` parameter. + :param sort: The sort definition. Fields eligible for sorting are: `username`, + `roles`, `enabled`. In addition, sort can also be applied to the `_doc` field + to sort by index order. + :param with_profile_uid: Determines whether to retrieve the user profile UID, + if it exists, for the users. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/_query/user" @@ -3565,7 +3754,7 @@ async def saml_authenticate( Elasticsearch access and refresh tokens, which can be used for authentication against Elasticsearch. - ``_ + ``_ :param content: The SAML response as it was sent by the user's browser, usually a Base64 encoded XML document. @@ -3636,7 +3825,7 @@ async def saml_complete_logout( of this API must prepare the request accordingly so that this API can handle either of them. - ``_ + ``_ :param ids: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. @@ -3710,7 +3899,7 @@ async def saml_invalidate( to that specific SAML principal and provides a URL that contains a SAML LogoutResponse message. Thus the user can be redirected back to their IdP. - ``_ + ``_ :param query_string: The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout. 
This query should include @@ -3784,7 +3973,7 @@ async def saml_logout( a URL to redirect the user to the IdP that contains a SAML logout request (starting an SP-initiated SAML Single Logout). - ``_ + ``_ :param token: The access token that was returned as a response to calling the SAML authenticate API. Alternatively, the most recent token that was received @@ -3854,7 +4043,7 @@ async def saml_prepare_authentication( request. The caller of this API needs to store this identifier as it needs to be used in a following step of the authentication process. - ``_ + ``_ :param acs: The Assertion Consumer Service URL that matches the one of the SAML realms in Elasticsearch. The realm is used to generate the authentication @@ -3913,7 +4102,7 @@ async def saml_service_provider_metadata( This API generates Service Provider metadata based on the configuration of a SAML realm in Elasticsearch. - ``_ + ``_ :param realm_name: The name of the SAML realm in Elasticsearch. """ @@ -3964,7 +4153,7 @@ async def suggest_user_profiles( Elastic reserves the right to change or remove this feature in future releases without prior notice. - ``_ + ``_ :param data: A comma-separated list of filters for the `data` field of the profile document. To return all content use `data=*`. To return a subset of content, @@ -4033,38 +4222,44 @@ async def update_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update an API key. Updates attributes of an existing API key. Users can only - update API keys that they created or that were granted to them. Use this API - to update API keys created by the create API Key or grant API Key APIs. If you - need to apply the same update to many API keys, you can use bulk update API Keys - to reduce overhead. It’s not possible to update expired API keys, or API keys - that have been invalidated by invalidate API Key. This API supports updates to - an API key’s access scope and metadata. The access scope of an API key is derived - from the `role_descriptors` you specify in the request, and a snapshot of the - owner user’s permissions at the time of the request. The snapshot of the owner’s - permissions is updated automatically on every call. If you don’t specify `role_descriptors` - in the request, a call to this API might still change the API key’s access scope. - This change can occur if the owner user’s permissions have changed since the - API key was created or last modified. To update another user’s API key, use the - `run_as` feature to submit a request on behalf of another user. IMPORTANT: It’s - not possible to use an API key as the authentication credential for this API. - To update an API key, the owner user’s credentials are required. - - ``_ + Update an API key. Update attributes of an existing API key. This API supports + updates to an API key's access scope, expiration, and metadata. To use this API, + you must have at least the `manage_own_api_key` cluster privilege. Users can + only update API keys that they created or that were granted to them. To update + another user’s API key, use the `run_as` feature to submit a request on behalf + of another user. IMPORTANT: It's not possible to use an API key as the authentication + credential for this API. The owner user’s credentials are required. Use this + API to update API keys created by the create API key or grant API Key APIs. If + you need to apply the same update to many API keys, you can use the bulk update + API keys API to reduce overhead. 
It's not possible to update expired API keys + or API keys that have been invalidated by the invalidate API key API. The access + scope of an API key is derived from the `role_descriptors` you specify in the + request and a snapshot of the owner user's permissions at the time of the request. + The snapshot of the owner's permissions is updated automatically on every call. + IMPORTANT: If you don't specify `role_descriptors` in the request, a call to + this API might still change the API key's access scope. This change can occur + if the owner user's permissions have changed since the API key was created or + last modified. + + ``_ :param id: The ID of the API key to update. - :param expiration: Expiration time for the API key. + :param expiration: The expiration time for the API key. By default, API keys + never expire. This property can be omitted to leave the expiration unchanged. :param metadata: Arbitrary metadata that you want to associate with the API key. - It supports nested data structure. Within the metadata object, keys beginning - with _ are reserved for system usage. - :param role_descriptors: An array of role descriptors for this API key. This - parameter is optional. When it is not specified or is an empty array, then - the API key will have a point in time snapshot of permissions of the authenticated - user. If you supply role descriptors then the resultant permissions would - be an intersection of API keys permissions and authenticated user’s permissions - thereby limiting the access scope for API keys. The structure of role descriptor - is the same as the request for create role API. For more details, see create - or update roles API. + It supports a nested data structure. Within the metadata object, keys beginning + with `_` are reserved for system usage. When specified, this value fully + replaces the metadata previously associated with the API key. + :param role_descriptors: The role descriptors to assign to this API key. The + API key's effective permissions are an intersection of its assigned privileges + and the point in time snapshot of permissions of the owner user. You can + assign new privileges by specifying them in this parameter. To remove assigned + privileges, you can supply an empty `role_descriptors` parameter, that is + to say, an empty object `{}`. If an API key has no assigned privileges, it + inherits the owner user's full permissions. The snapshot of the owner's permissions + is always updated, whether you supply the `role_descriptors` parameter or + not. The structure of a role descriptor is the same as the request for the + create API keys API. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -4133,7 +4328,7 @@ async def update_cross_cluster_api_key( API keys, which should be updated by either the update API key or bulk update API keys API. - ``_ + ``_ :param id: The ID of the cross-cluster API key to update. :param access: The access to be granted to this API key. The access is composed @@ -4205,12 +4400,14 @@ async def update_settings( """ Update security index settings. Update the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of - settings are allowed to be modified, for example `index.auto_expand_replicas` - and `index.number_of_replicas`. If a specific index is not in use on the system - and settings are provided for it, the request will be rejected. This API does - not yet support configuring the settings for indices before they are in use. 
+ settings are allowed to be modified. This includes `index.auto_expand_replicas` + and `index.number_of_replicas`. NOTE: If `index.auto_expand_replicas` is set, + `index.number_of_replicas` will be ignored during updates. If a specific index + is not in use on the system and settings are provided for it, the request will + be rejected. This API does not yet support configuring the settings for indices + before they are in use. - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails @@ -4291,7 +4488,7 @@ async def update_user_profile_data( data, content is namespaced by the top-level fields. The `update_profile_data` global privilege grants privileges for updating only the allowed namespaces. - ``_ + ``_ :param uid: A unique identifier for the user profile. :param data: Non-searchable data that you want to associate with the user profile. diff --git a/elasticsearch/_async/client/shutdown.py b/elasticsearch/_async/client/shutdown.py index df396a7a3..3236aa0c2 100644 --- a/elasticsearch/_async/client/shutdown.py +++ b/elasticsearch/_async/client/shutdown.py @@ -50,7 +50,7 @@ async def delete_node( and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator privileges feature is enabled, you must be an operator to use this API. - ``_ + ``_ :param node_id: The node id of node to be removed from the shutdown state :param master_timeout: Period to wait for a connection to the master node. If @@ -108,7 +108,7 @@ async def get_node( the operator privileges feature is enabled, you must be an operator to use this API. - ``_ + ``_ :param node_id: Which node for which to retrieve the shutdown status :param master_timeout: Period to wait for a connection to the master node. If @@ -182,7 +182,7 @@ async def put_node( IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the node shutdown status to determine when it is safe to stop Elasticsearch. - ``_ + ``_ :param node_id: The node identifier. This parameter is not validated against the cluster's active nodes. This enables you to register a node for shut diff --git a/elasticsearch/_async/client/simulate.py b/elasticsearch/_async/client/simulate.py index 4a2d871b2..6c40ff3c7 100644 --- a/elasticsearch/_async/client/simulate.py +++ b/elasticsearch/_async/client/simulate.py @@ -87,7 +87,7 @@ async def ingest( This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request. - ``_ + ``_ :param docs: Sample documents to test in the pipeline. :param index: The index to simulate ingesting into. This value can be overridden diff --git a/elasticsearch/_async/client/slm.py b/elasticsearch/_async/client/slm.py index cc3380d77..1870a9de5 100644 --- a/elasticsearch/_async/client/slm.py +++ b/elasticsearch/_async/client/slm.py @@ -42,7 +42,7 @@ async def delete_lifecycle( prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots. - ``_ + ``_ :param policy_id: The id of the snapshot lifecycle policy to remove :param master_timeout: The period to wait for a connection to the master node. @@ -96,7 +96,7 @@ async def execute_lifecycle( applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance. 
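        A short, illustrative sketch of triggering a policy run ahead of maintenance,
        assuming an SLM policy named `nightly-snapshots` already exists; the policy name,
        host, and API key are placeholders.

            from elasticsearch import Elasticsearch

            client = Elasticsearch("https://localhost:9200", api_key="<api-key>")  # placeholder

            # Start the policy's snapshot immediately instead of waiting for its schedule.
            resp = client.slm.execute_lifecycle(policy_id="nightly-snapshots")
            print(resp["snapshot_name"])  # the name of the snapshot that was started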
- ``_ + ``_ :param policy_id: The id of the snapshot lifecycle policy to be executed :param master_timeout: The period to wait for a connection to the master node. @@ -148,7 +148,7 @@ async def execute_retention( removal of snapshots that are expired according to the snapshot lifecycle policy retention rules. The retention policy is normally applied according to its schedule. - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails @@ -197,7 +197,7 @@ async def get_lifecycle( Get policy information. Get snapshot lifecycle policy definitions and information about the latest snapshot attempts. - ``_ + ``_ :param policy_id: Comma-separated list of snapshot lifecycle policies to retrieve :param master_timeout: The period to wait for a connection to the master node. @@ -251,7 +251,7 @@ async def get_stats( Get snapshot lifecycle management statistics. Get global and policy-level statistics about actions taken by snapshot lifecycle management. - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and @@ -298,7 +298,7 @@ async def get_status( """ Get the snapshot lifecycle management status. - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails @@ -358,7 +358,7 @@ async def put_lifecycle( policy already exists, this request increments the policy version. Only the latest version of a policy is stored. - ``_ + ``_ :param policy_id: The identifier for the snapshot lifecycle policy you want to create or update. @@ -441,7 +441,7 @@ async def start( automatically when a cluster is formed. Manually starting SLM is necessary only if it has been stopped using the stop SLM API. - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails @@ -498,7 +498,7 @@ async def stop( complete and it can be safely stopped. Use the get snapshot lifecycle management status API to see if SLM is running. - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails diff --git a/elasticsearch/_async/client/snapshot.py b/elasticsearch/_async/client/snapshot.py index f2c9448b9..969be8774 100644 --- a/elasticsearch/_async/client/snapshot.py +++ b/elasticsearch/_async/client/snapshot.py @@ -47,7 +47,7 @@ async def cleanup_repository( Clean up the snapshot repository. Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots. - ``_ + ``_ :param name: Snapshot repository to clean up. :param master_timeout: Period to wait for a connection to the master node. @@ -101,7 +101,7 @@ async def clone( Clone a snapshot. Clone part of all of a snapshot into another snapshot in the same repository. - ``_ + ``_ :param repository: A repository name :param snapshot: The name of the snapshot to clone from @@ -181,7 +181,7 @@ async def create( """ Create a snapshot. Take a snapshot of a cluster or of data streams and indices. - ``_ + ``_ :param repository: Repository for the snapshot. :param snapshot: Name of the snapshot. Must be unique in the repository. @@ -289,7 +289,7 @@ async def create_repository( be writeable. 
Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` and `clsuter.blocks.read_only_allow_delete` settings) that prevent write access. - ``_ + ``_ :param name: A repository name :param repository: @@ -349,7 +349,7 @@ async def delete( """ Delete snapshots. - ``_ + ``_ :param repository: A repository name :param snapshot: A comma-separated list of snapshot names @@ -402,7 +402,7 @@ async def delete_repository( removes only the reference to the location where the repository is storing the snapshots. The snapshots themselves are left untouched and in place. - ``_ + ``_ :param name: Name of the snapshot repository to unregister. Wildcard (`*`) patterns are supported. @@ -476,7 +476,7 @@ async def get( """ Get snapshot information. - ``_ + ``_ :param repository: Comma-separated list of snapshot repository names used to limit the request. Wildcard (*) expressions are supported. @@ -588,7 +588,7 @@ async def get_repository( """ Get snapshot repository information. - ``_ + ``_ :param name: A comma-separated list of repository names :param local: Return local information, do not retrieve the state from master @@ -763,7 +763,7 @@ async def repository_analyze( Some operations also verify the behavior on small blobs with sizes other than 8 bytes. - ``_ + ``_ :param name: The name of the repository. :param blob_count: The total number of blobs to write to the repository during @@ -899,7 +899,7 @@ async def repository_verify_integrity( in future versions. NOTE: This API may not work correctly in a mixed-version cluster. - ``_ + ``_ :param name: A repository name :param blob_thread_pool_concurrency: Number of threads to use for reading blob @@ -1009,7 +1009,7 @@ async def restore( or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot. - ``_ + ``_ :param repository: A repository name :param snapshot: A snapshot name @@ -1113,7 +1113,7 @@ async def status( These requests can also tax machine resources and, when using cloud storage, incur high processing costs. - ``_ + ``_ :param repository: A repository name :param snapshot: A comma-separated list of snapshot names @@ -1173,7 +1173,7 @@ async def verify_repository( Verify a snapshot repository. Check for common misconfigurations in a snapshot repository. - ``_ + ``_ :param name: A repository name :param master_timeout: Explicit operation timeout for connection to master node diff --git a/elasticsearch/_async/client/sql.py b/elasticsearch/_async/client/sql.py index ca927d765..2a93a5837 100644 --- a/elasticsearch/_async/client/sql.py +++ b/elasticsearch/_async/client/sql.py @@ -41,7 +41,7 @@ async def clear_cursor( """ Clear an SQL search cursor. - ``_ + ``_ :param cursor: Cursor to clear. """ @@ -90,7 +90,7 @@ async def delete_async( a search: * Users with the `cancel_task` cluster privilege. * The user who first submitted the search. - ``_ + ``_ :param id: The identifier for the search. """ @@ -139,7 +139,7 @@ async def get_async( features are enabled, only the user who first submitted the SQL search can retrieve the search using this API. - ``_ + ``_ :param id: The identifier for the search. :param delimiter: The separator for CSV results. The API supports this parameter @@ -198,7 +198,7 @@ async def get_async_status( Get the async SQL search status. Get the current status of an async SQL search or a stored synchronous SQL search. - ``_ + ``_ :param id: The identifier for the search. """ @@ -283,7 +283,7 @@ async def query( """ Get SQL search results. Run an SQL request. 
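        A compact sketch of running an SQL statement and paging through the results with the
        returned cursor, assuming an index such as `library` exists; the index name, host, and
        API key are placeholders.

            from elasticsearch import Elasticsearch

            client = Elasticsearch("https://localhost:9200", api_key="<api-key>")  # placeholder

            # First page: column metadata plus the first batch of rows and a cursor.
            resp = client.sql.query(
                query="SELECT author, name FROM library ORDER BY page_count DESC",
                fetch_size=5,
            )
            print(resp["columns"])
            print(resp["rows"])

            # Subsequent pages are fetched with only the cursor until it is exhausted.
            while resp.get("cursor"):
                resp = client.sql.query(cursor=resp["cursor"])
                print(resp["rows"])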
- ``_ + ``_ :param allow_partial_search_results: If `true`, the response has partial results when there are shard request timeouts or shard failures. If `false`, the @@ -406,7 +406,7 @@ async def translate( API request containing Query DSL. It accepts the same request body parameters as the SQL search API, excluding `cursor`. - ``_ + ``_ :param query: The SQL query to run. :param fetch_size: The maximum number of rows (or entries) to return in one response. diff --git a/elasticsearch/_async/client/ssl.py b/elasticsearch/_async/client/ssl.py index 75f423927..6ab683691 100644 --- a/elasticsearch/_async/client/ssl.py +++ b/elasticsearch/_async/client/ssl.py @@ -53,7 +53,7 @@ async def certificates( the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ssl/certificates" diff --git a/elasticsearch/_async/client/synonyms.py b/elasticsearch/_async/client/synonyms.py index e4e79a9e9..e6fe303fc 100644 --- a/elasticsearch/_async/client/synonyms.py +++ b/elasticsearch/_async/client/synonyms.py @@ -52,7 +52,7 @@ async def delete_synonym( finished, you can delete the index. When the synonyms set is not used in analyzers, you will be able to delete it. - ``_ + ``_ :param id: The synonyms set identifier to delete. """ @@ -93,7 +93,7 @@ async def delete_synonym_rule( """ Delete a synonym rule. Delete a synonym rule from a synonym set. - ``_ + ``_ :param set_id: The ID of the synonym set to update. :param rule_id: The ID of the synonym rule to delete. @@ -143,7 +143,7 @@ async def get_synonym( """ Get a synonym set. - ``_ + ``_ :param id: The synonyms set identifier to retrieve. :param from_: The starting offset for query rules to retrieve. @@ -190,7 +190,7 @@ async def get_synonym_rule( """ Get a synonym rule. Get a synonym rule from a synonym set. - ``_ + ``_ :param set_id: The ID of the synonym set to retrieve the synonym rule from. :param rule_id: The ID of the synonym rule to retrieve. @@ -239,7 +239,7 @@ async def get_synonyms_sets( """ Get all synonym sets. Get a summary of all defined synonym sets. - ``_ + ``_ :param from_: The starting offset for synonyms sets to retrieve. :param size: The maximum number of synonyms sets to retrieve. @@ -293,7 +293,7 @@ async def put_synonym( equivalent to invoking the reload search analyzers API for all indices that use the synonyms set. - ``_ + ``_ :param id: The ID of the synonyms set to be created or updated. :param synonyms_set: The synonym rules definitions for the synonyms set. @@ -349,7 +349,7 @@ async def put_synonym_rule( When you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule. - ``_ + ``_ :param set_id: The ID of the synonym set. :param rule_id: The ID of the synonym rule to be updated or created. diff --git a/elasticsearch/_async/client/tasks.py b/elasticsearch/_async/client/tasks.py index 474ffa23e..576ef3c41 100644 --- a/elasticsearch/_async/client/tasks.py +++ b/elasticsearch/_async/client/tasks.py @@ -61,7 +61,7 @@ async def cancel( threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task. - ``_ + ``_ :param task_id: The task identifier. :param actions: A comma-separated list or wildcard expression of actions that @@ -126,7 +126,7 @@ async def get( task identifier is not found, a 404 response code indicates that there are no resources that match the request. 
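        A small, illustrative sketch of polling a task, for example one returned by a reindex
        or delete-by-query that was submitted with `wait_for_completion=False`; the task ID,
        host, and API key are placeholders.

            from elasticsearch import Elasticsearch

            client = Elasticsearch("https://localhost:9200", api_key="<api-key>")  # placeholder

            # Wait up to 30 seconds for the task to finish, then inspect its status.
            resp = client.tasks.get(
                task_id="oTUltX4IQMOUUVeiohTt8A:12345",  # placeholder node_id:task_number
                wait_for_completion=True,
                timeout="30s",
            )
            print(resp["completed"], resp["task"]["action"])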
- ``_ + ``_ :param task_id: The task identifier. :param timeout: The period to wait for a response. If no response is received @@ -203,7 +203,7 @@ async def list( initiated by the REST request. The `X-Opaque-Id` in the children `headers` is the child task of the task that was initiated by the REST request. - ``_ + ``_ :param actions: A comma-separated list or wildcard expression of actions used to limit the request. For example, you can use `cluser:*` to retrieve all diff --git a/elasticsearch/_async/client/text_structure.py b/elasticsearch/_async/client/text_structure.py index 4c4779cba..f06f0940a 100644 --- a/elasticsearch/_async/client/text_structure.py +++ b/elasticsearch/_async/client/text_structure.py @@ -70,7 +70,7 @@ async def find_field_structure( `explain` query parameter and an explanation will appear in the response. It helps determine why the returned structure was chosen. - ``_ + ``_ :param field: The field that should be analyzed. :param index: The name of the index that contains the analyzed field. @@ -255,7 +255,7 @@ async def find_message_structure( an explanation will appear in the response. It helps determine why the returned structure was chosen. - ``_ + ``_ :param messages: The list of messages you want to analyze. :param column_names: If the format is `delimited`, you can specify the column @@ -427,7 +427,7 @@ async def find_structure( However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. - ``_ + ``_ :param text_files: :param charset: The text's character set. It must be a character set that is @@ -611,7 +611,7 @@ async def test_grok_pattern( indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings. - ``_ + ``_ :param grok_pattern: The Grok pattern to run on the text. :param text: The lines of text to run the Grok pattern on. diff --git a/elasticsearch/_async/client/transform.py b/elasticsearch/_async/client/transform.py index 1d8f55a3b..ca05c9ac7 100644 --- a/elasticsearch/_async/client/transform.py +++ b/elasticsearch/_async/client/transform.py @@ -41,7 +41,7 @@ async def delete_transform( """ Delete a transform. Deletes a transform. - ``_ + ``_ :param transform_id: Identifier for the transform. :param delete_dest_index: If this value is true, the destination index is deleted @@ -101,7 +101,7 @@ async def get_transform( """ Get transforms. Retrieves configuration information for transforms. - ``_ + ``_ :param transform_id: Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using @@ -170,7 +170,7 @@ async def get_transform_stats( """ Get transform stats. Retrieves usage information for transforms. - ``_ + ``_ :param transform_id: Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using @@ -256,7 +256,7 @@ async def preview_transform( These values are determined based on the field types of the source index and the transform aggregations. - ``_ + ``_ :param transform_id: Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform configuration details in @@ -393,7 +393,7 @@ async def put_transform( If you used transforms prior to 7.5, also do not give users any privileges on `.data-frame-internal*` indices. - ``_ + ``_ :param transform_id: Identifier for the transform. 
This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -496,7 +496,7 @@ async def reset_transform( it; alternatively, use the `force` query parameter. If the destination index was created by the transform, it is deleted. - ``_ + ``_ :param transform_id: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -552,7 +552,7 @@ async def schedule_now_transform( the transform will be processed again at now + frequency unless _schedule_now API is called again in the meantime. - ``_ + ``_ :param transform_id: Identifier for the transform. :param timeout: Controls the time to wait for the scheduling to take place @@ -616,7 +616,7 @@ async def start_transform( privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. - ``_ + ``_ :param transform_id: Identifier for the transform. :param from_: Restricts the set of transformed entities to those changed after @@ -670,7 +670,7 @@ async def stop_transform( """ Stop transforms. Stops one or more transforms. - ``_ + ``_ :param transform_id: Identifier for the transform. To stop multiple transforms, use a comma-separated list or a wildcard expression. To stop all transforms, @@ -770,7 +770,7 @@ async def update_transform( which roles the user who updated it had at the time of update and runs with those privileges. - ``_ + ``_ :param transform_id: Identifier for the transform. :param defer_validation: When true, deferrable validations are not run. This @@ -864,7 +864,7 @@ async def upgrade_transforms( example, from 7.16 to 8.0 – it is recommended to upgrade transforms before upgrading the cluster. You may want to perform a recent cluster backup prior to the upgrade. - ``_ + ``_ :param dry_run: When true, the request checks for updates but does not run them. :param timeout: Period to wait for a response. If no response is received before diff --git a/elasticsearch/_async/client/watcher.py b/elasticsearch/_async/client/watcher.py index a4fcb27dd..70949c9e6 100644 --- a/elasticsearch/_async/client/watcher.py +++ b/elasticsearch/_async/client/watcher.py @@ -46,7 +46,7 @@ async def ack_watch( `ack.state` is reset to `awaits_successful_execution`. This happens when the condition of the watch is not met (the condition evaluates to false). - ``_ + ``_ :param watch_id: The watch identifier. :param action_id: A comma-separated list of the action identifiers to acknowledge. @@ -98,7 +98,7 @@ async def activate_watch( """ Activate a watch. A watch can be either active or inactive. - ``_ + ``_ :param watch_id: The watch identifier. """ @@ -138,7 +138,7 @@ async def deactivate_watch( """ Deactivate a watch. A watch can be either active or inactive. - ``_ + ``_ :param watch_id: The watch identifier. """ @@ -184,7 +184,7 @@ async def delete_watch( delete document API When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the `.watches` index. - ``_ + ``_ :param id: The watch identifier. """ @@ -267,7 +267,7 @@ async def execute_watch( that called the API will be used as a base, instead of the information who stored the watch. - ``_ + ``_ :param id: The watch identifier. :param action_modes: Determines how to handle the watch actions as part of the @@ -352,7 +352,7 @@ async def get_settings( Only a subset of settings are shown, for example `index.auto_expand_replicas` and `index.number_of_replicas`. 
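        A minimal sketch, assuming appropriate privileges, of reading the Watcher index
        settings and then adjusting the small subset that may be changed; the host, API key,
        and replica values are placeholders.

            from elasticsearch import Elasticsearch

            client = Elasticsearch("https://localhost:9200", api_key="<api-key>")  # placeholder

            # Inspect the currently configured settings of the .watches index ...
            print(client.watcher.get_settings())

            # ... and update one of the settings that this API allows to change.
            client.watcher.update_settings(index_auto_expand_replicas="0-4")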
- ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails @@ -394,7 +394,7 @@ async def get_watch( """ Get a watch. - ``_ + ``_ :param id: The watch identifier. """ @@ -468,7 +468,7 @@ async def put_watch( for which the user that stored the watch has privileges. If the user is able to read index `a`, but not index `b`, the same will apply when the watch runs. - ``_ + ``_ :param id: The identifier for the watch. :param actions: The list of actions that will be run if the condition matches. @@ -578,7 +578,7 @@ async def query_watches( filter watches by a query. Note that only the `_id` and `metadata.*` fields are queryable or sortable. - ``_ + ``_ :param from_: The offset from the first result to fetch. It must be non-negative. :param query: A query that filters the watches to be returned. @@ -649,7 +649,7 @@ async def start( """ Start the watch service. Start the Watcher service if it is not already running. - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. """ @@ -711,7 +711,7 @@ async def stats( Get Watcher statistics. This API always returns basic metrics. You retrieve more metrics by using the metric parameter. - ``_ + ``_ :param metric: Defines which additional metrics are included in the response. :param emit_stacktraces: Defines whether stack traces are generated for each @@ -758,7 +758,7 @@ async def stop( """ Stop the watch service. Stop the Watcher service if it is running. - ``_ + ``_ :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns @@ -812,7 +812,7 @@ async def update_settings( (`.watches`). Only a subset of settings can be modified. This includes `index.auto_expand_replicas` and `index.number_of_replicas`. - ``_ + ``_ :param index_auto_expand_replicas: :param index_number_of_replicas: diff --git a/elasticsearch/_async/client/xpack.py b/elasticsearch/_async/client/xpack.py index f02ad837d..090aca019 100644 --- a/elasticsearch/_async/client/xpack.py +++ b/elasticsearch/_async/client/xpack.py @@ -48,7 +48,7 @@ async def info( installed license. * Feature information for the features that are currently enabled and available under the current license. - ``_ + ``_ :param accept_enterprise: If this param is used it must be set to true :param categories: A comma-separated list of the information categories to include @@ -94,7 +94,7 @@ async def usage( enabled and available under the current license. The API also provides some usage statistics. - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py index 3292a7454..c308cf846 100644 --- a/elasticsearch/_sync/client/__init__.py +++ b/elasticsearch/_sync/client/__init__.py @@ -722,7 +722,7 @@ def bulk( only wait for those three shards to refresh. The other two shards that make up the index do not participate in the `_bulk` request at all. - ``_ + ``_ :param operations: :param index: The name of the data stream, index, or index alias to perform bulk @@ -840,7 +840,7 @@ def clear_scroll( Clear a scrolling search. Clear the search context and results for a scrolling search. - ``_ + ``_ :param scroll_id: The scroll IDs to clear. To clear all scroll IDs, use `_all`. 
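        An illustrative sketch of releasing a scroll context as soon as a scrolled search has
        been drained; the index name, query, host, and API key are placeholders.

            from elasticsearch import Elasticsearch

            client = Elasticsearch("https://localhost:9200", api_key="<api-key>")  # placeholder

            # Open a scrolling search, walk through every page, then free the context.
            resp = client.search(index="my-index", scroll="1m", size=1000, query={"match_all": {}})
            scroll_id = resp["_scroll_id"]
            while resp["hits"]["hits"]:
                # ... process resp["hits"]["hits"] here ...
                resp = client.scroll(scroll_id=scroll_id, scroll="1m")
                scroll_id = resp["_scroll_id"]
            client.clear_scroll(scroll_id=scroll_id)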
""" @@ -894,7 +894,7 @@ def close_point_in_time( period has elapsed. However, keeping points in time has a cost; close them as soon as they are no longer required for search requests. - ``_ + ``_ :param id: The ID of the point-in-time. """ @@ -975,7 +975,7 @@ def count( a replica is chosen and the search is run against it. This means that replicas increase the scalability of the count. - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, @@ -1115,38 +1115,119 @@ def create( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Index a document. Adds a JSON document to the specified data stream or index - and makes it searchable. If the target is an index and the document already exists, - the request updates the document and increments its version. - - ``_ - - :param index: Name of the data stream or index to target. If the target doesn’t + Create a new document in the index. You can index a new JSON document with the + `//_doc/` or `//_create/<_id>` APIs Using `_create` guarantees + that the document is indexed only if it does not already exist. It returns a + 409 response when a document with a same ID already exists in the index. To update + an existing document, you must use the `//_doc/` API. If the Elasticsearch + security features are enabled, you must have the following index privileges for + the target data stream, index, or index alias: * To add a document using the + `PUT //_create/<_id>` or `POST //_create/<_id>` request formats, + you must have the `create_doc`, `create`, `index`, or `write` index privilege. + * To automatically create a data stream or index with this API request, you must + have the `auto_configure`, `create_index`, or `manage` index privilege. Automatic + data stream creation requires a matching index template with data stream enabled. + **Automatically create data streams and indices** If the request's target doesn't + exist and matches an index template with a `data_stream` definition, the index + operation automatically creates the data stream. If the target doesn't exist + and doesn't match a data stream template, the operation automatically creates + the index and applies any matching index templates. NOTE: Elasticsearch includes + several built-in index templates. To avoid naming collisions with these templates, + refer to index pattern documentation. If no mapping exists, the index operation + creates a dynamic mapping. By default, new fields and objects are automatically + added to the mapping if needed. Automatic index creation is controlled by the + `action.auto_create_index` setting. If it is `true`, any index can be created + automatically. You can modify this setting to explicitly allow or block automatic + creation of indices that match specified patterns or set it to `false` to turn + off automatic index creation entirely. Specify a comma-separated list of patterns + you want to allow or prefix each pattern with `+` or `-` to indicate whether + it should be allowed or blocked. When a list is specified, the default behaviour + is to disallow. NOTE: The `action.auto_create_index` setting affects the automatic + creation of indices only. It does not affect the creation of data streams. **Routing** + By default, shard placement — or routing — is controlled by using a hash of the + document's ID value. 
For more explicit control, the value fed into the hash function + used by the router can be directly specified on a per-operation basis using the + `routing` parameter. When setting up explicit mapping, you can also use the `_routing` + field to direct the index operation to extract the routing value from the document + itself. This does come at the (very minimal) cost of an additional document parsing + pass. If the `_routing` mapping is defined and set to be required, the index + operation will fail if no routing value is provided or extracted. NOTE: Data + streams do not support custom routing unless they were created with the `allow_custom_routing` + setting enabled in the template. **Distributed** The index operation is directed + to the primary shard based on its route and performed on the actual node containing + this shard. After the primary shard completes the operation, if needed, the update + is distributed to applicable replicas. **Active shards** To improve the resiliency + of writes to the system, indexing operations can be configured to wait for a + certain number of active shard copies before proceeding with the operation. If + the requisite number of active shard copies are not available, then the write + operation must wait and retry, until either the requisite shard copies have started + or a timeout occurs. By default, write operations only wait for the primary shards + to be active before proceeding (that is to say `wait_for_active_shards` is `1`). + This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. + To alter this behavior per operation, use the `wait_for_active_shards request` + parameter. Valid values are all or any positive integer up to the total number + of configured copies per shard in the index (which is `number_of_replicas`+1). + Specifying a negative value or a number greater than the number of shard copies + will throw an error. For example, suppose you have a cluster of three nodes, + A, B, and C and you create an index index with the number of replicas set to + 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt + an indexing operation, by default the operation will only ensure the primary + copy of each shard is available before proceeding. This means that even if B + and C went down and A hosted the primary shard copies, the indexing operation + would still proceed with only one copy of the data. If `wait_for_active_shards` + is set on the request to `3` (and all three nodes are up), the indexing operation + will require 3 active shard copies before proceeding. This requirement should + be met because there are 3 active nodes in the cluster, each one holding a copy + of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, + which is the same in this situation), the indexing operation will not proceed + as you do not have all 4 copies of each shard active in the index. The operation + will timeout unless a new node is brought up in the cluster to host the fourth + copy of the shard. It is important to note that this setting greatly reduces + the chances of the write operation not writing to the requisite number of shard + copies, but it does not completely eliminate the possibility, because this check + occurs before the write operation starts. After the write operation is underway, + it is still possible for replication to fail on any number of shard copies but + still succeed on the primary. 
The `_shards` section of the API response reveals + the number of shard copies on which replication succeeded and failed. + + ``_ + + :param index: The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If - the target doesn’t exist and doesn’t match a data stream template, this request + the target doesn't exist and doesn’t match a data stream template, this request creates the index. - :param id: Unique identifier for the document. + :param id: A unique identifier for the document. To automatically generate a + document ID, use the `POST //_doc/` request format. :param document: - :param pipeline: ID of the pipeline to use to preprocess incoming documents. - If the index has a default ingest pipeline specified, then setting the value - to `_none` disables the default ingest pipeline for this request. If a final - pipeline is configured it will always run, regardless of the value of this + :param pipeline: The ID of the pipeline to use to preprocess incoming documents. + If the index has a default ingest pipeline specified, setting the value to + `_none` turns off the default ingest pipeline for this request. If a final + pipeline is configured, it will always run regardless of the value of this parameter. :param refresh: If `true`, Elasticsearch refreshes the affected shards to make - this operation visible to search, if `wait_for` then wait for a refresh to - make this operation visible to search, if `false` do nothing with refreshes. - Valid values: `true`, `false`, `wait_for`. - :param routing: Custom value used to route operations to a specific shard. - :param timeout: Period the request waits for the following operations: automatic - index creation, dynamic mapping updates, waiting for active shards. - :param version: Explicit version number for concurrency control. The specified - version must match the current version of the document for the request to - succeed. - :param version_type: Specific version type: `external`, `external_gte`. + this operation visible to search. If `wait_for`, it waits for a refresh to + make this operation visible to search. If `false`, it does nothing with refreshes. + :param routing: A custom value that is used to route operations to a specific + shard. + :param timeout: The period the request waits for the following operations: automatic + index creation, dynamic mapping updates, waiting for active shards. Elasticsearch + waits for at least the specified timeout period before failing. The actual + wait time could be longer, particularly when multiple waits occur. This parameter + is useful for situations where the primary shard assigned to perform the + operation might not be available when the operation runs. Some reasons for + this might be that the primary shard is currently recovering from a gateway + or undergoing relocation. By default, the operation will wait on the primary + shard to become available for at least 1 minute before failing and responding + with an error. The actual wait time could be longer, particularly when multiple + waits occur. + :param version: The explicit version number for concurrency control. It must + be a non-negative long number. + :param version_type: The version type. :param wait_for_active_shards: The number of shard copies that must be active - before proceeding with the operation. 
Set to `all` or any positive integer - up to the total number of shards in the index (`number_of_replicas+1`). + before proceeding with the operation. You can set it to `all` or any positive + integer up to the total number of shards in the index (`number_of_replicas+1`). + The default value of `1` means it waits for each primary shard to be active. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -1221,29 +1302,57 @@ def delete( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a document. Removes a JSON document from the specified index. - - ``_ - - :param index: Name of the target index. - :param id: Unique identifier for the document. + Delete a document. Remove a JSON document from the specified index. NOTE: You + cannot send deletion requests directly to a data stream. To delete a document + in a data stream, you must target the backing index containing the document. + **Optimistic concurrency control** Delete operations can be made conditional + and only be performed if the last modification to the document was assigned the + sequence number and primary term specified by the `if_seq_no` and `if_primary_term` + parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` + and a status code of `409`. **Versioning** Each document indexed is versioned. + When deleting a document, the version can be specified to make sure the relevant + document you are trying to delete is actually being deleted and it has not changed + in the meantime. Every write operation run on a document, deletes included, causes + its version to be incremented. The version number of a deleted document remains + available for a short time after deletion to allow for control of concurrent + operations. The length of time for which a deleted document's version remains + available is determined by the `index.gc_deletes` index setting. **Routing** + If routing is used during indexing, the routing value also needs to be specified + to delete a document. If the `_routing` mapping is set to `required` and no routing + value is specified, the delete API throws a `RoutingMissingException` and rejects + the request. For example: ``` DELETE /my-index-000001/_doc/1?routing=shard-1 + ``` This request deletes the document with ID 1, but it is routed based on the + user. The document is not deleted if the correct routing is not specified. **Distributed** + The delete operation gets hashed into a specific shard ID. It then gets redirected + into the primary shard within that ID group and replicated (if needed) to shard + replicas within that ID group. + + ``_ + + :param index: The name of the target index. + :param id: A unique identifier for the document. :param if_primary_term: Only perform the operation if the document has this primary term. :param if_seq_no: Only perform the operation if the document has this sequence number. :param refresh: If `true`, Elasticsearch refreshes the affected shards to make - this operation visible to search, if `wait_for` then wait for a refresh to - make this operation visible to search, if `false` do nothing with refreshes. - Valid values: `true`, `false`, `wait_for`. - :param routing: Custom value used to route operations to a specific shard. - :param timeout: Period to wait for active shards. - :param version: Explicit version number for concurrency control. The specified - version must match the current version of the document for the request to - succeed. 
- :param version_type: Specific version type: `external`, `external_gte`. - :param wait_for_active_shards: The number of shard copies that must be active - before proceeding with the operation. Set to `all` or any positive integer - up to the total number of shards in the index (`number_of_replicas+1`). + this operation visible to search. If `wait_for`, it waits for a refresh to + make this operation visible to search. If `false`, it does nothing with refreshes. + :param routing: A custom value used to route operations to a specific shard. + :param timeout: The period to wait for active shards. This parameter is useful + for situations where the primary shard assigned to perform the delete operation + might not be available when the delete operation runs. Some reasons for this + might be that the primary shard is currently recovering from a store or undergoing + relocation. By default, the delete operation will wait on the primary shard + to become available for up to 1 minute before failing and responding with + an error. + :param version: An explicit version number for concurrency control. It must match + the current version of the document for the request to succeed. + :param version_type: The version type. + :param wait_for_active_shards: The minimum number of shard copies that must be + active before proceeding with the operation. You can set it to `all` or any + positive integer up to the total number of shards in the index (`number_of_replicas+1`). + The default value of `1` means it waits for each primary shard to be active. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -1345,7 +1454,7 @@ def delete_by_query( """ Delete documents. Deletes documents that match the specified query. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this @@ -1526,7 +1635,7 @@ def delete_by_query_rethrottle( takes effect immediately but rethrotting that slows down the query takes effect after completing the current batch to prevent scroll timeouts. - ``_ + ``_ :param task_id: The ID for the task. :param requests_per_second: The throttle for this request in sub-requests per @@ -1572,7 +1681,7 @@ def delete_script( """ Delete a script or search template. Deletes a stored script or search template. - ``_ + ``_ :param id: Identifier for the stored script or search template. :param master_timeout: Period to wait for a connection to the master node. If @@ -1638,32 +1747,54 @@ def exists( ] = None, ) -> HeadApiResponse: """ - Check a document. Checks if a specified document exists. - - ``_ - - :param index: Comma-separated list of data streams, indices, and aliases. Supports - wildcards (`*`). - :param id: Identifier of the document. - :param preference: Specifies the node or shard the operation should be performed - on. Random by default. + Check a document. Verify that a document exists. For example, check to see if + a document with the `_id` 0 exists: ``` HEAD my-index-000001/_doc/0 ``` If the + document exists, the API returns a status code of `200 - OK`. If the document + doesn’t exist, the API returns `404 - Not Found`. **Versioning support** You + can use the `version` parameter to check the document only if its current version + is equal to the specified one. Internally, Elasticsearch has marked the old document + as deleted and added an entirely new document. 
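To make the delete behavior described above concrete (routed deletes and optimistic concurrency control with `if_seq_no` and `if_primary_term`), here is a minimal sketch using the Python client this patch documents. The `Elasticsearch("http://localhost:9200")` connection and the index, ID, and routing values are illustrative assumptions borrowed from the docstring examples, not part of the change itself:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Routed delete: the routing value used at index time must be repeated,
# otherwise a required `_routing` mapping rejects the request.
es.delete(index="my-index-000001", id="1", routing="shard-1")

# Optimistic concurrency control: the delete only succeeds if the document
# still carries this sequence number and primary term; otherwise the request
# fails with a 409 version conflict.
doc = es.get(index="my-index-000001", id="2")
es.delete(
    index="my-index-000001",
    id="2",
    if_seq_no=doc["_seq_no"],
    if_primary_term=doc["_primary_term"],
)
```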
The old version of the document + doesn't disappear immediately, although you won't be able to access it. Elasticsearch + cleans up deleted documents in the background as you continue to index more data. + + ``_ + + :param index: A comma-separated list of data streams, indices, and aliases. It + supports wildcards (`*`). + :param id: A unique document identifier. + :param preference: The node or shard the operation should be performed on. By + default, the operation is randomized between the shard replicas. If it is + set to `_local`, the operation will prefer to be run on a local allocated + shard when possible. If it is set to a custom value, the value is used to + guarantee that the same shards will be used for the same custom value. This + can help with "jumping values" when hitting different shards in different + refresh states. A sample value can be something like the web session ID or + the user name. :param realtime: If `true`, the request is real-time as opposed to near-real-time. - :param refresh: If `true`, Elasticsearch refreshes all shards involved in the - delete by query after the request completes. - :param routing: Target the specified primary shard. - :param source: `true` or `false` to return the `_source` field or not, or a list - of fields to return. - :param source_excludes: A comma-separated list of source fields to exclude in - the response. + :param refresh: If `true`, the request refreshes the relevant shards before retrieving + the document. Setting it to `true` should be done after careful thought and + verification that this does not cause a heavy load on the system (and slow + down indexing). + :param routing: A custom value used to route operations to a specific shard. + :param source: Indicates whether to return the `_source` field (`true` or `false`) + or lists the fields to return. + :param source_excludes: A comma-separated list of source fields to exclude from + the response. You can also use this parameter to exclude fields from the + subset specified in `_source_includes` query parameter. If the `_source` + parameter is `false`, this parameter is ignored. :param source_includes: A comma-separated list of source fields to include in - the response. - :param stored_fields: List of stored fields to return as part of a hit. If no - fields are specified, no stored fields are included in the response. If this - field is specified, the `_source` parameter defaults to false. + the response. If this parameter is specified, only these source fields are + returned. You can exclude fields from this subset using the `_source_excludes` + query parameter. If the `_source` parameter is `false`, this parameter is + ignored. + :param stored_fields: A comma-separated list of stored fields to return as part + of a hit. If no fields are specified, no stored fields are included in the + response. If this field is specified, the `_source` parameter defaults to + `false`. :param version: Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed. - :param version_type: Specific version type: `external`, `external_gte`. + :param version_type: The version type. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -1739,29 +1870,32 @@ def exists_source( ] = None, ) -> HeadApiResponse: """ - Check for a document source. Checks if a document's `_source` is stored. + Check for a document source. Check whether a document source exists in an index. 
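The existence checks above map onto the client's `exists` and `exists_source` methods. A small sketch, again assuming a local cluster and the example index and IDs; both calls return a `HeadApiResponse` that is truthy when the check succeeds:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Equivalent to HEAD my-index-000001/_doc/0
if es.exists(index="my-index-000001", id="0"):
    print("document 0 exists")

# Equivalent to HEAD my-index-000001/_source/1; this check fails if _source
# is disabled in the mapping even when the document itself exists.
if es.exists_source(index="my-index-000001", id="1"):
    print("source of document 1 is available")
```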
+ For example: ``` HEAD my-index-000001/_source/1 ``` A document's source is not + available if it is disabled in the mapping. - ``_ + ``_ - :param index: Comma-separated list of data streams, indices, and aliases. Supports - wildcards (`*`). - :param id: Identifier of the document. - :param preference: Specifies the node or shard the operation should be performed - on. Random by default. - :param realtime: If true, the request is real-time as opposed to near-real-time. - :param refresh: If `true`, Elasticsearch refreshes all shards involved in the - delete by query after the request completes. - :param routing: Target the specified primary shard. - :param source: `true` or `false` to return the `_source` field or not, or a list - of fields to return. + :param index: A comma-separated list of data streams, indices, and aliases. It + supports wildcards (`*`). + :param id: A unique identifier for the document. + :param preference: The node or shard the operation should be performed on. By + default, the operation is randomized between the shard replicas. + :param realtime: If `true`, the request is real-time as opposed to near-real-time. + :param refresh: If `true`, the request refreshes the relevant shards before retrieving + the document. Setting it to `true` should be done after careful thought and + verification that this does not cause a heavy load on the system (and slow + down indexing). + :param routing: A custom value used to route operations to a specific shard. + :param source: Indicates whether to return the `_source` field (`true` or `false`) + or lists the fields to return. :param source_excludes: A comma-separated list of source fields to exclude in the response. :param source_includes: A comma-separated list of source fields to include in the response. - :param version: Explicit version number for concurrency control. The specified - version must match the current version of the document for the request to - succeed. - :param version_type: Specific version type: `external`, `external_gte`. + :param version: The version number for concurrency control. It must match the + current version of the document for the request to succeed. + :param version_type: The version type. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -1842,7 +1976,7 @@ def explain( Explain a document match result. Returns information about why a specific document matches, or doesn’t match, a query. - ``_ + ``_ :param index: Index names used to limit the request. Only a single index name can be provided to this parameter. @@ -1965,7 +2099,7 @@ def field_caps( field. For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the `keyword` family. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams @@ -2079,36 +2213,78 @@ def get( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a document by its ID. Retrieves the document with the specified ID from an - index. - - ``_ - - :param index: Name of the index that contains the document. - :param id: Unique identifier of the document. - :param force_synthetic_source: Should this request force synthetic _source? Use - this to test if the mapping supports synthetic _source and to get a sense - of the worst case performance. Fetches with this enabled will be slower the - enabling synthetic source natively in the index. 
- :param preference: Specifies the node or shard the operation should be performed - on. Random by default. + Get a document by its ID. Get a document and its source or stored fields from + an index. By default, this API is realtime and is not affected by the refresh + rate of the index (when data will become visible for search). In the case where + stored fields are requested with the `stored_fields` parameter and the document + has been updated but is not yet refreshed, the API will have to parse and analyze + the source to extract the stored fields. To turn off realtime behavior, set the + `realtime` parameter to false. **Source filtering** By default, the API returns + the contents of the `_source` field unless you have used the `stored_fields` + parameter or the `_source` field is turned off. You can turn off `_source` retrieval + by using the `_source` parameter: ``` GET my-index-000001/_doc/0?_source=false + ``` If you only need one or two fields from the `_source`, use the `_source_includes` + or `_source_excludes` parameters to include or filter out particular fields. + This can be helpful with large documents where partial retrieval can save on + network overhead. Both parameters take a comma-separated list of fields or wildcard + expressions. For example: ``` GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities + ``` If you only want to specify includes, you can use a shorter notation: ``` + GET my-index-000001/_doc/0?_source=*.id ``` **Routing** If routing is used during + indexing, the routing value also needs to be specified to retrieve a document. + For example: ``` GET my-index-000001/_doc/2?routing=user1 ``` This request gets + the document with ID 2, but it is routed based on the user. The document is not + fetched if the correct routing is not specified. **Distributed** The GET operation + is hashed into a specific shard ID. It is then redirected to one of the replicas + within that shard ID and returns the result. The replicas are the primary shard + and its replicas within that shard ID group. This means that the more replicas + you have, the better your GET scaling will be. **Versioning support** You can + use the `version` parameter to retrieve the document only if its current version + is equal to the specified one. Internally, Elasticsearch has marked the old document + as deleted and added an entirely new document. The old version of the document + doesn't disappear immediately, although you won't be able to access it. Elasticsearch + cleans up deleted documents in the background as you continue to index more data. + + ``_ + + :param index: The name of the index that contains the document. + :param id: A unique document identifier. + :param force_synthetic_source: Indicates whether the request forces synthetic + `_source`. Use this parameter to test if the mapping supports synthetic `_source` + and to get a sense of the worst case performance. Fetches with this parameter + enabled will be slower than enabling synthetic source natively in the index. + :param preference: The node or shard the operation should be performed on. By + default, the operation is randomized between the shard replicas. If it is + set to `_local`, the operation will prefer to be run on a local allocated + shard when possible. If it is set to a custom value, the value is used to + guarantee that the same shards will be used for the same custom value. This + can help with "jumping values" when hitting different shards in different + refresh states.
A sample value can be something like the web session ID or + the user name. :param realtime: If `true`, the request is real-time as opposed to near-real-time. - :param refresh: If true, Elasticsearch refreshes the affected shards to make - this operation visible to search. If false, do nothing with refreshes. - :param routing: Target the specified primary shard. - :param source: True or false to return the _source field or not, or a list of - fields to return. - :param source_excludes: A comma-separated list of source fields to exclude in - the response. + :param refresh: If `true`, the request refreshes the relevant shards before retrieving + the document. Setting it to `true` should be done after careful thought and + verification that this does not cause a heavy load on the system (and slow + down indexing). + :param routing: A custom value used to route operations to a specific shard. + :param source: Indicates whether to return the `_source` field (`true` or `false`) + or lists the fields to return. + :param source_excludes: A comma-separated list of source fields to exclude from + the response. You can also use this parameter to exclude fields from the + subset specified in `_source_includes` query parameter. If the `_source` + parameter is `false`, this parameter is ignored. :param source_includes: A comma-separated list of source fields to include in - the response. - :param stored_fields: List of stored fields to return as part of a hit. If no - fields are specified, no stored fields are included in the response. If this - field is specified, the `_source` parameter defaults to false. - :param version: Explicit version number for concurrency control. The specified - version must match the current version of the document for the request to - succeed. - :param version_type: Specific version type: internal, external, external_gte. + the response. If this parameter is specified, only these source fields are + returned. You can exclude fields from this subset using the `_source_excludes` + query parameter. If the `_source` parameter is `false`, this parameter is + ignored. + :param stored_fields: A comma-separated list of stored fields to return as part + of a hit. If no fields are specified, no stored fields are included in the + response. If this field is specified, the `_source` parameter defaults to + `false`. Only leaf fields can be retrieved with the `stored_field` option. + Object fields can't be returned;​if specified, the request fails. + :param version: The version number for concurrency control. It must match the + current version of the document for the request to succeed. + :param version_type: The version type. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -2171,7 +2347,7 @@ def get_script( """ Get a script or search template. Retrieves a stored script or search template. - ``_ + ``_ :param id: Identifier for the stored script or search template. :param master_timeout: Specify timeout for connection to master @@ -2213,7 +2389,7 @@ def get_script_context( """ Get script contexts. Get a list of supported script contexts and their methods. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_script_context" @@ -2248,7 +2424,7 @@ def get_script_languages( """ Get script languages. Get a list of available script types, languages, and contexts. 
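As a usage sketch for the `get` parameters documented above (source filtering, routing, and `preference`), assuming the same kind of local client and the example index from the docstring:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster

# GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
resp = es.get(
    index="my-index-000001",
    id="0",
    source_includes="*.id",
    source_excludes="entities",
)
print(resp["_source"])

# Routed get: repeat the routing value that was used at index time.
routed = es.get(index="my-index-000001", id="2", routing="user1")
```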
- ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_script_language" @@ -2301,29 +2477,34 @@ def get_source( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a document's source. Returns the source of a document. + Get a document's source. Get the source of a document. For example: ``` GET my-index-000001/_source/1 + ``` You can use the source filtering parameters to control which parts of the + `_source` are returned: ``` GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities + ``` - ``_ + ``_ - :param index: Name of the index that contains the document. - :param id: Unique identifier of the document. - :param preference: Specifies the node or shard the operation should be performed - on. Random by default. - :param realtime: Boolean) If true, the request is real-time as opposed to near-real-time. - :param refresh: If true, Elasticsearch refreshes the affected shards to make - this operation visible to search. If false, do nothing with refreshes. - :param routing: Target the specified primary shard. - :param source: True or false to return the _source field or not, or a list of - fields to return. + :param index: The name of the index that contains the document. + :param id: A unique document identifier. + :param preference: The node or shard the operation should be performed on. By + default, the operation is randomized between the shard replicas. + :param realtime: If `true`, the request is real-time as opposed to near-real-time. + :param refresh: If `true`, the request refreshes the relevant shards before retrieving + the document. Setting it to `true` should be done after careful thought and + verification that this does not cause a heavy load on the system (and slow + down indexing). + :param routing: A custom value used to route operations to a specific shard. + :param source: Indicates whether to return the `_source` field (`true` or `false`) + or lists the fields to return. :param source_excludes: A comma-separated list of source fields to exclude in the response. :param source_includes: A comma-separated list of source fields to include in the response. - :param stored_fields: - :param version: Explicit version number for concurrency control. The specified - version must match the current version of the document for the request to - succeed. - :param version_type: Specific version type: internal, external, external_gte. + :param stored_fields: A comma-separated list of stored fields to return as part + of a hit. + :param version: The version number for concurrency control. It must match the + current version of the document for the request to succeed. + :param version_type: The version type. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -2405,7 +2586,7 @@ def health_report( for health status, set verbose to false to disable the more expensive analysis logic. - ``_ + ``_ :param feature: A feature of the cluster, as returned by the top-level health report API. @@ -2478,44 +2659,170 @@ def index( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Index a document. Adds a JSON document to the specified data stream or index - and makes it searchable. If the target is an index and the document already exists, - the request updates the document and increments its version. - - ``_ - - :param index: Name of the data stream or index to target. + Create or update a document in an index. Add a JSON document to the specified + data stream or index and make it searchable. 
If the target is an index and the + document already exists, the request updates the document and increments its + version. NOTE: You cannot use this API to send update requests for existing documents + in a data stream. If the Elasticsearch security features are enabled, you must + have the following index privileges for the target data stream, index, or index + alias: * To add or overwrite a document using the `PUT //_doc/<_id>` + request format, you must have the `create`, `index`, or `write` index privilege. + * To add a document using the `POST //_doc/` request format, you must + have the `create_doc`, `create`, `index`, or `write` index privilege. * To automatically + create a data stream or index with this API request, you must have the `auto_configure`, + `create_index`, or `manage` index privilege. Automatic data stream creation requires + a matching index template with data stream enabled. NOTE: Replica shards might + not all be started when an indexing operation returns successfully. By default, + only the primary is required. Set `wait_for_active_shards` to change this default + behavior. **Automatically create data streams and indices** If the request's + target doesn't exist and matches an index template with a `data_stream` definition, + the index operation automatically creates the data stream. If the target doesn't + exist and doesn't match a data stream template, the operation automatically creates + the index and applies any matching index templates. NOTE: Elasticsearch includes + several built-in index templates. To avoid naming collisions with these templates, + refer to index pattern documentation. If no mapping exists, the index operation + creates a dynamic mapping. By default, new fields and objects are automatically + added to the mapping if needed. Automatic index creation is controlled by the + `action.auto_create_index` setting. If it is `true`, any index can be created + automatically. You can modify this setting to explicitly allow or block automatic + creation of indices that match specified patterns or set it to `false` to turn + off automatic index creation entirely. Specify a comma-separated list of patterns + you want to allow or prefix each pattern with `+` or `-` to indicate whether + it should be allowed or blocked. When a list is specified, the default behaviour + is to disallow. NOTE: The `action.auto_create_index` setting affects the automatic + creation of indices only. It does not affect the creation of data streams. **Optimistic + concurrency control** Index operations can be made conditional and only be performed + if the last modification to the document was assigned the sequence number and + primary term specified by the `if_seq_no` and `if_primary_term` parameters. If + a mismatch is detected, the operation will result in a `VersionConflictException` + and a status code of `409`. **Routing** By default, shard placement — or routing + — is controlled by using a hash of the document's ID value. For more explicit + control, the value fed into the hash function used by the router can be directly + specified on a per-operation basis using the `routing` parameter. When setting + up explicit mapping, you can also use the `_routing` field to direct the index + operation to extract the routing value from the document itself. This does come + at the (very minimal) cost of an additional document parsing pass. If the `_routing` + mapping is defined and set to be required, the index operation will fail if no + routing value is provided or extracted. 
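A brief sketch of the indexing behaviors described above, explicit routing and optimistic concurrency control, using the Python client. The connection, index name, and the sequence number and primary term values are assumptions for illustration:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Explicit routing: the hash of "user1", not of the document ID, picks the shard.
es.index(
    index="my-index-000001",
    id="1",
    routing="user1",
    document={"user": {"id": "elkbee"}},
)

# Conditional write: only applied if the stored document still has this
# sequence number and primary term (the values here are purely illustrative).
es.index(
    index="my-index-000001",
    id="1",
    document={"user": {"id": "elkbee"}},
    if_seq_no=10,
    if_primary_term=1,
)
```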
NOTE: Data streams do not support custom + routing unless they were created with the `allow_custom_routing` setting enabled + in the template. **Distributed** The index operation is directed to the primary + shard based on its route and performed on the actual node containing this shard. + After the primary shard completes the operation, if needed, the update is distributed + to applicable replicas. **Active shards** To improve the resiliency of writes + to the system, indexing operations can be configured to wait for a certain number + of active shard copies before proceeding with the operation. If the requisite + number of active shard copies are not available, then the write operation must + wait and retry, until either the requisite shard copies have started or a timeout + occurs. By default, write operations only wait for the primary shards to be active + before proceeding (that is to say `wait_for_active_shards` is `1`). This default + can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. + To alter this behavior per operation, use the `wait_for_active_shards` request + parameter. Valid values are all or any positive integer up to the total number + of configured copies per shard in the index (which is `number_of_replicas`+1). + Specifying a negative value or a number greater than the number of shard copies + will throw an error. For example, suppose you have a cluster of three nodes, + A, B, and C and you create an index index with the number of replicas set to + 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt + an indexing operation, by default the operation will only ensure the primary + copy of each shard is available before proceeding. This means that even if B + and C went down and A hosted the primary shard copies, the indexing operation + would still proceed with only one copy of the data. If `wait_for_active_shards` + is set on the request to `3` (and all three nodes are up), the indexing operation + will require 3 active shard copies before proceeding. This requirement should + be met because there are 3 active nodes in the cluster, each one holding a copy + of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, + which is the same in this situation), the indexing operation will not proceed + as you do not have all 4 copies of each shard active in the index. The operation + will time out unless a new node is brought up in the cluster to host the fourth + copy of the shard. It is important to note that this setting greatly reduces + the chances of the write operation not writing to the requisite number of shard + copies, but it does not completely eliminate the possibility, because this check + occurs before the write operation starts. After the write operation is underway, + it is still possible for replication to fail on any number of shard copies but + still succeed on the primary. The `_shards` section of the API response reveals + the number of shard copies on which replication succeeded and failed. **No operation + (noop) updates** When updating a document by using this API, a new version of + the document is always created even if the document hasn't changed. If this isn't + acceptable, use the `_update` API with `detect_noop` set to `true`. The `detect_noop` + option isn't available on this API because it doesn't fetch the old source and + isn't able to compare it against the new source. There isn't a definitive rule + for when noop updates aren't acceptable.
It's a combination of lots of factors + like how frequently your data source sends updates that are actually noops and + how many queries per second Elasticsearch runs on the shard receiving the updates. + **Versioning** Each indexed document is given a version number. By default, internal + versioning is used that starts at 1 and increments with each update, deletes + included. Optionally, the version number can be set to an external value (for + example, if maintained in a database). To enable this functionality, `version_type` + should be set to `external`. The value provided must be a numeric, long value + greater than or equal to 0, and less than around `9.2e+18`. NOTE: Versioning + is completely real time, and is not affected by the near real time aspects of + search operations. If no version is provided, the operation runs without any + version checks. When using the external version type, the system checks to see + if the version number passed to the index request is greater than the version + of the currently stored document. If true, the document will be indexed and the + new version number used. If the value provided is less than or equal to the stored + document's version number, a version conflict will occur and the index operation + will fail. For example: ``` PUT my-index-000001/_doc/1?version=2&version_type=external + { "user": { "id": "elkbee" } } ``` In this example, the operation will succeed since + the supplied version of 2 is higher than the current document version of 1. If + the document was already updated and its version was set to 2 or higher, the + indexing command will fail and result in a conflict (409 HTTP status code). A + nice side effect is that there is no need to maintain strict ordering of async + indexing operations run as a result of changes to a source database, as long + as version numbers from the source database are used. Even the simple case of + updating the Elasticsearch index using data from a database is simplified if + external versioning is used, as only the latest version will be used if the index + operations arrive out of order. + + ``_ + + :param index: The name of the data stream or index to target. If the target doesn't + exist and matches the name or wildcard (`*`) pattern of an index template + with a `data_stream` definition, this request creates the data stream. If + the target doesn't exist and doesn't match a data stream template, this request + creates the index. You can check for existing targets with the resolve index + API. :param document: + :param id: A unique identifier for the document. To automatically generate a + document ID, use the `POST //_doc/` request format and omit this + parameter. :param if_primary_term: Only perform the operation if the document has this primary term. :param if_seq_no: Only perform the operation if the document has this sequence number. - :param op_type: Set to create to only index the document if it does not already + :param op_type: Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, - the indexing operation will fail. Same as using the `/_create` endpoint. - Valid values: `index`, `create`. If document id is specified, it defaults - to `index`. Otherwise, it defaults to `create`. - :param pipeline: ID of the pipeline to use to preprocess incoming documents. + the indexing operation will fail. The behavior is the same as using the `/_create` + endpoint.
If a document ID is specified, this parameter defaults to `index`. + Otherwise, it defaults to `create`. If the request targets a data stream, + an `op_type` of `create` is required. + :param pipeline: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. :param refresh: If `true`, Elasticsearch refreshes the affected shards to make - this operation visible to search, if `wait_for` then wait for a refresh to - make this operation visible to search, if `false` do nothing with refreshes. - Valid values: `true`, `false`, `wait_for`. + this operation visible to search. If `wait_for`, it waits for a refresh to + make this operation visible to search. If `false`, it does nothing with refreshes. :param require_alias: If `true`, the destination must be an index alias. - :param routing: Custom value used to route operations to a specific shard. - :param timeout: Period the request waits for the following operations: automatic - index creation, dynamic mapping updates, waiting for active shards. - :param version: Explicit version number for concurrency control. The specified - version must match the current version of the document for the request to - succeed. - :param version_type: Specific version type: `external`, `external_gte`. + :param routing: A custom value that is used to route operations to a specific + shard. + :param timeout: The period the request waits for the following operations: automatic + index creation, dynamic mapping updates, waiting for active shards. This + parameter is useful for situations where the primary shard assigned to perform + the operation might not be available when the operation runs. Some reasons + for this might be that the primary shard is currently recovering from a gateway + or undergoing relocation. By default, the operation will wait on the primary + shard to become available for at least 1 minute before failing and responding + with an error. The actual wait time could be longer, particularly when multiple + waits occur. + :param version: An explicit version number for concurrency control. It must be + a non-negative long number. + :param version_type: The version type. :param wait_for_active_shards: The number of shard copies that must be active - before proceeding with the operation. Set to all or any positive integer - up to the total number of shards in the index (`number_of_replicas+1`). + before proceeding with the operation. You can set it to `all` or any positive + integer up to the total number of shards in the index (`number_of_replicas+1`). + The default value of `1` means it waits for each primary shard to be active. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -2591,7 +2898,7 @@ def info( """ Get cluster info. Get basic build, version, and cluster information. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/" @@ -2656,7 +2963,7 @@ def knn_search( The kNN search API supports restricting the search using a filter. The search will return the top k documents that also match the filter query. - ``_ + ``_ :param index: A comma-separated list of index names to search; use `_all` or to perform the operation on all indices @@ -2760,7 +3067,7 @@ def mget( IDs in the request body.
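Returning to the index API documented above, the external versioning and `wait_for_active_shards` behaviors look roughly like this with the Python client; the connection, index name, and shard-count value are assumptions for illustration:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Equivalent to PUT my-index-000001/_doc/1?version=2&version_type=external:
# indexed only if 2 is greater than the currently stored version.
es.index(
    index="my-index-000001",
    id="1",
    document={"user": {"id": "elkbee"}},
    version=2,
    version_type="external",
)

# Require three active shard copies before the write proceeds; assumes the
# index was created with enough replicas for this to be satisfiable.
es.index(
    index="my-index-000001",
    document={"user": {"id": "elkbee"}},
    wait_for_active_shards=3,
)
```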
To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail. - ``_ + ``_ :param index: Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index. @@ -2887,7 +3194,7 @@ def msearch( Each newline character may be preceded by a carriage return `\\r`. When sending requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`. - ``_ + ``_ :param searches: :param index: Comma-separated list of data streams, indices, and index aliases @@ -3019,7 +3326,7 @@ def msearch_template( """ Run multiple templated searches. - ``_ + ``_ :param search_templates: :param index: Comma-separated list of data streams, indices, and aliases to search. @@ -3118,7 +3425,7 @@ def mtermvectors( with all the fetched termvectors. Each element has the structure provided by the termvectors API. - ``_ + ``_ :param index: Name of the index that contains the documents. :param docs: Array of existing or artificial documents. @@ -3238,7 +3545,7 @@ def open_point_in_time( A point in time must be opened explicitly before being used in search requests. The `keep_alive` parameter tells Elasticsearch how long it should persist. - ``_ + ``_ :param index: A comma-separated list of index names to open point in time; use `_all` or empty string to perform the operation on all indices @@ -3326,7 +3633,7 @@ def put_script( Create or update a script or search template. Creates or updates a stored script or search template. - ``_ + ``_ :param id: Identifier for the stored script or search template. Must be unique within the cluster. @@ -3412,7 +3719,7 @@ def rank_eval( Evaluate ranked search results. Evaluate the quality of ranked search results over a set of typical search queries. - ``_ + ``_ :param requests: A set of typical search requests, together with their provided ratings. @@ -3504,33 +3811,191 @@ def reindex( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Reindex documents. Copies documents from a source to a destination. The source - can be any existing index, alias, or data stream. The destination must differ - from the source. For example, you cannot reindex a data stream into itself. - - ``_ + Reindex documents. Copy documents from a source to a destination. You can copy + all documents to the destination index or reindex a subset of the documents. + The source can be any existing index, alias, or data stream. The destination + must differ from the source. For example, you cannot reindex a data stream into + itself. IMPORTANT: Reindex requires `_source` to be enabled for all documents + in the source. The destination should be configured as wanted before calling + the reindex API. Reindex does not copy the settings from the source or its associated + template. Mappings, shard counts, and replicas, for example, must be configured + ahead of time. If the Elasticsearch security features are enabled, you must have + the following security privileges: * The `read` index privilege for the source + data stream, index, or alias. * The `write` index privilege for the destination + data stream, index, or index alias. * To automatically create a data stream or + index with a reindex API request, you must have the `auto_configure`, `create_index`, + or `manage` index privilege for the destination data stream, index, or alias. 
+ * If reindexing from a remote cluster, the `source.remote.user` must have the + `monitor` cluster privilege and the `read` index privilege for the source data + stream, index, or alias. If reindexing from a remote cluster, you must explicitly + allow the remote host in the `reindex.remote.whitelist` setting. Automatic data + stream creation requires a matching index template with data stream enabled. + The `dest` element can be configured like the index API to control optimistic + concurrency control. Omitting `version_type` or setting it to `internal` causes + Elasticsearch to blindly dump documents into the destination, overwriting any + that happen to have the same ID. Setting `version_type` to `external` causes + Elasticsearch to preserve the `version` from the source, create any documents + that are missing, and update any documents that have an older version in the + destination than they do in the source. Setting `op_type` to `create` causes + the reindex API to create only missing documents in the destination. All existing + documents will cause a version conflict. IMPORTANT: Because data streams are + append-only, any reindex request to a destination data stream must have an `op_type` + of `create`. A reindex can only add new documents to a destination data stream. + It cannot update existing documents in a destination data stream. By default, + version conflicts abort the reindex process. To continue reindexing if there + are conflicts, set the `conflicts` request body property to `proceed`. In this + case, the response includes a count of the version conflicts that were encountered. + Note that the handling of other error types is unaffected by the `conflicts` + property. Additionally, if you opt to count version conflicts, the operation + could attempt to reindex more documents from the source than `max_docs` until + it has successfully indexed `max_docs` documents into the target or it has gone + through every document in the source query. NOTE: The reindex API makes no effort + to handle ID collisions. The last document written will "win" but the order isn't + usually predictable so it is not a good idea to rely on this behavior. Instead, + make sure that IDs are unique by using a script. **Running reindex asynchronously** + If the request contains `wait_for_completion=false`, Elasticsearch performs some + preflight checks, launches the request, and returns a task you can use to cancel + or get the status of the task. Elasticsearch creates a record of this task as + a document at `_tasks/`. **Reindex from multiple sources** If you have + many sources to reindex it is generally better to reindex them one at a time + rather than using a glob pattern to pick up multiple sources. That way you can + resume the process if there are any errors by removing the partially completed + source and starting over. It also makes parallelizing the process fairly simple: + split the list of sources to reindex and run each list in parallel. For example, + you can use a bash script like this: ``` for index in i1 i2 i3 i4 i5; do curl + -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{ "source": + { "index": "'$index'" }, "dest": { "index": "'$index'-reindexed" } }' done ``` + **Throttling** Set `requests_per_second` to any positive decimal number (`1.4`, + `6`, `1000`, for example) to throttle the rate at which reindex issues batches + of index operations. Requests are throttled by padding each batch with a wait + time. 
To turn off throttling, set `requests_per_second` to `-1`. The throttling + is done by waiting between batches so that the scroll that reindex uses internally + can be given a timeout that takes into account the padding. The padding time + is the difference between the batch size divided by the `requests_per_second` + and the time spent writing. By default the batch size is `1000`, so if `requests_per_second` + is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time + = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` Since the + batch is issued as a single bulk request, large batch sizes cause Elasticsearch + to create many requests and then wait for a while before starting the next set. + This is "bursty" instead of "smooth". **Slicing** Reindex supports sliced scroll + to parallelize the reindexing process. This parallelization can improve efficiency + and provide a convenient way to break the request down into smaller parts. NOTE: + Reindexing from remote clusters does not support manual or automatic slicing. + You can slice a reindex request manually by providing a slice ID and total number + of slices to each request. You can also let reindex automatically parallelize + by using sliced scroll to slice on `_id`. The `slices` parameter specifies the + number of slices to use. Adding `slices` to the reindex request just automates + the manual process, creating sub-requests which means it has some quirks: * You + can see these requests in the tasks API. These sub-requests are "child" tasks + of the task for the request with slices. * Fetching the status of the task for + the request with `slices` only contains the status of completed slices. * These + sub-requests are individually addressable for things like cancellation and rethrottling. + * Rethrottling the request with `slices` will rethrottle the unfinished sub-request + proportionally. * Canceling the request with `slices` will cancel each sub-request. + * Due to the nature of `slices`, each sub-request won't get a perfectly even + portion of the documents. All documents will be addressed, but some slices may + be larger than others. Expect larger slices to have a more even distribution. + * Parameters like `requests_per_second` and `max_docs` on a request with `slices` + are distributed proportionally to each sub-request. Combine that with the previous + point about distribution being uneven and you should conclude that using `max_docs` + with `slices` might not result in exactly `max_docs` documents being reindexed. + * Each sub-request gets a slightly different snapshot of the source, though these + are all taken at approximately the same time. If slicing automatically, setting + `slices` to `auto` will choose a reasonable number for most indices. If slicing + manually or otherwise tuning automatic slicing, use the following guidelines. + Query performance is most efficient when the number of slices is equal to the + number of shards in the index. If that number is large (for example, `500`), + choose a lower number as too many slices will hurt performance. Setting slices + higher than the number of shards generally does not improve efficiency and adds + overhead. Indexing performance scales linearly across available resources with + the number of slices. Whether query or indexing performance dominates the runtime + depends on the documents being reindexed and cluster resources. 
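The throttling and slicing discussion above translates into parameters on the client's `reindex` method. A minimal sketch, assuming a local cluster and illustrative index names; running with `wait_for_completion=False` returns a task ID that can later be passed to the tasks API or to `reindex_rethrottle`:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local cluster

resp = es.reindex(
    source={"index": "my-index-000001"},
    dest={"index": "my-index-000001-reindexed"},
    conflicts="proceed",        # count version conflicts instead of aborting
    slices="auto",              # let Elasticsearch pick the slice count
    requests_per_second=500,    # throttle the batches of index operations
    wait_for_completion=False,  # run asynchronously
)
print(resp["task"])  # task ID for monitoring, cancellation, or rethrottling
```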
**Modify documents + during reindexing** Like `_update_by_query`, reindex operations support a script + that modifies the document. Unlike `_update_by_query`, the script is allowed + to modify the document's metadata. Just as in `_update_by_query`, you can set + `ctx.op` to change the operation that is run on the destination. For example, + set `ctx.op` to `noop` if your script decides that the document doesn't have + to be indexed in the destination. This "no operation" will be reported in the + `noop` counter in the response body. Set `ctx.op` to `delete` if your script + decides that the document must be deleted from the destination. The deletion + will be reported in the `deleted` counter in the response body. Setting `ctx.op` + to anything else will return an error, as will setting any other field in `ctx`. + Think of the possibilities! Just be careful; you are able to change: * `_id` + * `_index` * `_version` * `_routing` Setting `_version` to `null` or clearing + it from the `ctx` map is just like not sending the version in an indexing request. + It will cause the document to be overwritten in the destination regardless of + the version on the target or the version type you use in the reindex API. **Reindex + from remote** Reindex supports reindexing from a remote Elasticsearch cluster. + The `host` parameter must contain a scheme, host, port, and optional path. The + `username` and `password` parameters are optional and when they are present the + reindex operation will connect to the remote Elasticsearch node using basic authentication. + Be sure to use HTTPS when using basic authentication or the password will be + sent in plain text. There are a range of settings available to configure the + behavior of the HTTPS connection. When using Elastic Cloud, it is also possible + to authenticate against the remote cluster through the use of a valid API key. + Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting. + It can be set to a comma-delimited list of allowed remote host and port combinations. + Scheme is ignored; only the host and port are used. For example: ``` reindex.remote.whitelist: + [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*] ``` The list of + allowed hosts must be configured on any nodes that will coordinate the reindex. + This feature should work with remote clusters of any version of Elasticsearch. + This should enable you to upgrade from any version of Elasticsearch to the current + version by reindexing from a cluster of the old version. WARNING: Elasticsearch + does not support forward compatibility across major versions. For example, you + cannot reindex from a 7.x cluster into a 6.x cluster. To enable queries sent + to older versions of Elasticsearch, the `query` parameter is sent directly to + the remote host without validation or modification. NOTE: Reindexing from remote + clusters does not support manual or automatic slicing. Reindexing from a remote + server uses an on-heap buffer that defaults to a maximum size of 100mb. If the + remote index includes very large documents you'll need to use a smaller batch + size. It is also possible to set the socket read timeout on the remote connection + with the `socket_timeout` field and the connection timeout with the `connect_timeout` + field. Both default to 30 seconds. **Configuring SSL parameters** Reindex from + remote supports configurable SSL settings.
These must be specified in the `elasticsearch.yml` + file, with the exception of the secure settings, which you add in the Elasticsearch + keystore. It is not possible to configure SSL in the body of the reindex request. + + ``_ :param dest: The destination you are copying to. :param source: The source you are copying from. - :param conflicts: Set to proceed to continue reindexing even if there are conflicts. - :param max_docs: The maximum number of documents to reindex. + :param conflicts: Indicates whether to continue reindexing even when there are + conflicts. + :param max_docs: The maximum number of documents to reindex. By default, all + documents are reindexed. If it is a value less than or equal to `scroll_size`, + a scroll will not be used to retrieve the results for the operation. If `conflicts` + is set to `proceed`, the reindex operation could attempt to reindex more + documents from the source than `max_docs` until it has successfully indexed + `max_docs` documents into the target or it has gone through every document + in the source query. :param refresh: If `true`, the request refreshes affected shards to make this operation visible to search. :param requests_per_second: The throttle for this request in sub-requests per - second. Defaults to no throttle. + second. By default, there is no throttle. :param require_alias: If `true`, the destination must be an index alias. :param script: The script to run to update the document source or metadata when reindexing. - :param scroll: Specifies how long a consistent view of the index should be maintained - for scrolled search. + :param scroll: The period of time that a consistent view of the index should + be maintained for scrolled search. :param size: - :param slices: The number of slices this task should be divided into. Defaults - to 1 slice, meaning the task isn’t sliced into subtasks. - :param timeout: Period each indexing waits for automatic index creation, dynamic - mapping updates, and waiting for active shards. + :param slices: The number of slices this task should be divided into. It defaults + to one slice, which means the task isn't sliced into subtasks. Reindex supports + sliced scroll to parallelize the reindexing process. This parallelization + can improve efficiency and provide a convenient way to break the request + down into smaller parts. NOTE: Reindexing from remote clusters does not support + manual or automatic slicing. If set to `auto`, Elasticsearch chooses the + number of slices to use. This setting will use one slice per shard, up to + a certain limit. If there are multiple sources, it will choose the number + of slices based on the index or backing index with the smallest number of + shards. + :param timeout: The period each indexing waits for automatic index creation, + dynamic mapping updates, and waiting for active shards. By default, Elasticsearch + waits for at least one minute before failing. The actual wait time could + be longer, particularly when multiple waits occur. :param wait_for_active_shards: The number of shard copies that must be active - before proceeding with the operation. Set to `all` or any positive integer - up to the total number of shards in the index (`number_of_replicas+1`). + before proceeding with the operation. Set it to `all` or any positive integer + up to the total number of shards in the index (`number_of_replicas+1`). The + default value is one, which means it waits for each primary shard to be active.
:param wait_for_completion: If `true`, the request blocks until the operation is complete. """ @@ -3603,13 +4068,17 @@ def reindex_rethrottle( ) -> ObjectApiResponse[t.Any]: """ Throttle a reindex operation. Change the number of requests per second for a - particular reindex operation. + particular reindex operation. For example: ``` POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 + ``` Rethrottling that speeds up the query takes effect immediately. Rethrottling + that slows down the query will take effect after completing the current batch. + This behavior prevents scroll timeouts. - ``_ + ``_ - :param task_id: Identifier for the task. + :param task_id: The task identifier, which can be found by using the tasks API. :param requests_per_second: The throttle for this request in sub-requests per - second. + second. It can be either `-1` to turn off throttling or any decimal number + like `1.7` or `12` to throttle to that level. """ if task_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_id'") @@ -3656,7 +4125,7 @@ def render_search_template( """ Render a search template. Render a search template as a search request body. - ``_ + ``_ :param id: ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required. @@ -3725,7 +4194,7 @@ def scripts_painless_execute( """ Run a script. Runs a script and returns a result. - ``_ + ``_ :param context: The context that the script should run in. :param context_setup: Additional parameters for the `context`. @@ -3798,7 +4267,7 @@ def scroll( of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests. - ``_ + ``_ :param scroll_id: Scroll ID of the search. :param rest_total_hits_as_int: If true, the API response’s hit.total property @@ -3990,7 +4459,7 @@ def search( can provide search queries using the `q` query string parameter or the request body. If both are specified, only the query parameter is used. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams and indices, omit this @@ -4420,7 +4889,7 @@ def search_mvt( """ Search a vector tile. Search a vector tile for geospatial values. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, or aliases to search :param field: Field containing geospatial data to return @@ -4578,7 +5047,7 @@ def search_shards( optimizations with routing and shard preferences. When filtered aliases are used, the filter is returned as part of the indices section. - ``_ + ``_ :param index: Returns the indices and shards that a search request would be executed against. @@ -4682,7 +5151,7 @@ def search_template( """ Run a search with a search template. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (*). @@ -4822,7 +5291,7 @@ def terms_enum( are actually deleted. Until that happens, the terms enum API will return terms from these documents. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and index aliases to search. Wildcard (*) expressions are supported. @@ -4921,7 +5390,7 @@ def termvectors( Get term vector information. Get information and statistics about terms in the fields of a particular document. - ``_ + ``_ :param index: Name of the index that contains the document. :param id: Unique identifier of the document. 
@@ -5061,46 +5530,60 @@ def update( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update a document. Updates a document by running a script or passing a partial - document. - - ``_ - - :param index: The name of the index - :param id: Document ID - :param detect_noop: Set to false to disable setting 'result' in the response - to 'noop' if no change to the document occurred. - :param doc: A partial update to an existing document. - :param doc_as_upsert: Set to true to use the contents of 'doc' as the value of - 'upsert' + Update a document. Update a document by running a script or passing a partial + document. If the Elasticsearch security features are enabled, you must have the + `index` or `write` index privilege for the target index or index alias. The script + can update, delete, or skip modifying the document. The API also supports passing + a partial document, which is merged into the existing document. To fully replace + an existing document, use the index API. This operation: * Gets the document + (collocated with the shard) from the index. * Runs the specified script. * Indexes + the result. The document must still be reindexed, but using this API removes + some network roundtrips and reduces chances of version conflicts between the + GET and the index operation. The `_source` field must be enabled to use this + API. In addition to `_source`, you can access the following variables through + the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the + current timestamp). + + ``_ + + :param index: The name of the target index. By default, the index is created + automatically if it doesn't exist. + :param id: A unique identifier for the document to be updated. + :param detect_noop: If `true`, the `result` in the response is set to `noop` + (no operation) when there are no changes to the document. + :param doc: A partial update to an existing document. If both `doc` and `script` + are specified, `doc` is ignored. + :param doc_as_upsert: If `true`, use the contents of 'doc' as the value of 'upsert'. + NOTE: Using ingest pipelines with `doc_as_upsert` is not supported. :param if_primary_term: Only perform the operation if the document has this primary term. :param if_seq_no: Only perform the operation if the document has this sequence number. :param lang: The script language. :param refresh: If 'true', Elasticsearch refreshes the affected shards to make - this operation visible to search, if 'wait_for' then wait for a refresh to - make this operation visible to search, if 'false' do nothing with refreshes. - :param require_alias: If true, the destination must be an index alias. - :param retry_on_conflict: Specify how many times should the operation be retried + this operation visible to search. If 'wait_for', it waits for a refresh to + make this operation visible to search. If 'false', it does nothing with refreshes. + :param require_alias: If `true`, the destination must be an index alias. + :param retry_on_conflict: The number of times the operation should be retried when a conflict occurs. - :param routing: Custom value used to route operations to a specific shard. - :param script: Script to execute to update the document. - :param scripted_upsert: Set to true to execute the script whether or not the - document exists. - :param source: Set to false to disable source retrieval. You can also specify - a comma-separated list of the fields you want to retrieve. 
- :param source_excludes: Specify the source fields you want to exclude. - :param source_includes: Specify the source fields you want to retrieve. - :param timeout: Period to wait for dynamic mapping updates and active shards. - This guarantees Elasticsearch waits for at least the timeout before failing. - The actual wait time could be longer, particularly when multiple waits occur. + :param routing: A custom value used to route operations to a specific shard. + :param script: The script to run to update the document. + :param scripted_upsert: If `true`, run the script whether or not the document + exists. + :param source: If `false`, turn off source retrieval. You can also specify a + comma-separated list of the fields you want to retrieve. + :param source_excludes: The source fields you want to exclude. + :param source_includes: The source fields you want to retrieve. + :param timeout: The period to wait for the following operations: dynamic mapping + updates and waiting for active shards. Elasticsearch waits for at least the + timeout period before failing. The actual wait time could be longer, particularly + when multiple waits occur. :param upsert: If the document does not already exist, the contents of 'upsert' - are inserted as a new document. If the document exists, the 'script' is executed. - :param wait_for_active_shards: The number of shard copies that must be active - before proceeding with the operations. Set to 'all' or any positive integer - up to the total number of shards in the index (number_of_replicas+1). Defaults - to 1 meaning the primary shard. + are inserted as a new document. If the document exists, the 'script' is run. + :param wait_for_active_shards: The number of copies of each shard that must be + active before proceeding with the operation. Set to 'all' or any positive + integer up to the total number of shards in the index (`number_of_replicas`+1). + The default value of `1` means it waits for each primary shard to be active. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -5230,7 +5713,7 @@ def update_by_query( is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this @@ -5429,7 +5912,7 @@ def update_by_query_rethrottle( takes effect immediately but rethrotting that slows down the query takes effect after completing the current batch to prevent scroll timeouts. - ``_ + ``_ :param task_id: The ID for the task. :param requests_per_second: The throttle for this request in sub-requests per diff --git a/elasticsearch/_sync/client/async_search.py b/elasticsearch/_sync/client/async_search.py index 3a8791e3c..87a8e5707 100644 --- a/elasticsearch/_sync/client/async_search.py +++ b/elasticsearch/_sync/client/async_search.py @@ -42,7 +42,7 @@ def delete( the authenticated user that submitted the original search request; users that have the `cancel_task` cluster privilege. - ``_ + ``_ :param id: A unique identifier for the async search. """ @@ -90,7 +90,7 @@ def get( the results of a specific async search is restricted to the user or API key that submitted it. - ``_ + ``_ :param id: A unique identifier for the async search. 
:param keep_alive: Specifies how long the async search should be available in @@ -154,7 +154,7 @@ def status( security features are enabled, use of this API is restricted to the `monitoring_user` role. - ``_ + ``_ :param id: A unique identifier for the async search. :param keep_alive: Specifies how long the async search needs to be available. @@ -336,7 +336,7 @@ def submit( can be set by changing the `search.max_async_search_response_size` cluster level setting. - ``_ + ``_ :param index: A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices diff --git a/elasticsearch/_sync/client/autoscaling.py b/elasticsearch/_sync/client/autoscaling.py index c73f74986..ab6ec9f21 100644 --- a/elasticsearch/_sync/client/autoscaling.py +++ b/elasticsearch/_sync/client/autoscaling.py @@ -42,7 +42,7 @@ def delete_autoscaling_policy( by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. - ``_ + ``_ :param name: the name of the autoscaling policy :param master_timeout: Period to wait for a connection to the master node. If @@ -102,7 +102,7 @@ def get_autoscaling_capacity( capacity was required. This information is provided for diagnosis only. Do not use this information to make autoscaling decisions. - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and @@ -147,7 +147,7 @@ def get_autoscaling_policy( Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. - ``_ + ``_ :param name: the name of the autoscaling policy :param master_timeout: Period to wait for a connection to the master node. If @@ -200,7 +200,7 @@ def put_autoscaling_policy( use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. - ``_ + ``_ :param name: the name of the autoscaling policy :param policy: diff --git a/elasticsearch/_sync/client/cat.py b/elasticsearch/_sync/client/cat.py index 082a4105d..cb97b3054 100644 --- a/elasticsearch/_sync/client/cat.py +++ b/elasticsearch/_sync/client/cat.py @@ -57,18 +57,20 @@ def aliases( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get aliases. Retrieves the cluster’s index aliases, including filter and routing - information. The API does not return data stream aliases. CAT APIs are only intended + Get aliases. Get the cluster's index aliases, including filter and routing information. + This API does not return data stream aliases. IMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API. - ``_ + ``_ :param name: A comma-separated list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. - :param expand_wildcards: Whether to expand wildcard expression to concrete indices - that are open, closed or both. + :param expand_wildcards: The type of index that wildcard patterns can match. + If the request can target data streams, this argument determines whether + wildcard expressions match hidden data streams. It supports comma-separated + values, such as `open,hidden`. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. 
:param h: List of columns to appear in the response. Supports simple wildcards. @@ -78,7 +80,10 @@ def aliases( the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. + :param master_timeout: The period to wait for a connection to the master node. + If the master node is not available before the timeout expires, the request + fails and returns an error. To indicated that the request should never timeout, + you can set it to `-1`. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -147,13 +152,14 @@ def allocation( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Provides a snapshot of the number of shards allocated to each data node and their - disk space. IMPORTANT: cat APIs are only intended for human consumption using - the command line or Kibana console. They are not intended for use by applications. + Get shard allocation information. Get a snapshot of the number of shards allocated + to each data node and their disk space. IMPORTANT: CAT APIs are only intended + for human consumption using the command line or Kibana console. They are not + intended for use by applications. - ``_ + ``_ - :param node_id: Comma-separated list of node identifiers or names used to limit + :param node_id: A comma-separated list of node identifiers or names used to limit the returned information. :param bytes: The unit used to display byte values. :param format: Specifies the format to return the columnar data in, can be set @@ -231,17 +237,17 @@ def component_templates( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get component templates. Returns information about component templates in a cluster. + Get component templates. Get information about component templates in a cluster. Component templates are building blocks for constructing index templates that - specify index mappings, settings, and aliases. CAT APIs are only intended for - human consumption using the command line or Kibana console. They are not intended - for use by applications. For application consumption, use the get component template - API. + specify index mappings, settings, and aliases. IMPORTANT: CAT APIs are only intended + for human consumption using the command line or Kibana console. They are not + intended for use by applications. For application consumption, use the get component + template API. - ``_ + ``_ - :param name: The name of the component template. Accepts wildcard expressions. - If omitted, all component templates are returned. + :param name: The name of the component template. It accepts wildcard expressions. + If it is omitted, all component templates are returned. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: List of columns to appear in the response. Supports simple wildcards. @@ -251,7 +257,7 @@ def component_templates( the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. 
- :param master_timeout: Period to wait for a connection to the master node. + :param master_timeout: The period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -313,17 +319,17 @@ def count( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get a document count. Provides quick access to a document count for a data stream, + Get a document count. Get quick access to a document count for a data stream, an index, or an entire cluster. The document count only includes live documents, - not deleted documents which have not yet been removed by the merge process. CAT - APIs are only intended for human consumption using the command line or Kibana + not deleted documents which have not yet been removed by the merge process. IMPORTANT: + CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the count API. - ``_ + ``_ - :param index: Comma-separated list of data streams, indices, and aliases used - to limit the request. Supports wildcards (`*`). To target all data streams + :param index: A comma-separated list of data streams, indices, and aliases used + to limit the request. It supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -390,12 +396,13 @@ def fielddata( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns the amount of heap memory currently used by the field data cache on every - data node in the cluster. IMPORTANT: cat APIs are only intended for human consumption - using the command line or Kibana console. They are not intended for use by applications. - For application consumption, use the nodes stats API. + Get field data cache information. Get the amount of heap memory currently used + by the field data cache on every data node in the cluster. IMPORTANT: cat APIs + are only intended for human consumption using the command line or Kibana console. + They are not intended for use by applications. For application consumption, use + the nodes stats API. - ``_ + ``_ :param fields: Comma-separated list of fields used to limit returned information. To retrieve all fields, omit this parameter. @@ -467,19 +474,19 @@ def health( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns the health status of a cluster, similar to the cluster health API. IMPORTANT: - cat APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. For application consumption, - use the cluster health API. This API is often used to check malfunctioning clusters. - To help you track cluster health alongside log files and alerting systems, the - API returns timestamps in two formats: `HH:MM:SS`, which is human-readable but - includes no date information; `Unix epoch time`, which is machine-sortable and - includes date information. The latter format is useful for cluster recoveries - that take multiple days. You can use the cat health API to verify cluster health - across multiple nodes. 
You also can use the API to track the recovery of a large - cluster over a longer period of time. - - ``_ + Get the cluster health status. IMPORTANT: CAT APIs are only intended for human + consumption using the command line or Kibana console. They are not intended for + use by applications. For application consumption, use the cluster health API. + This API is often used to check malfunctioning clusters. To help you track cluster + health alongside log files and alerting systems, the API returns timestamps in + two formats: `HH:MM:SS`, which is human-readable but includes no date information; + `Unix epoch time`, which is machine-sortable and includes date information. The + latter format is useful for cluster recoveries that take multiple days. You can + use the cat health API to verify cluster health across multiple nodes. You also + can use the API to track the recovery of a large cluster over a longer period + of time. + + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -531,9 +538,9 @@ def health( @_rewrite_parameters() def help(self) -> TextApiResponse: """ - Get CAT help. Returns help for the CAT APIs. + Get CAT help. Get help for the CAT APIs. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_cat" @@ -582,7 +589,7 @@ def indices( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get index information. Returns high-level information about indices in a cluster, + Get index information. Get high-level information about indices in a cluster, including backing indices for data streams. Use this request to get the following information for each index in a cluster: - shard count - document count - deleted document count - primary store size - total store size of all shards, including @@ -593,7 +600,7 @@ def indices( using the command line or Kibana console. They are not intended for use by applications. For application consumption, use an index endpoint. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -684,12 +691,12 @@ def master( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about the master node, including the ID, bound IP address, - and name. IMPORTANT: cat APIs are only intended for human consumption using the - command line or Kibana console. They are not intended for use by applications. - For application consumption, use the nodes info API. + Get master node information. Get information about the master node, including + the ID, bound IP address, and name. IMPORTANT: cat APIs are only intended for + human consumption using the command line or Kibana console. They are not intended + for use by applications. For application consumption, use the nodes info API. - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -858,13 +865,13 @@ def ml_data_frame_analytics( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get data frame analytics jobs. Returns configuration and usage information about - data frame analytics jobs. CAT APIs are only intended for human consumption using - the Kibana console or command line. They are not intended for use by applications. + Get data frame analytics jobs. 
Get configuration and usage information about + data frame analytics jobs. IMPORTANT: CAT APIs are only intended for human consumption + using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get data frame analytics jobs statistics API. - ``_ + ``_ :param id: The ID of the data frame analytics to fetch :param allow_no_match: Whether to ignore if a wildcard expression matches no @@ -1020,14 +1027,15 @@ def ml_datafeeds( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get datafeeds. Returns configuration and usage information about datafeeds. This + Get datafeeds. Get configuration and usage information about datafeeds. This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` - cluster privileges to use this API. CAT APIs are only intended for human consumption - using the Kibana console or command line. They are not intended for use by applications. - For application consumption, use the get datafeed statistics API. + cluster privileges to use this API. IMPORTANT: CAT APIs are only intended for + human consumption using the Kibana console or command line. They are not intended + for use by applications. For application consumption, use the get datafeed statistics + API. - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. @@ -1381,15 +1389,15 @@ def ml_jobs( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get anomaly detection jobs. Returns configuration and usage information for anomaly + Get anomaly detection jobs. Get configuration and usage information for anomaly detection jobs. This API returns a maximum of 10,000 jobs. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, - or `manage` cluster privileges to use this API. CAT APIs are only intended for - human consumption using the Kibana console or command line. They are not intended - for use by applications. For application consumption, use the get anomaly detection - job statistics API. + or `manage` cluster privileges to use this API. IMPORTANT: CAT APIs are only + intended for human consumption using the Kibana console or command line. They + are not intended for use by applications. For application consumption, use the + get anomaly detection job statistics API. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param allow_no_match: Specifies what to do when the request: * Contains wildcard @@ -1565,12 +1573,12 @@ def ml_trained_models( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get trained models. Returns configuration and usage information about inference - trained models. CAT APIs are only intended for human consumption using the Kibana - console or command line. They are not intended for use by applications. For application - consumption, use the get trained models statistics API. + Get trained models. Get configuration and usage information about inference trained + models. IMPORTANT: CAT APIs are only intended for human consumption using the + Kibana console or command line. They are not intended for use by applications. + For application consumption, use the get trained models statistics API. - ``_ + ``_ :param model_id: A unique identifier for the trained model. 
:param allow_no_match: Specifies what to do when the request: contains wildcard @@ -1656,12 +1664,12 @@ def nodeattrs( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about custom node attributes. IMPORTANT: cat APIs are only - intended for human consumption using the command line or Kibana console. They - are not intended for use by applications. For application consumption, use the - nodes info API. + Get node attribute information. Get information about custom node attributes. + IMPORTANT: cat APIs are only intended for human consumption using the command + line or Kibana console. They are not intended for use by applications. For application + consumption, use the nodes info API. - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -1737,12 +1745,12 @@ def nodes( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about the nodes in a cluster. IMPORTANT: cat APIs are only - intended for human consumption using the command line or Kibana console. They - are not intended for use by applications. For application consumption, use the - nodes info API. + Get node information. Get information about the nodes in a cluster. IMPORTANT: + cat APIs are only intended for human consumption using the command line or Kibana + console. They are not intended for use by applications. For application consumption, + use the nodes info API. - ``_ + ``_ :param bytes: The unit used to display byte values. :param format: Specifies the format to return the columnar data in, can be set @@ -1822,12 +1830,12 @@ def pending_tasks( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns cluster-level changes that have not yet been executed. IMPORTANT: cat - APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. For application consumption, - use the pending cluster tasks API. + Get pending task information. Get information about cluster-level changes that + have not yet taken effect. IMPORTANT: cat APIs are only intended for human consumption + using the command line or Kibana console. They are not intended for use by applications. + For application consumption, use the pending cluster tasks API. - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -1900,12 +1908,12 @@ def plugins( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns a list of plugins running on each node of a cluster. IMPORTANT: cat APIs - are only intended for human consumption using the command line or Kibana console. - They are not intended for use by applications. For application consumption, use - the nodes info API. + Get plugin information. Get a list of plugins running on each node of a cluster. + IMPORTANT: cat APIs are only intended for human consumption using the command + line or Kibana console. They are not intended for use by applications. For application + consumption, use the nodes info API. - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. 
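To make the "not intended for applications" guidance above concrete, a small sketch (assuming a local cluster) that asks one of these cat endpoints for JSON rows instead of the default text table:

```
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# The cat APIs return aligned text by default; for the rare script that does
# consume them, format="json" yields one dict per row instead.
plugins = client.cat.plugins(format="json", h=["name", "component", "version"])
for row in plugins:
    print(row["name"], row["component"], row["version"])
```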
@@ -1984,16 +1992,16 @@ def recovery( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about ongoing and completed shard recoveries. Shard recovery - is the process of initializing a shard copy, such as restoring a primary shard - from a snapshot or syncing a replica shard from a primary shard. When a shard - recovery completes, the recovered shard is available for search and indexing. - For data streams, the API returns information about the stream’s backing indices. - IMPORTANT: cat APIs are only intended for human consumption using the command - line or Kibana console. They are not intended for use by applications. For application - consumption, use the index recovery API. + Get shard recovery information. Get information about ongoing and completed shard + recoveries. Shard recovery is the process of initializing a shard copy, such + as restoring a primary shard from a snapshot or syncing a replica shard from + a primary shard. When a shard recovery completes, the recovered shard is available + for search and indexing. For data streams, the API returns information about + the stream’s backing indices. IMPORTANT: cat APIs are only intended for human + consumption using the command line or Kibana console. They are not intended for + use by applications. For application consumption, use the index recovery API. - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -2074,12 +2082,12 @@ def repositories( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns the snapshot repositories for a cluster. IMPORTANT: cat APIs are only - intended for human consumption using the command line or Kibana console. They - are not intended for use by applications. For application consumption, use the - get snapshot repository API. + Get snapshot repository information. Get a list of snapshot repositories for + a cluster. IMPORTANT: cat APIs are only intended for human consumption using + the command line or Kibana console. They are not intended for use by applications. + For application consumption, use the get snapshot repository API. - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -2152,13 +2160,13 @@ def segments( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns low-level information about the Lucene segments in index shards. For - data streams, the API returns information about the backing indices. IMPORTANT: - cat APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. For application consumption, - use the index segments API. + Get segment information. Get low-level information about the Lucene segments + in index shards. For data streams, the API returns information about the backing + indices. IMPORTANT: cat APIs are only intended for human consumption using the + command line or Kibana console. They are not intended for use by applications. + For application consumption, use the index segments API. - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). 
To target all data streams @@ -2244,12 +2252,12 @@ def shards( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about the shards in a cluster. For data streams, the API - returns information about the backing indices. IMPORTANT: cat APIs are only intended - for human consumption using the command line or Kibana console. They are not - intended for use by applications. + Get shard information. Get information about the shards in a cluster. For data + streams, the API returns information about the backing indices. IMPORTANT: cat + APIs are only intended for human consumption using the command line or Kibana + console. They are not intended for use by applications. - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -2330,13 +2338,13 @@ def snapshots( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about the snapshots stored in one or more repositories. A - snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: - cat APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. For application consumption, - use the get snapshot API. + Get snapshot information. Get information about the snapshots stored in one or + more repositories. A snapshot is a backup of an index or running Elasticsearch + cluster. IMPORTANT: cat APIs are only intended for human consumption using the + command line or Kibana console. They are not intended for use by applications. + For application consumption, use the get snapshot API. - ``_ + ``_ :param repository: A comma-separated list of snapshot repositories used to limit the request. Accepts wildcard expressions. `_all` returns all repositories. @@ -2422,12 +2430,12 @@ def tasks( wait_for_completion: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about tasks currently executing in the cluster. IMPORTANT: - cat APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. For application consumption, - use the task management API. + Get task information. Get information about tasks currently running in the cluster. + IMPORTANT: cat APIs are only intended for human consumption using the command + line or Kibana console. They are not intended for use by applications. For application + consumption, use the task management API. - ``_ + ``_ :param actions: The task action names, which are used to limit the response. :param detailed: If `true`, the response includes detailed information about @@ -2513,13 +2521,13 @@ def templates( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns information about index templates in a cluster. You can use index templates - to apply index settings and field mappings to new indices at creation. IMPORTANT: - cat APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. For application consumption, - use the get index template API. + Get index template information. Get information about the index templates in + a cluster. You can use index templates to apply index settings and field mappings + to new indices at creation. 
IMPORTANT: cat APIs are only intended for human consumption + using the command line or Kibana console. They are not intended for use by applications. + For application consumption, use the get index template API. - ``_ + ``_ :param name: The name of the template to return. Accepts wildcard expressions. If omitted, all templates are returned. @@ -2599,13 +2607,13 @@ def thread_pool( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Returns thread pool statistics for each node in a cluster. Returned information - includes all built-in thread pools and custom thread pools. IMPORTANT: cat APIs - are only intended for human consumption using the command line or Kibana console. - They are not intended for use by applications. For application consumption, use - the nodes info API. + Get thread pool statistics. Get thread pool statistics for each node in a cluster. + Returned information includes all built-in thread pools and custom thread pools. + IMPORTANT: cat APIs are only intended for human consumption using the command + line or Kibana console. They are not intended for use by applications. For application + consumption, use the nodes info API. - ``_ + ``_ :param thread_pool_patterns: A comma-separated list of thread pool names used to limit the request. Accepts wildcard expressions. @@ -2853,12 +2861,12 @@ def transforms( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get transforms. Returns configuration and usage information about transforms. + Get transform information. Get configuration and usage information about transforms. CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get transform statistics API. - ``_ + ``_ :param transform_id: A transform identifier or a wildcard expression. If you do not specify one of these options, the API returns information for all diff --git a/elasticsearch/_sync/client/ccr.py b/elasticsearch/_sync/client/ccr.py index bd6eb0b13..fdd79e2c9 100644 --- a/elasticsearch/_sync/client/ccr.py +++ b/elasticsearch/_sync/client/ccr.py @@ -40,7 +40,7 @@ def delete_auto_follow_pattern( Delete auto-follow patterns. Delete a collection of cross-cluster replication auto-follow patterns. - ``_ + ``_ :param name: The name of the auto follow pattern. :param master_timeout: Period to wait for a connection to the master node. @@ -122,7 +122,7 @@ def follow( cross-cluster replication starts replicating operations from the leader index to the follower index. - ``_ + ``_ :param index: The name of the follower index. :param leader_index: The name of the index in the leader cluster to follow. @@ -249,7 +249,7 @@ def follow_info( index names, replication options, and whether the follower indices are active or paused. - ``_ + ``_ :param index: A comma-separated list of index patterns; use `_all` to perform the operation on all indices @@ -296,7 +296,7 @@ def follow_stats( shard-level stats about the "following tasks" associated with each shard for the specified indices. - ``_ + ``_ :param index: A comma-separated list of index patterns; use `_all` to perform the operation on all indices @@ -370,7 +370,7 @@ def forget_follower( API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked. 
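A hedged sketch of the forget-follower call described above; every identifier below is a placeholder for values you would normally read from the follower cluster's own metadata:

```
from elasticsearch import Elasticsearch

leader_client = Elasticsearch("http://leader-host:9200")  # assumed leader cluster

# Run against the *leader* cluster to drop retention leases left behind when
# an unfollow call on the follower cluster failed to clean them up.
leader_client.ccr.forget_follower(
    index="leader-index",                  # leader index holding the leases
    follower_cluster="follower-cluster",   # placeholder cluster name
    follower_index="follower-index",       # placeholder follower index
    follower_index_uuid="follower-uuid",   # placeholder index UUID
    leader_remote_cluster="leader-remote", # remote cluster alias seen from the follower
)
```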
- ``_ + ``_ :param index: the name of the leader index for which specified follower retention leases should be removed @@ -431,7 +431,7 @@ def get_auto_follow_pattern( """ Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. - ``_ + ``_ :param name: Specifies the auto-follow pattern collection that you want to retrieve. If you do not specify a name, the API returns information for all collections. @@ -486,7 +486,7 @@ def pause_auto_follow_pattern( patterns. Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim. - ``_ + ``_ :param name: The name of the auto follow pattern that should pause discovering new indices to follow. @@ -534,7 +534,7 @@ def pause_follow( resume following with the resume follower API. You can pause and resume a follower index to change the configuration of the following task. - ``_ + ``_ :param index: The name of the follower index that should pause following its leader index. @@ -620,7 +620,7 @@ def put_auto_follow_pattern( that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns. - ``_ + ``_ :param name: The name of the collection of auto-follow patterns. :param remote_cluster: The remote cluster containing the leader indices to match @@ -752,7 +752,7 @@ def resume_auto_follow_pattern( Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim. - ``_ + ``_ :param name: The name of the auto follow pattern to resume discovering new indices to follow. @@ -825,7 +825,7 @@ def resume_follow( to failures during following tasks. When this API returns, the follower index will resume fetching operations from the leader index. - ``_ + ``_ :param index: The name of the follow index to resume following. :param master_timeout: Period to wait for a connection to the master node. @@ -913,7 +913,7 @@ def stats( Get cross-cluster replication stats. This API returns stats about auto-following and the same shard-level stats as the get follower stats API. - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. :param timeout: Period to wait for a response. If no response is received before @@ -964,7 +964,7 @@ def unfollow( regular index to a follower index. Converting a follower index to a regular index is an irreversible operation. - ``_ + ``_ :param index: The name of the follower index that should be turned into a regular index. diff --git a/elasticsearch/_sync/client/cluster.py b/elasticsearch/_sync/client/cluster.py index f5b45aa37..636fae5d1 100644 --- a/elasticsearch/_sync/client/cluster.py +++ b/elasticsearch/_sync/client/cluster.py @@ -53,7 +53,7 @@ def allocation_explain( or why a shard continues to remain on its current node when you might expect otherwise. - ``_ + ``_ :param current_node: Specifies the node ID or the name of the node to only explain a shard that is currently located on the specified node. @@ -126,7 +126,7 @@ def delete_component_template( Delete component templates. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases. - ``_ + ``_ :param name: Comma-separated list or wildcard expression of component template names used to limit the request. @@ -178,7 +178,7 @@ def delete_voting_config_exclusions( Clear cluster voting config exclusions. 
Remove master-eligible nodes from the voting configuration exclusion list. - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. :param wait_for_removal: Specifies whether to wait for all excluded nodes to @@ -229,7 +229,7 @@ def exists_component_template( Check component templates. Returns information about whether a particular component template exists. - ``_ + ``_ :param name: Comma-separated list of component template names used to limit the request. Wildcard (*) expressions are supported. @@ -284,7 +284,7 @@ def get_component_template( """ Get component templates. Get information about component templates. - ``_ + ``_ :param name: Comma-separated list of component template names used to limit the request. Wildcard (`*`) expressions are supported. @@ -348,7 +348,7 @@ def get_settings( Get cluster-wide settings. By default, it returns only settings that have been explicitly defined. - ``_ + ``_ :param flat_settings: If `true`, returns settings in flat format. :param include_defaults: If `true`, returns default cluster settings from the @@ -439,7 +439,7 @@ def health( high watermark health level. The cluster status is controlled by the worst index status. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported. To target @@ -543,7 +543,7 @@ def info( """ Get cluster info. Returns basic information about the cluster. - ``_ + ``_ :param target: Limits the information returned to the specific target. Supports a comma-separated list, such as http,ingest. @@ -592,7 +592,7 @@ def pending_tasks( index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API. - ``_ + ``_ :param local: If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. @@ -667,7 +667,7 @@ def post_voting_config_exclusions( master-ineligible nodes or when removing fewer than half of the master-eligible nodes. - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. :param node_ids: A comma-separated list of the persistent ids of the nodes to @@ -746,7 +746,7 @@ def put_component_template( template to a data stream or index. To be applied, a component template must be included in an index template's `composed_of` list. - ``_ + ``_ :param name: Name of the component template to create. Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; @@ -854,7 +854,7 @@ def put_settings( settings can clear unexpectedly, resulting in a potentially undesired cluster configuration. - ``_ + ``_ :param flat_settings: Return settings in flat format (default: false) :param master_timeout: Explicit operation timeout for connection to master node @@ -910,7 +910,7 @@ def remote_info( This API returns connection and endpoint information keyed by the configured remote cluster alias. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_remote/info" @@ -973,7 +973,7 @@ def reroute( API with the `?retry_failed` URI query parameter, which will attempt a single retry round for these shards. - ``_ + ``_ :param commands: Defines the commands to perform. :param dry_run: If true, then the request simulates the operation. It will calculate @@ -1081,7 +1081,7 @@ def state( external monitoring tools. Instead, obtain the information you require using other more stable cluster APIs. 
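A sketch of limiting what the cluster state API returns, per the metric and index parameters described below (the index name is an assumption):

```
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Request only the routing table, scoped to one index, instead of the full
# (and potentially very large) cluster state document.
state = client.cluster.state(metric="routing_table", index="my-index")
print(list(state["routing_table"]["indices"]))
```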
- ``_ + ``_ :param metric: Limit the information returned to the specified metrics :param index: A comma-separated list of index names; use `_all` or empty string @@ -1167,7 +1167,7 @@ def stats( usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins). - ``_ + ``_ :param node_id: Comma-separated list of node filters used to limit returned information. Defaults to all nodes in the cluster. diff --git a/elasticsearch/_sync/client/connector.py b/elasticsearch/_sync/client/connector.py index aeffc0d39..7b334ab01 100644 --- a/elasticsearch/_sync/client/connector.py +++ b/elasticsearch/_sync/client/connector.py @@ -46,7 +46,7 @@ def check_in( Check in a connector. Update the `last_seen` field in the connector and set it to the current timestamp. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be checked in """ @@ -91,7 +91,7 @@ def delete( ingest pipelines, or data indices associated with the connector. These need to be removed manually. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be deleted :param delete_sync_jobs: A flag indicating if associated sync jobs should be @@ -136,7 +136,7 @@ def get( """ Get a connector. Get the details about a connector. - ``_ + ``_ :param connector_id: The unique identifier of the connector """ @@ -232,7 +232,7 @@ def last_sync( Update the connector last sync stats. Update the fields related to the last sync of a connector. This action is used for analytics and monitoring. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param last_access_control_sync_error: @@ -327,7 +327,7 @@ def list( """ Get all connectors. Get information about all connectors. - ``_ + ``_ :param connector_name: A comma-separated list of connector names to fetch connector documents for @@ -406,7 +406,7 @@ def post( a managed service on Elastic Cloud. Self-managed connectors (Connector clients) are self-managed on your infrastructure. - ``_ + ``_ :param description: :param index_name: @@ -485,7 +485,7 @@ def put( """ Create or update a connector. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be created or updated. ID is auto-generated if not provided. @@ -558,7 +558,7 @@ def sync_job_cancel( connector service is then responsible for setting the status of connector sync jobs to cancelled. - ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job """ @@ -607,7 +607,7 @@ def sync_job_check_in( on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. - ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job to be checked in. @@ -665,7 +665,7 @@ def sync_job_claim( service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. - ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job. :param worker_hostname: The host name of the current system that will run the @@ -723,7 +723,7 @@ def sync_job_delete( Delete a connector sync job. Remove a connector sync job and its associated data. This is a destructive action that is not recoverable. - ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job to be deleted @@ -774,7 +774,7 @@ def sync_job_error( you need to deploy the Elastic connector service on your own infrastructure. 
This service runs automatically on Elastic Cloud for Elastic managed connectors. - ``_ + ``_ :param connector_sync_job_id: The unique identifier for the connector sync job. :param error: The error for the connector sync job error field. @@ -825,7 +825,7 @@ def sync_job_get( """ Get a connector sync job. - ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job """ @@ -895,7 +895,7 @@ def sync_job_list( Get all connector sync jobs. Get information about all stored connector sync jobs listed by their creation date in ascending order. - ``_ + ``_ :param connector_id: A connector id to fetch connector sync jobs for :param from_: Starting offset (default: 0) @@ -958,7 +958,7 @@ def sync_job_post( Create a connector sync job. Create a connector sync job document in the internal index and initialize its counters and timestamps with default values. - ``_ + ``_ :param id: The id of the associated connector :param job_type: @@ -1031,7 +1031,7 @@ def sync_job_update_stats( service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. - ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job. :param deleted_document_count: The number of documents the sync job deleted. @@ -1111,7 +1111,7 @@ def update_active_filtering( Activate the connector draft filter. Activates the valid draft filtering for a connector. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated """ @@ -1161,7 +1161,7 @@ def update_api_key_id( secret ID is required only for Elastic managed (native) connectors. Self-managed connectors (connector clients) do not use this field. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param api_key_id: @@ -1217,7 +1217,7 @@ def update_configuration( Update the connector configuration. Update the configuration field in the connector document. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param configuration: @@ -1274,7 +1274,7 @@ def update_error( to error. Otherwise, if the error is reset to null, the connector status is updated to connected. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param error: @@ -1334,7 +1334,7 @@ def update_features( on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated. :param features: @@ -1392,7 +1392,7 @@ def update_filtering( is activated once validated by the running Elastic connector service. The filtering property is used to configure sync rules (both basic and advanced) for a connector. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param advanced_snippet: @@ -1450,7 +1450,7 @@ def update_filtering_validation( Update the connector draft filtering validation. Update the draft filtering validation info for a connector. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param validation: @@ -1504,7 +1504,7 @@ def update_index_name( Update the connector index name. Update the `index_name` field of a connector, specifying the index where the data ingested by the connector is stored. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param index_name: @@ -1558,7 +1558,7 @@ def update_name( """ Update the connector name and description. 
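A minimal sketch of the name and description update just described, with a placeholder connector ID:

```
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Rename a connector and refresh its description in one call.
client.connector.update_name(
    connector_id="my-connector",  # placeholder connector ID
    name="Shared drive connector",
    description="Syncs the shared drive contents into Elasticsearch",
)
```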
- ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param description: @@ -1612,7 +1612,7 @@ def update_native( """ Update the connector is_native flag. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param is_native: @@ -1666,7 +1666,7 @@ def update_pipeline( Update the connector pipeline. When you create a new connector, the configuration of an ingest pipeline is populated with default settings. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param pipeline: @@ -1719,7 +1719,7 @@ def update_scheduling( """ Update the connector scheduling. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param scheduling: @@ -1772,7 +1772,7 @@ def update_service_type( """ Update the connector service type. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param service_type: @@ -1832,7 +1832,7 @@ def update_status( """ Update the connector status. - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param status: diff --git a/elasticsearch/_sync/client/dangling_indices.py b/elasticsearch/_sync/client/dangling_indices.py index 63bebd50c..9e0ab3870 100644 --- a/elasticsearch/_sync/client/dangling_indices.py +++ b/elasticsearch/_sync/client/dangling_indices.py @@ -44,7 +44,7 @@ def delete_dangling_index( For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. - ``_ + ``_ :param index_uuid: The UUID of the index to delete. Use the get dangling indices API to find the UUID. @@ -103,7 +103,7 @@ def import_dangling_index( For example, this can happen if you delete more than `cluster.indices.tombstones.size` indices while an Elasticsearch node is offline. - ``_ + ``_ :param index_uuid: The UUID of the index to import. Use the get dangling indices API to locate the UUID. @@ -162,7 +162,7 @@ def list_dangling_indices( indices while an Elasticsearch node is offline. Use this API to list dangling indices, which you can then import or delete. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_dangling" diff --git a/elasticsearch/_sync/client/enrich.py b/elasticsearch/_sync/client/enrich.py index 47b85cbaf..8a9755d89 100644 --- a/elasticsearch/_sync/client/enrich.py +++ b/elasticsearch/_sync/client/enrich.py @@ -39,7 +39,7 @@ def delete_policy( """ Delete an enrich policy. Deletes an existing enrich policy and its enrich index. - ``_ + ``_ :param name: Enrich policy to delete. :param master_timeout: Period to wait for a connection to the master node. @@ -84,7 +84,7 @@ def execute_policy( """ Run an enrich policy. Create the enrich index for an existing enrich policy. - ``_ + ``_ :param name: Enrich policy to execute. :param master_timeout: Period to wait for a connection to the master node. @@ -132,7 +132,7 @@ def get_policy( """ Get an enrich policy. Returns information about an enrich policy. - ``_ + ``_ :param name: Comma-separated list of enrich policy names used to limit the request. To return information for all enrich policies, omit this parameter. @@ -186,7 +186,7 @@ def put_policy( """ Create an enrich policy. Creates an enrich policy. - ``_ + ``_ :param name: Name of the enrich policy to create or update. :param geo_match: Matches enrich data to incoming documents based on a `geo_shape` @@ -244,7 +244,7 @@ def stats( Get enrich stats. 
Returns enrich coordinator statistics and information about enrich policies that are currently executing. - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. """ diff --git a/elasticsearch/_sync/client/eql.py b/elasticsearch/_sync/client/eql.py index 82b085ee2..1d01501a6 100644 --- a/elasticsearch/_sync/client/eql.py +++ b/elasticsearch/_sync/client/eql.py @@ -39,7 +39,7 @@ def delete( Delete an async EQL search. Delete an async EQL search or a stored synchronous EQL search. The API also deletes results for the search. - ``_ + ``_ :param id: Identifier for the search to delete. A search ID is provided in the EQL search API's response for an async search. A search ID is also provided @@ -86,7 +86,7 @@ def get( Get async EQL search results. Get the current status and available results for an async EQL search or a stored synchronous EQL search. - ``_ + ``_ :param id: Identifier for the search. :param keep_alive: Period for which the search and its results are stored on @@ -137,7 +137,7 @@ def get_status( Get the async EQL status. Get the current status for an async EQL search or a stored synchronous EQL search without returning results. - ``_ + ``_ :param id: Identifier for the search. """ @@ -233,7 +233,7 @@ def search( query. EQL assumes each document in a data stream or index corresponds to an event. - ``_ + ``_ :param index: The name of the index to scope the operation :param query: EQL query you wish to run. diff --git a/elasticsearch/_sync/client/esql.py b/elasticsearch/_sync/client/esql.py index 7c35bb652..8a087c6ba 100644 --- a/elasticsearch/_sync/client/esql.py +++ b/elasticsearch/_sync/client/esql.py @@ -78,7 +78,7 @@ def async_query( The API accepts the same parameters and request body as the synchronous query API, along with additional async related properties. - ``_ + ``_ :param query: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. @@ -189,7 +189,7 @@ def async_query_delete( authenticated user that submitted the original query request * Users with the `cancel_task` cluster privilege - ``_ + ``_ :param id: The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the @@ -240,7 +240,7 @@ def async_query_get( features are enabled, only the user who first submitted the ES|QL query can retrieve the results using this API. - ``_ + ``_ :param id: The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the @@ -334,7 +334,7 @@ def query( Run an ES|QL query. Get search results for an ES|QL (Elasticsearch query language) query. - ``_ + ``_ :param query: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. diff --git a/elasticsearch/_sync/client/features.py b/elasticsearch/_sync/client/features.py index 14cb4f156..6bc6c1c66 100644 --- a/elasticsearch/_sync/client/features.py +++ b/elasticsearch/_sync/client/features.py @@ -48,7 +48,7 @@ def get_features( this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node. - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. """ @@ -102,7 +102,7 @@ def reset_features( on the master node if you have any doubts about which plugins are installed on individual nodes. 
- ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. """ diff --git a/elasticsearch/_sync/client/fleet.py b/elasticsearch/_sync/client/fleet.py index a8a86a7df..39d30f376 100644 --- a/elasticsearch/_sync/client/fleet.py +++ b/elasticsearch/_sync/client/fleet.py @@ -49,7 +49,7 @@ def global_checkpoints( Returns the current global checkpoints for an index. This API is design for internal use by the fleet server project. - ``_ + ``_ :param index: A single index or index alias that resolves to a single index. :param checkpoints: A comma separated list of previous global checkpoints. When diff --git a/elasticsearch/_sync/client/graph.py b/elasticsearch/_sync/client/graph.py index f62bbb15a..b8253cfc1 100644 --- a/elasticsearch/_sync/client/graph.py +++ b/elasticsearch/_sync/client/graph.py @@ -54,7 +54,7 @@ def explore( from one more vertices of interest. You can exclude vertices that have already been returned. - ``_ + ``_ :param index: Name of the index. :param connections: Specifies or more fields from which you want to extract terms diff --git a/elasticsearch/_sync/client/ilm.py b/elasticsearch/_sync/client/ilm.py index b2591fd90..2b133ea2c 100644 --- a/elasticsearch/_sync/client/ilm.py +++ b/elasticsearch/_sync/client/ilm.py @@ -42,7 +42,7 @@ def delete_lifecycle( If the policy is being used to manage any indices, the request fails and returns an error. - ``_ + ``_ :param name: Identifier for the policy. :param master_timeout: Period to wait for a connection to the master node. If @@ -98,7 +98,7 @@ def explain_lifecycle( lifecycle state, provides the definition of the running phase, and information about any failures. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to target. Supports wildcards (`*`). To target all data streams and indices, use `*` @@ -156,7 +156,7 @@ def get_lifecycle( """ Get lifecycle policies. - ``_ + ``_ :param name: Identifier for the policy. :param master_timeout: Period to wait for a connection to the master node. If @@ -207,7 +207,7 @@ def get_status( """ Get the ILM status. Get the current index lifecycle management status. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ilm/status" @@ -259,7 +259,7 @@ def migrate_to_data_tiers( stop ILM and get ILM status APIs to wait until the reported operation mode is `STOPPED`. - ``_ + ``_ :param dry_run: If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration. This provides @@ -333,7 +333,7 @@ def move_to_step( specified in the ILM policy are considered valid. An index cannot move to a step that is not part of its policy. - ``_ + ``_ :param index: The name of the index whose lifecycle step is to change :param current_step: The step that the index is expected to be in. @@ -398,7 +398,7 @@ def put_lifecycle( and the policy version is incremented. NOTE: Only the latest version of the policy is stored, you cannot revert to previous versions. - ``_ + ``_ :param name: Identifier for the policy. :param master_timeout: Period to wait for a connection to the master node. If @@ -458,7 +458,7 @@ def remove_policy( Remove policies from an index. Remove the assigned lifecycle policies from an index or a data stream's backing indices. It also stops managing the indices. - ``_ + ``_ :param index: The name of the index to remove policy on """ @@ -501,7 +501,7 @@ def retry( and runs the step. Use the explain lifecycle state API to determine whether an index is in the ERROR step. 
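The retry description above pairs naturally with the explain API it references; a rough sketch, where the index name and the `step` response field are assumptions based on the documented behaviour:

```
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Check whether the index is stuck in the ERROR step before retrying it.
explain = client.ilm.explain_lifecycle(index="my-index")
lifecycle = explain["indices"]["my-index"]
if lifecycle.get("step") == "ERROR":
    client.ilm.retry(index="my-index")
```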
- ``_ + ``_ :param index: The name of the indices (comma-separated) whose failed lifecycle step is to be retry @@ -545,7 +545,7 @@ def start( stopped. ILM is started automatically when the cluster is formed. Restarting ILM is necessary only when it has been stopped using the stop ILM API. - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and @@ -597,7 +597,7 @@ def stop( might continue to run until in-progress operations complete and the plugin can be safely stopped. Use the get ILM status API to check whether ILM is running. - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and diff --git a/elasticsearch/_sync/client/indices.py b/elasticsearch/_sync/client/indices.py index 40062036b..a700cb9f2 100644 --- a/elasticsearch/_sync/client/indices.py +++ b/elasticsearch/_sync/client/indices.py @@ -58,7 +58,7 @@ def add_block( Add an index block. Limits the operations allowed on an index by blocking specific operation types. - ``_ + ``_ :param index: A comma separated list of indices to add a block to :param block: The block to add (one of read, write, read_only or metadata) @@ -150,7 +150,7 @@ def analyze( of tokens gets generated, an error occurs. The `_analyze` endpoint without a specified index will always use `10000` as its limit. - ``_ + ``_ :param index: Index used to derive the analyzer. If specified, the `analyzer` or field parameter overrides this value. If no index is specified or the @@ -255,7 +255,7 @@ def clear_cache( `query`, or `request` parameters. To clear the cache only of specific fields, use the `fields` parameter. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -377,7 +377,7 @@ def clone( a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well. - ``_ + ``_ :param index: Name of the source index to clone. :param target: Name of the target index to create. @@ -482,7 +482,7 @@ def close( can cause problems in managed environments. Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to `false`. - ``_ + ``_ :param index: Comma-separated list or wildcard expression of index names used to limit the request. @@ -582,7 +582,7 @@ def create( setting `index.write.wait_for_active_shards`. Note that changing this setting will also affect the `wait_for_active_shards` value on all subsequent write operations. - ``_ + ``_ :param index: Name of the index you wish to create. :param aliases: Aliases for the index. @@ -656,7 +656,7 @@ def create_data_stream( Create a data stream. Creates a data stream. You must have a matching index template with data stream enabled. - ``_ + ``_ :param name: Name of the data stream, which must meet the following criteria: Lowercase only; Cannot include `\\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, @@ -717,7 +717,7 @@ def data_streams_stats( """ Get data stream stats. Retrieves statistics for one or more data streams. - ``_ + ``_ :param name: Comma-separated list of data streams used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams in a @@ -782,7 +782,7 @@ def delete( delete the index, you must roll over the data stream so a new write index is created. 
You can then use the delete index API to delete the previous write index. - ``_ + ``_ :param index: Comma-separated list of indices to delete. You cannot specify index aliases. By default, this parameter does not support wildcards (`*`) or `_all`. @@ -852,7 +852,7 @@ def delete_alias( """ Delete an alias. Removes a data stream or index from an alias. - ``_ + ``_ :param index: Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). @@ -917,7 +917,7 @@ def delete_data_lifecycle( Delete data stream lifecycles. Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle. - ``_ + ``_ :param name: A comma-separated list of data streams of which the data stream lifecycle will be deleted; use `*` to get all data streams @@ -977,7 +977,7 @@ def delete_data_stream( """ Delete data streams. Deletes one or more data streams and their backing indices. - ``_ + ``_ :param name: Comma-separated list of data streams to delete. Wildcard (`*`) expressions are supported. @@ -1032,7 +1032,7 @@ def delete_index_template( then there is no wildcard support and the provided names should match completely with existing templates. - ``_ + ``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. @@ -1084,7 +1084,7 @@ def delete_template( """ Delete a legacy index template. - ``_ + ``_ :param name: The name of the legacy index template to delete. Wildcard (`*`) expressions are supported. @@ -1156,7 +1156,7 @@ def disk_usage( The stored size of the `_id` field is likely underestimated while the `_source` field is overestimated. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. It’s recommended to execute this API with a single @@ -1237,7 +1237,7 @@ def downsample( are supported. Neither field nor document level security can be defined on the source index. The source index must be read only (`index.blocks.write: true`). - ``_ + ``_ :param index: Name of the time series index to downsample. :param target_index: Name of the index to create. @@ -1305,7 +1305,7 @@ def exists( """ Check indices. Check if one or more indices, index aliases, or data streams exist. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases. Supports wildcards (`*`). @@ -1383,7 +1383,7 @@ def exists_alias( """ Check aliases. Checks if one or more data stream or index aliases exist. - ``_ + ``_ :param name: Comma-separated list of aliases to check. Supports wildcards (`*`). :param index: Comma-separated list of data streams or indices used to limit the @@ -1453,7 +1453,7 @@ def exists_index_template( """ Check index templates. Check whether index templates exist. - ``_ + ``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. @@ -1506,7 +1506,7 @@ def exists_template( templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. - ``_ + ``_ :param name: A comma-separated list of index template names used to limit the request. Wildcard (`*`) expressions are supported. @@ -1563,7 +1563,7 @@ def explain_data_lifecycle( creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution. 
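A minimal sketch of querying that lifecycle status through the Python client; the data stream name is a placeholder:

```
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")  # placeholder connection

# Report how the data stream lifecycle is currently managing each backing index,
# including the effective (default) settings.
resp = client.indices.explain_data_lifecycle(index="my-data-stream", include_defaults=True)
print(resp)
```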
- ``_ + ``_ :param index: The name of the index to explain :param include_defaults: indicates if the API should return the default values @@ -1631,7 +1631,7 @@ def field_usage_stats( in the index. A given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times. - ``_ + ``_ :param index: Comma-separated list or wildcard expression of index names used to limit the request. @@ -1725,7 +1725,7 @@ def flush( documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to flush. Supports wildcards (`*`). To flush all data streams and indices, omit this @@ -1850,7 +1850,7 @@ def forcemerge( searches. For example: ``` POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1 ``` - ``_ + ``_ :param index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices @@ -1944,7 +1944,7 @@ def get( Get index information. Get information about one or more indices. For data streams, the API returns information about the stream’s backing indices. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard expressions (*) are supported. @@ -2033,7 +2033,7 @@ def get_alias( """ Get aliases. Retrieves information for one or more data stream or index aliases. - ``_ + ``_ :param index: Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). To target all data streams and indices, @@ -2116,7 +2116,7 @@ def get_data_lifecycle( Get data stream lifecycles. Retrieves the data stream lifecycle configuration of one or more data streams. - ``_ + ``_ :param name: Comma-separated list of data streams to limit the request. Supports wildcards (`*`). To target all data streams, omit this parameter or use `*` @@ -2171,7 +2171,7 @@ def get_data_lifecycle_stats( Get data stream lifecycle stats. Get statistics about the data streams that are managed by a data stream lifecycle. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_lifecycle/stats" @@ -2218,7 +2218,7 @@ def get_data_stream( """ Get data streams. Retrieves information about one or more data streams. - ``_ + ``_ :param name: Comma-separated list of data stream names used to limit the request. Wildcard (`*`) expressions are supported. If omitted, all data streams are @@ -2296,7 +2296,7 @@ def get_field_mapping( This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields. - ``_ + ``_ :param fields: Comma-separated list or wildcard expression of fields used to limit returned information. Supports wildcards (`*`). @@ -2373,7 +2373,7 @@ def get_index_template( """ Get index templates. Get information about one or more index templates. - ``_ + ``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. @@ -2447,7 +2447,7 @@ def get_mapping( Get mapping definitions. For data streams, the API retrieves mappings for the stream’s backing indices. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -2532,7 +2532,7 @@ def get_settings( Get index settings. Get setting information for one or more indices. 
For data streams, it returns setting information for the stream's backing indices. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -2621,7 +2621,7 @@ def get_template( This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8. - ``_ + ``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (`*`) expressions are supported. To return all index templates, @@ -2687,7 +2687,7 @@ def migrate_to_data_stream( with the same name. The indices for the alias become hidden backing indices for the stream. The write index for the alias becomes the write index for the stream. - ``_ + ``_ :param name: Name of the index alias to convert to a data stream. :param master_timeout: Period to wait for a connection to the master node. If @@ -2740,7 +2740,7 @@ def modify_data_stream( Update data streams. Performs one or more data stream modification actions in a single atomic operation. - ``_ + ``_ :param actions: Actions to perform. """ @@ -2820,7 +2820,7 @@ def open( setting on index creation applies to the `_open` and `_close` index actions as well. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). By default, you must explicitly @@ -2906,7 +2906,7 @@ def promote_data_stream( a matching index template is created. This will affect the lifecycle management of the data stream and interfere with the data stream size and retention. - ``_ + ``_ :param name: The name of the data stream :param master_timeout: Period to wait for a connection to the master node. If @@ -2968,7 +2968,7 @@ def put_alias( """ Create or update an alias. Adds a data stream or index to an alias. - ``_ + ``_ :param index: Comma-separated list of data streams or indices to add. Supports wildcards (`*`). Wildcard patterns that match both data streams and indices @@ -3070,7 +3070,7 @@ def put_data_lifecycle( Update data stream lifecycles. Update the data stream lifecycle of the specified data streams. - ``_ + ``_ :param name: Comma-separated list of data streams used to limit the request. Supports wildcards (`*`). To target all data streams use `*` or `_all`. @@ -3189,7 +3189,7 @@ def put_index_template( default new `dynamic_templates` entries are appended onto the end. If an entry already exists with the same key, then it is overwritten by the new definition. - ``_ + ``_ :param name: Index or template name :param allow_auto_create: This setting overrides the value of the `action.auto_create_index` @@ -3373,7 +3373,7 @@ def put_mapping( invalidate data already indexed under the old field name. Instead, add an alias field to create an alternate field name. - ``_ + ``_ :param index: A comma-separated list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. @@ -3516,7 +3516,7 @@ def put_settings( existing data. To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it. - ``_ + ``_ :param settings: :param index: Comma-separated list of data streams, indices, and aliases used @@ -3637,7 +3637,7 @@ def put_template( and higher orders overriding them. NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order. 
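As a rough illustration of the ordering rules above, a legacy template created through the Python client; every name, pattern, and setting here is a placeholder:

```
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")  # placeholder connection

# A legacy (v1) template; when several templates match an index name,
# higher `order` values are merged last and therefore win.
client.indices.put_template(
    name="logs-legacy",
    index_patterns=["logs-legacy-*"],
    order=1,
    settings={"number_of_shards": 1},
    mappings={"properties": {"@timestamp": {"type": "date"}}},
)
```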
- ``_ + ``_ :param name: The name of the template :param aliases: Aliases for the index. @@ -3738,7 +3738,7 @@ def recovery( onto a different node then the information about the original recovery will not be shown in the recovery API. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -3812,7 +3812,7 @@ def refresh( query parameter option. This option ensures the indexing operation waits for a periodic refresh before running the search. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -3896,7 +3896,7 @@ def reload_search_analyzers( a shard replica--before using this API. This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future. - ``_ + ``_ :param index: A comma-separated list of index names to reload analyzers for :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves @@ -3991,7 +3991,7 @@ def resolve_cluster( errors will be shown.) * A remote cluster is an older version that does not support the feature you want to use in your search. - ``_ + ``_ :param name: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified @@ -4065,7 +4065,7 @@ def resolve_index( Resolve indices. Resolve the names and/or index patterns for indices, aliases, and data streams. Multiple patterns and remote clusters are supported. - ``_ + ``_ :param name: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified @@ -4164,7 +4164,7 @@ def rollover( If you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`. If you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`. - ``_ + ``_ :param alias: Name of the data stream or index alias to roll over. :param new_index: Name of the index to create. Supports date math. Data streams @@ -4271,7 +4271,7 @@ def segments( shards. For data streams, the API returns information about the stream's backing indices. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -4357,7 +4357,7 @@ def shard_stores( information only for primary shards that are unassigned or have one or more unassigned replica shards. - ``_ + ``_ :param index: List of data streams, indices, and aliases used to limit the request. :param allow_no_indices: If false, the request returns an error if any wildcard @@ -4460,7 +4460,7 @@ def shrink( must have sufficient free disk space to accommodate a second copy of the existing index. - ``_ + ``_ :param index: Name of the source index to shrink. :param target: Name of the target index to create. @@ -4536,7 +4536,7 @@ def simulate_index_template( Simulate an index. Get the index configuration that would be applied to the specified index from an existing index template. - ``_ + ``_ :param name: Name of the index to simulate :param include_defaults: If true, returns all relevant default configurations @@ -4614,7 +4614,7 @@ def simulate_template( Simulate an index template. Get the index configuration that would be applied by a particular index template. - ``_ + ``_ :param name: Name of the index template to simulate. 
To test a template configuration before you add it to the cluster, omit this parameter and specify the template @@ -4769,7 +4769,7 @@ def split( in the source index. * The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index. - ``_ + ``_ :param index: Name of the source index to split. :param target: Name of the target index to create. @@ -4868,7 +4868,7 @@ def stats( cleared. Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed. - ``_ + ``_ :param index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices @@ -4972,7 +4972,7 @@ def unfreeze( Unfreeze an index. When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again. - ``_ + ``_ :param index: Identifier for the index. :param allow_no_indices: If `false`, the request returns an error if any wildcard @@ -5046,7 +5046,7 @@ def update_aliases( """ Create or update an alias. Adds a data stream or index to an alias. - ``_ + ``_ :param actions: Actions to perform. :param master_timeout: Period to wait for a connection to the master node. If @@ -5121,7 +5121,7 @@ def validate_query( """ Validate a query. Validates a query without running it. - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py index fc3c46861..553789086 100644 --- a/elasticsearch/_sync/client/inference.py +++ b/elasticsearch/_sync/client/inference.py @@ -46,7 +46,7 @@ def delete( """ Delete an inference endpoint - ``_ + ``_ :param inference_id: The inference Id :param task_type: The task type @@ -111,7 +111,7 @@ def get( """ Get an inference endpoint - ``_ + ``_ :param task_type: The task type :param inference_id: The inference Id @@ -174,7 +174,7 @@ def inference( """ Perform inference on the service - ``_ + ``_ :param inference_id: The inference Id :param input: Inference input. Either a string or an array of strings. @@ -271,7 +271,7 @@ def put( to use these models or if you want to use non-NLP models, use the machine learning trained model APIs. - ``_ + ``_ :param inference_id: The inference Id :param inference_config: @@ -350,7 +350,7 @@ def update( or if you want to use non-NLP models, use the machine learning trained model APIs. - ``_ + ``_ :param inference_id: The unique identifier of the inference endpoint. :param inference_config: diff --git a/elasticsearch/_sync/client/ingest.py b/elasticsearch/_sync/client/ingest.py index 7d8b2d154..fb8f7a35f 100644 --- a/elasticsearch/_sync/client/ingest.py +++ b/elasticsearch/_sync/client/ingest.py @@ -41,7 +41,7 @@ def delete_geoip_database( Delete GeoIP database configurations. Delete one or more IP geolocation database configurations. - ``_ + ``_ :param id: A comma-separated list of geoip database configurations to delete :param master_timeout: Period to wait for a connection to the master node. If @@ -92,7 +92,7 @@ def delete_ip_location_database( """ Delete IP geolocation database configurations. - ``_ + ``_ :param id: A comma-separated list of IP location database configurations. :param master_timeout: The period to wait for a connection to the master node. @@ -145,7 +145,7 @@ def delete_pipeline( """ Delete pipelines. Delete one or more ingest pipelines. 
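A minimal sketch of removing a pipeline with the Python client; the pipeline ID is a placeholder:

```
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")  # placeholder connection

# Delete one ingest pipeline by ID. Passing "*" instead would delete every
# pipeline in the cluster, so be deliberate with the identifier.
client.ingest.delete_pipeline(id="my-pipeline")
```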
- ``_ + ``_ :param id: Pipeline ID or wildcard expression of pipeline IDs used to limit the request. To delete all ingest pipelines in a cluster, use a value of `*`. @@ -195,7 +195,7 @@ def geo_ip_stats( Get GeoIP statistics. Get download statistics for GeoIP2 databases that are used with the GeoIP processor. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ingest/geoip/stats" @@ -232,7 +232,7 @@ def get_geoip_database( Get GeoIP database configurations. Get information about one or more IP geolocation database configurations. - ``_ + ``_ :param id: Comma-separated list of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit @@ -278,7 +278,7 @@ def get_ip_location_database( """ Get IP geolocation database configurations. - ``_ + ``_ :param id: Comma-separated list of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit @@ -332,7 +332,7 @@ def get_pipeline( Get pipelines. Get information about one or more ingest pipelines. This API returns a local reference of the pipeline. - ``_ + ``_ :param id: Comma-separated list of pipeline IDs to retrieve. Wildcard (`*`) expressions are supported. To get all ingest pipelines, omit this parameter or use `*`. @@ -386,7 +386,7 @@ def processor_grok( as the grok pattern you expect will match. A grok pattern is like a regular expression that supports aliased expressions that can be reused. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ingest/processor/grok" @@ -430,7 +430,7 @@ def put_geoip_database( Create or update a GeoIP database configuration. Refer to the create or update IP geolocation database configuration API. - ``_ + ``_ :param id: ID of the database configuration to create or update. :param maxmind: The configuration necessary to identify which IP geolocation @@ -502,7 +502,7 @@ def put_ip_location_database( """ Create or update an IP geolocation database configuration. - ``_ + ``_ :param id: The database configuration identifier. :param configuration: @@ -584,7 +584,7 @@ def put_pipeline( """ Create or update a pipeline. Changes made using this API take effect immediately. - ``_ + ``_ :param id: ID of the ingest pipeline to create or update. :param deprecated: Marks this ingest pipeline as deprecated. When a deprecated @@ -678,7 +678,7 @@ def simulate( You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request. - ``_ + ``_ :param docs: Sample documents to test in the pipeline. :param id: Pipeline to test. If you don’t specify a `pipeline` in the request diff --git a/elasticsearch/_sync/client/license.py b/elasticsearch/_sync/client/license.py index 0dd83ee24..03462e864 100644 --- a/elasticsearch/_sync/client/license.py +++ b/elasticsearch/_sync/client/license.py @@ -41,7 +41,7 @@ def delete( to Basic. If the operator privileges feature is enabled, only operator users can use this API. - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. :param timeout: Period to wait for a response. If no response is received before @@ -90,7 +90,7 @@ def get( Not Found` response. If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request. - ``_ + ``_ :param accept_enterprise: If `true`, this parameter returns enterprise for Enterprise license types. 
If `false`, this parameter returns platinum for both platinum @@ -136,7 +136,7 @@ def get_basic_status( """ Get the basic license status. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_license/basic_status" @@ -171,7 +171,7 @@ def get_trial_status( """ Get the trial status. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_license/trial_status" @@ -221,7 +221,7 @@ def post( TLS on the transport networking layer before you install the license. If the operator privileges feature is enabled, only operator users can use this API. - ``_ + ``_ :param acknowledge: Specifies whether you acknowledge the license changes. :param license: @@ -290,7 +290,7 @@ def post_start_basic( parameter set to `true`. To check the status of your basic license, use the get basic license API. - ``_ + ``_ :param acknowledge: whether the user has acknowledged acknowledge messages (default: false) @@ -345,7 +345,7 @@ def post_start_trial( however, request an extended trial at https://www.elastic.co/trialextension. To check the status of your trial, use the get trial status API. - ``_ + ``_ :param acknowledge: whether the user has acknowledged acknowledge messages (default: false) diff --git a/elasticsearch/_sync/client/logstash.py b/elasticsearch/_sync/client/logstash.py index 382aedfc7..7bd02551f 100644 --- a/elasticsearch/_sync/client/logstash.py +++ b/elasticsearch/_sync/client/logstash.py @@ -40,7 +40,7 @@ def delete_pipeline( Management. If the request succeeds, you receive an empty response with an appropriate status code. - ``_ + ``_ :param id: An identifier for the pipeline. """ @@ -80,7 +80,7 @@ def get_pipeline( """ Get Logstash pipelines. Get pipelines that are used for Logstash Central Management. - ``_ + ``_ :param id: A comma-separated list of pipeline identifiers. """ @@ -128,7 +128,7 @@ def put_pipeline( Create or update a Logstash pipeline. Create a pipeline that is used for Logstash Central Management. If the specified pipeline exists, it is replaced. - ``_ + ``_ :param id: An identifier for the pipeline. :param pipeline: diff --git a/elasticsearch/_sync/client/migration.py b/elasticsearch/_sync/client/migration.py index 6b96c5203..a1d3160c5 100644 --- a/elasticsearch/_sync/client/migration.py +++ b/elasticsearch/_sync/client/migration.py @@ -41,7 +41,7 @@ def deprecations( in the next major version. TIP: This APIs is designed for indirect use by the Upgrade Assistant. You are strongly recommended to use the Upgrade Assistant. - ``_ + ``_ :param index: Comma-separate list of data streams or indices to check. Wildcard (*) expressions are supported. @@ -88,7 +88,7 @@ def get_feature_upgrade_status( in progress. TIP: This API is designed for indirect use by the Upgrade Assistant. You are strongly recommended to use the Upgrade Assistant. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_migration/system_features" @@ -127,7 +127,7 @@ def post_feature_upgrade( unavailable during the migration process. TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_migration/system_features" diff --git a/elasticsearch/_sync/client/ml.py b/elasticsearch/_sync/client/ml.py index 20949046c..6013c1dc7 100644 --- a/elasticsearch/_sync/client/ml.py +++ b/elasticsearch/_sync/client/ml.py @@ -42,7 +42,7 @@ def clear_trained_model_deployment_cache( may be cached on that individual node. 
Calling this API clears the caches without restarting the deployment. - ``_ + ``_ :param model_id: The unique identifier of the trained model. """ @@ -102,7 +102,7 @@ def close_job( force parameters as the close job request. When a datafeed that has a specified end date stops, it automatically closes its associated job. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection @@ -164,7 +164,7 @@ def delete_calendar( Delete a calendar. Removes all scheduled events from a calendar, then deletes it. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. """ @@ -205,7 +205,7 @@ def delete_calendar_event( """ Delete events from a calendar. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param event_id: Identifier for the scheduled event. You can obtain this identifier @@ -253,7 +253,7 @@ def delete_calendar_job( """ Delete anomaly jobs from a calendar. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param job_id: An identifier for the anomaly detection jobs. It can be a job @@ -302,7 +302,7 @@ def delete_data_frame_analytics( """ Delete a data frame analytics job. - ``_ + ``_ :param id: Identifier for the data frame analytics job. :param force: If `true`, it deletes a job that is not stopped; this method is @@ -350,7 +350,7 @@ def delete_datafeed( """ Delete a datafeed. - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -408,7 +408,7 @@ def delete_expired_data( expired data for all anomaly detection jobs by using _all, by specifying * as the , or by omitting the . - ``_ + ``_ :param job_id: Identifier for an anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. @@ -469,7 +469,7 @@ def delete_filter( delete the filter. You must update or delete the job before you can delete the filter. - ``_ + ``_ :param filter_id: A string that uniquely identifies a filter. """ @@ -515,7 +515,7 @@ def delete_forecast( in the forecast jobs API. The delete forecast API enables you to delete one or more forecasts before they expire. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param forecast_id: A comma-separated list of forecast identifiers. If you do @@ -587,7 +587,7 @@ def delete_job( delete datafeed API with the same timeout and force parameters as the delete job request. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param delete_user_annotations: Specifies whether annotations that have been @@ -643,7 +643,7 @@ def delete_model_snapshot( that snapshot, first revert to a different one. To identify the active model snapshot, refer to the `model_snapshot_id` in the results from the get jobs API. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: Identifier for the model snapshot. @@ -692,7 +692,7 @@ def delete_trained_model( Delete an unreferenced trained model. The request deletes a trained inference model that is not referenced by an ingest pipeline. - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param force: Forcefully deletes a trained model that is referenced by ingest @@ -743,7 +743,7 @@ def delete_trained_model_alias( to a trained model. 
If the model alias is missing or refers to a model other than the one identified by the `model_id`, this API returns an error. - ``_ + ``_ :param model_id: The trained model ID to which the model alias refers. :param model_alias: The model alias to delete. @@ -800,7 +800,7 @@ def estimate_model_memory( an anomaly detection job model. It is based on analysis configuration details for the job and cardinality estimates for the fields it references. - ``_ + ``_ :param analysis_config: For a list of the properties that you can specify in the `analysis_config` component of the body of this API. @@ -868,7 +868,7 @@ def evaluate_data_frame( for use on indexes created by data frame analytics. Evaluation requires both a ground truth field and an analytics result field to be present. - ``_ + ``_ :param evaluation: Defines the type of evaluation you want to perform. :param index: Defines the `index` in which the evaluation will be performed. @@ -948,7 +948,7 @@ def explain_data_frame_analytics( setting later on. If you have object fields or fields that are excluded via source filtering, they are not included in the explanation. - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -1055,7 +1055,7 @@ def flush_job( and persists the model state to disk and the job must be opened again before analyzing further data. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param advance_time: Refer to the description for the `advance_time` query parameter. @@ -1126,7 +1126,7 @@ def forecast( for a job that has an `over_field_name` in its configuration. Forcasts predict future behavior based on historical data. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. The job must be open when you create a forecast; otherwise, an error occurs. @@ -1209,7 +1209,7 @@ def get_buckets( Get anomaly detection job results for buckets. The API presents a chronological view of the records, grouped by bucket. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param timestamp: The timestamp of a single bucket result. If you do not specify @@ -1304,7 +1304,7 @@ def get_calendar_events( """ Get info about events in calendars. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids @@ -1370,7 +1370,7 @@ def get_calendars( """ Get calendar configuration info. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids @@ -1443,7 +1443,7 @@ def get_categories( """ Get anomaly detection job results for categories. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param category_id: Identifier for the category, which is unique in the job. @@ -1527,7 +1527,7 @@ def get_data_frame_analytics( multiple data frame analytics jobs in a single API request by using a comma-separated list of data frame analytics jobs or a wildcard expression. - ``_ + ``_ :param id: Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame @@ -1599,7 +1599,7 @@ def get_data_frame_analytics_stats( """ Get data frame analytics jobs usage info. - ``_ + ``_ :param id: Identifier for the data frame analytics job. 
If you do not specify this option, the API returns information for the first hundred data frame @@ -1669,7 +1669,7 @@ def get_datafeed_stats( the only information you receive is the `datafeed_id` and the `state`. This API returns a maximum of 10,000 datafeeds. - ``_ + ``_ :param datafeed_id: Identifier for the datafeed. It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the @@ -1729,7 +1729,7 @@ def get_datafeeds( `*` as the ``, or by omitting the ``. This API returns a maximum of 10,000 datafeeds. - ``_ + ``_ :param datafeed_id: Identifier for the datafeed. It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the @@ -1792,7 +1792,7 @@ def get_filters( """ Get filters. You can get a single filter or all filters. - ``_ + ``_ :param filter_id: A string that uniquely identifies a filter. :param from_: Skips the specified number of filters. @@ -1856,7 +1856,7 @@ def get_influencers( that have contributed to, or are to blame for, the anomalies. Influencer results are available only if an `influencer_field_name` is specified in the job configuration. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param desc: If true, the results are sorted in descending order. @@ -1937,7 +1937,7 @@ def get_job_stats( """ Get anomaly detection jobs usage info. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, a comma-separated list of jobs, or a wildcard expression. If @@ -1998,7 +1998,7 @@ def get_jobs( detection jobs by using `_all`, by specifying `*` as the ``, or by omitting the ``. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. If you do not specify one of these @@ -2061,7 +2061,7 @@ def get_memory_stats( jobs and trained models are using memory, on each node, both within the JVM heap, and natively, outside of the JVM. - ``_ + ``_ :param node_id: The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or `ml:true` @@ -2116,7 +2116,7 @@ def get_model_snapshot_upgrade_stats( """ Get anomaly detection job model snapshot upgrade usage info. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: A numerical character string that uniquely identifies the @@ -2187,7 +2187,7 @@ def get_model_snapshots( """ Get model snapshots info. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: A numerical character string that uniquely identifies the @@ -2300,7 +2300,7 @@ def get_overall_buckets( its default), the `overall_score` is the maximum `overall_score` of the overall buckets that have a span equal to the jobs' largest bucket span. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, a comma-separated list of jobs or groups, or a wildcard expression. @@ -2405,7 +2405,7 @@ def get_records( found in each bucket, which relates to the number of time series being modeled and the number of detectors. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param desc: Refer to the description for the `desc` query parameter. @@ -2501,7 +2501,7 @@ def get_trained_models( """ Get trained model configuration info. - ``_ + ``_ :param model_id: The unique identifier of the trained model or a model alias. 
You can get information for multiple trained models in a single API request @@ -2589,7 +2589,7 @@ def get_trained_models_stats( models in a single API request by using a comma-separated list of model IDs or a wildcard expression. - ``_ + ``_ :param model_id: The unique identifier of the trained model or a model alias. It can be a comma-separated list or a wildcard expression. @@ -2652,7 +2652,7 @@ def infer_trained_model( """ Evaluate a trained model. - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param docs: An array of objects to pass to the model for inference. The objects @@ -2714,7 +2714,7 @@ def info( what those defaults are. It also provides information about the maximum size of machine learning jobs that could run in the current cluster configuration. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ml/info" @@ -2759,7 +2759,7 @@ def open_job( job is ready to resume its analysis from where it left off, once new data is received. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param timeout: Refer to the description for the `timeout` query parameter. @@ -2813,7 +2813,7 @@ def post_calendar_events( """ Add scheduled events to the calendar. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param events: A list of one of more scheduled events. The event’s start and @@ -2871,7 +2871,7 @@ def post_data( data can be accepted from only a single connection at a time. It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. The job must have a state of open to receive and process the data. @@ -2935,7 +2935,7 @@ def preview_data_frame_analytics( Preview features used by data frame analytics. Previews the extracted features used by a data frame analytics config. - ``_ + ``_ :param id: Identifier for the data frame analytics job. :param config: A data frame analytics config as described in create data frame @@ -3005,7 +3005,7 @@ def preview_datafeed( that accurately reflects the behavior of the datafeed, use the appropriate credentials. You can also use secondary authorization headers to supply the credentials. - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -3081,7 +3081,7 @@ def put_calendar( """ Create a calendar. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param description: A description of the calendar. @@ -3135,7 +3135,7 @@ def put_calendar_job( """ Add anomaly detection job to calendar. - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param job_id: An identifier for the anomaly detection jobs. It can be a job @@ -3216,7 +3216,7 @@ def put_data_frame_analytics( parameters, hyperparameter optimization occurs. It determines a value for each of the undefined parameters. - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -3400,7 +3400,7 @@ def put_datafeed( directly to the `.ml-config` index. Do not give users `write` privileges on the `.ml-config` index. - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -3559,7 +3559,7 @@ def put_filter( more anomaly detection jobs. 
Specifically, filters are referenced in the `custom_rules` property of detector configuration objects. - ``_ + ``_ :param filter_id: A string that uniquely identifies a filter. :param description: A description of the filter. @@ -3658,7 +3658,7 @@ def put_job( have read index privileges on the source index. If you include a `datafeed_config` but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`. - ``_ + ``_ :param job_id: The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and @@ -3863,7 +3863,7 @@ def put_trained_model( Create a trained model. Enable you to supply a trained model that is not created by data frame analytics. - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param compressed_definition: The compressed (GZipped and Base64 encoded) inference @@ -3977,7 +3977,7 @@ def put_trained_model_alias( common between the old and new trained models for the model alias, the API returns a warning. - ``_ + ``_ :param model_id: The identifier for the trained model that the alias refers to. :param model_alias: The alias to create or update. This value cannot end in numbers. @@ -4035,7 +4035,7 @@ def put_trained_model_definition_part( """ Create part of a trained model definition. - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param part: The definition part number. When the definition is loaded for inference @@ -4114,7 +4114,7 @@ def put_trained_model_vocabulary( processing (NLP) models. The vocabulary is stored in the index as described in `inference_config.*.vocabulary` of the trained model definition. - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param vocabulary: The model vocabulary, which must not be empty. @@ -4172,7 +4172,7 @@ def reset_job( job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma separated list. - ``_ + ``_ :param job_id: The ID of the job to reset. :param delete_user_annotations: Specifies whether annotations that have been @@ -4232,7 +4232,7 @@ def revert_model_snapshot( For example, you might consider reverting to a saved snapshot after Black Friday or a critical system failure. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: You can specify `empty` as the . Reverting to @@ -4302,7 +4302,7 @@ def set_upgrade_mode( the current value for the upgrade_mode setting by using the get machine learning info API. - ``_ + ``_ :param enabled: When `true`, it enables `upgrade_mode` which temporarily halts all job and datafeed tasks and prohibits new job and datafeed tasks from @@ -4357,7 +4357,7 @@ def start_data_frame_analytics( exists, it is used as is. You can therefore set up the destination index in advance with custom settings and mappings. - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -4419,7 +4419,7 @@ def start_datafeed( headers when you created or updated the datafeed, those credentials are used instead. - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -4489,7 +4489,7 @@ def start_trained_model_deployment( Start a trained model deployment. It allocates the model to every machine learning node. 
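A minimal sketch of starting and later stopping a deployment from the Python client; the model ID is a placeholder and the model must already have been imported into the cluster:

```
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")  # placeholder connection

# Allocate the trained model to the cluster's machine learning nodes.
client.ml.start_trained_model_deployment(model_id="my-pytorch-model")

# ...and free those resources again once the deployment is no longer needed.
client.ml.stop_trained_model_deployment(model_id="my-pytorch-model")
```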
- ``_ + ``_ :param model_id: The unique identifier of the trained model. Currently, only PyTorch models are supported. @@ -4573,7 +4573,7 @@ def stop_data_frame_analytics( Stop data frame analytics jobs. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -4639,7 +4639,7 @@ def stop_datafeed( Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle. - ``_ + ``_ :param datafeed_id: Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a @@ -4701,7 +4701,7 @@ def stop_trained_model_deployment( """ Stop a trained model deployment. - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param allow_no_match: Specifies what to do when the request: contains wildcard @@ -4766,7 +4766,7 @@ def update_data_frame_analytics( """ Update a data frame analytics job. - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -4878,7 +4878,7 @@ def update_datafeed( query using those same roles. If you provide secondary authorization headers, those credentials are used instead. - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -5042,7 +5042,7 @@ def update_filter( Update a filter. Updates the description of a filter, adds items, or removes items from the list. - ``_ + ``_ :param filter_id: A string that uniquely identifies a filter. :param add_items: The items to add to the filter. @@ -5133,7 +5133,7 @@ def update_job( Update an anomaly detection job. Updates certain properties of an anomaly detection job. - ``_ + ``_ :param job_id: Identifier for the job. :param allow_lazy_open: Advanced configuration option. Specifies whether this @@ -5261,7 +5261,7 @@ def update_model_snapshot( """ Update a snapshot. Updates certain properties of a snapshot. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: Identifier for the model snapshot. @@ -5322,7 +5322,7 @@ def update_trained_model_deployment( """ Update a trained model deployment. - ``_ + ``_ :param model_id: The unique identifier of the trained model. Currently, only PyTorch models are supported. @@ -5388,7 +5388,7 @@ def upgrade_job_snapshot( a time and the upgraded snapshot cannot be the current snapshot of the anomaly detection job. - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: A numerical character string that uniquely identifies the @@ -5464,7 +5464,7 @@ def validate( """ Validate an anomaly detection job. - ``_ + ``_ :param analysis_config: :param analysis_limits: @@ -5534,7 +5534,7 @@ def validate_detector( """ Validate an anomaly detection job. - ``_ + ``_ :param detector: """ diff --git a/elasticsearch/_sync/client/monitoring.py b/elasticsearch/_sync/client/monitoring.py index 2de29f47c..455a78304 100644 --- a/elasticsearch/_sync/client/monitoring.py +++ b/elasticsearch/_sync/client/monitoring.py @@ -45,7 +45,7 @@ def bulk( Send monitoring data. This API is used by the monitoring features to send monitoring data. 
- ``_ + ``_ :param interval: Collection interval (e.g., '10s' or '10000ms') of the payload :param operations: diff --git a/elasticsearch/_sync/client/nodes.py b/elasticsearch/_sync/client/nodes.py index dfa10aab5..a466586be 100644 --- a/elasticsearch/_sync/client/nodes.py +++ b/elasticsearch/_sync/client/nodes.py @@ -47,7 +47,7 @@ def clear_repositories_metering_archive( Clear the archived repositories metering. Clear the archived repositories metering information in the cluster. - ``_ + ``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. @@ -100,7 +100,7 @@ def get_repositories_metering_info( over a period of time. Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts. - ``_ + ``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). @@ -154,7 +154,7 @@ def hot_threads( node in the cluster. The output is plain text with a breakdown of the top hot threads for each node. - ``_ + ``_ :param node_id: List of node IDs or names used to limit returned information. :param ignore_idle_threads: If true, known idle threads (e.g. waiting in a socket @@ -224,7 +224,7 @@ def info( Get node information. By default, the API returns all attributes and core settings for cluster nodes. - ``_ + ``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. @@ -299,7 +299,7 @@ def reload_secure_settings( by locally accessing the API and passing the node-specific Elasticsearch keystore password. - ``_ + ``_ :param node_id: The names of particular nodes in the cluster to target. :param secure_settings_password: The password for the Elasticsearch keystore. @@ -370,7 +370,7 @@ def stats( Get node statistics. Get statistics for nodes in a cluster. By default, all stats are returned. You can limit the returned information by using metrics. - ``_ + ``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. @@ -482,7 +482,7 @@ def usage( """ Get feature usage information. - ``_ + ``_ :param node_id: A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting diff --git a/elasticsearch/_sync/client/query_rules.py b/elasticsearch/_sync/client/query_rules.py index b05f8b291..147642436 100644 --- a/elasticsearch/_sync/client/query_rules.py +++ b/elasticsearch/_sync/client/query_rules.py @@ -41,7 +41,7 @@ def delete_rule( action that is only recoverable by re-adding the same rule with the create or update query rule API. - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset containing the rule to delete @@ -90,7 +90,7 @@ def delete_ruleset( Delete a query ruleset. Remove a query ruleset and its associated data. This is a destructive action that is not recoverable. - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset to delete """ @@ -131,7 +131,7 @@ def get_rule( """ Get a query rule. Get details about a query rule within a query ruleset. - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset containing the rule to retrieve @@ -179,7 +179,7 @@ def get_ruleset( """ Get a query ruleset. Get details about a query ruleset. 
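A minimal sketch of reading rulesets back through the Python client; the ruleset ID and paging values are placeholders:

```
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")  # placeholder connection

# Fetch a single ruleset by ID, then page through summaries of all rulesets.
ruleset = client.query_rules.get_ruleset(ruleset_id="my-ruleset")
summaries = client.query_rules.list_rulesets(from_=0, size=20)
```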
- ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset """ @@ -222,7 +222,7 @@ def list_rulesets( """ Get all query rulesets. Get summarized information about the query rulesets. - ``_ + ``_ :param from_: The offset from the first result to fetch. :param size: The maximum number of results to retrieve. @@ -281,7 +281,7 @@ def put_rule( than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset containing the rule to be created or updated. @@ -366,7 +366,7 @@ def put_ruleset( rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset. - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset to be created or updated. @@ -420,7 +420,7 @@ def test( Test a query ruleset. Evaluate match criteria against a query ruleset to identify the rules that would match that criteria. - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset to be created or updated diff --git a/elasticsearch/_sync/client/rollup.py b/elasticsearch/_sync/client/rollup.py index f957ad80a..3baa6c10c 100644 --- a/elasticsearch/_sync/client/rollup.py +++ b/elasticsearch/_sync/client/rollup.py @@ -58,7 +58,7 @@ def delete_job( index. For example: ``` POST my_rollup_index/_delete_by_query { "query": { "term": { "_rollup.id": "the_rollup_job_id" } } } ``` - ``_ + ``_ :param id: Identifier for the job. """ @@ -103,7 +103,7 @@ def get_jobs( any details about it. For details about a historical rollup job, the rollup capabilities API may be more useful. - ``_ + ``_ :param id: Identifier for the rollup job. If it is `_all` or omitted, the API returns all rollup jobs. @@ -156,7 +156,7 @@ def get_rollup_caps( the first question, what fields were rolled up, what aggregations can be performed, and where does the data live? - ``_ + ``_ :param id: Index, indices or index-pattern to return rollup capabilities for. `_all` may be used to fetch rollup capabilities from all jobs. @@ -206,7 +206,7 @@ def get_rollup_index_caps( via a pattern)? * What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job? - ``_ + ``_ :param index: Data stream or index to check for rollup capabilities. Wildcard (`*`) expressions are supported. @@ -278,7 +278,7 @@ def put_job( and what metrics to collect for each group. Jobs are created in a `STOPPED` state. You can start them with the start rollup jobs API. - ``_ + ``_ :param id: Identifier for the rollup job. This can be any alphanumeric string and uniquely identifies the data that is associated with the rollup job. @@ -413,7 +413,7 @@ def rollup_search( During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used. - ``_ + ``_ :param index: A comma-separated list of data streams and indices used to limit the request. This parameter has the following rules: * At least one data @@ -487,7 +487,7 @@ def start_job( Start rollup jobs. If you try to start a job that does not exist, an exception occurs. If you try to start a job that is already started, nothing happens. - ``_ + ``_ :param id: Identifier for the rollup job. """ @@ -537,7 +537,7 @@ def stop_job( moved to STOPPED or the specified time has elapsed. If the specified time elapses without the job moving to STOPPED, a timeout exception occurs. 
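A minimal sketch of the blocking stop described above, via the Python client; the job ID and timeout are placeholders:

```
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")  # placeholder connection

# Block until the rollup job reaches STOPPED, or raise after ten seconds.
client.rollup.stop_job(id="sensor-rollup", wait_for_completion=True, timeout="10s")
```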
- ``_ + ``_ :param id: Identifier for the rollup job. :param timeout: If `wait_for_completion` is `true`, the API blocks for (at maximum) diff --git a/elasticsearch/_sync/client/search_application.py b/elasticsearch/_sync/client/search_application.py index 88e4b5531..64858faae 100644 --- a/elasticsearch/_sync/client/search_application.py +++ b/elasticsearch/_sync/client/search_application.py @@ -46,7 +46,7 @@ def delete( Delete a search application. Remove a search application and its associated alias. Indices attached to the search application are not removed. - ``_ + ``_ :param name: The name of the search application to delete """ @@ -88,7 +88,7 @@ def delete_behavioral_analytics( Delete a behavioral analytics collection. The associated data stream is also deleted. - ``_ + ``_ :param name: The name of the analytics collection to be deleted """ @@ -129,7 +129,7 @@ def get( """ Get search application details. - ``_ + ``_ :param name: The name of the search application """ @@ -170,7 +170,7 @@ def get_behavioral_analytics( """ Get behavioral analytics collections. - ``_ + ``_ :param name: A list of analytics collections to limit the returned information """ @@ -218,7 +218,7 @@ def list( """ Get search applications. Get information about search applications. - ``_ + ``_ :param from_: Starting offset. :param q: Query in the Lucene query string syntax. @@ -271,7 +271,7 @@ def post_behavioral_analytics_event( """ Create a behavioral analytics collection event. - ``_ + ``_ :param collection_name: The name of the behavioral analytics collection. :param event_type: The analytics event type. @@ -335,7 +335,7 @@ def put( """ Create or update a search application. - ``_ + ``_ :param name: The name of the search application to be created or updated. :param search_application: @@ -389,7 +389,7 @@ def put_behavioral_analytics( """ Create a behavioral analytics collection. - ``_ + ``_ :param name: The name of the analytics collection to be created or updated. """ @@ -441,7 +441,7 @@ def render_query( generated and run by calling the search application search API. You must have `read` privileges on the backing alias of the search application. - ``_ + ``_ :param name: The name of the search application to render teh query for. :param params: @@ -503,7 +503,7 @@ def search( the search application or default template. Unspecified template parameters are assigned their default values if applicable. - ``_ + ``_ :param name: The name of the search application to be searched. :param params: Query parameters specific to this request, which will override diff --git a/elasticsearch/_sync/client/searchable_snapshots.py b/elasticsearch/_sync/client/searchable_snapshots.py index 3ec1d0045..63c1d4fda 100644 --- a/elasticsearch/_sync/client/searchable_snapshots.py +++ b/elasticsearch/_sync/client/searchable_snapshots.py @@ -47,7 +47,7 @@ def cache_stats( Get cache statistics. Get statistics about the shared cache for partially mounted indices. - ``_ + ``_ :param node_id: The names of the nodes in the cluster to target. :param master_timeout: @@ -105,7 +105,7 @@ def clear_cache( Clear the cache. Clear indices and data streams from the shared cache for partially mounted indices. - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases to clear from the cache. It supports wildcards (`*`). @@ -180,7 +180,7 @@ def mount( this API for snapshots managed by index lifecycle management (ILM). Manually mounting ILM-managed snapshots can interfere with ILM processes. 
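A minimal sketch of mounting an index outside of ILM with the Python client; the repository, snapshot, and index names are placeholders, and the `storage` and `wait_for_completion` arguments are assumed options rather than ones taken from this hunk:

```
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")  # placeholder connection

# Mount a snapshotted index as a fully copied searchable snapshot and wait
# for the operation to finish before returning.
client.searchable_snapshots.mount(
    repository="my-repository",
    snapshot="my-snapshot",
    index="my-index",
    storage="full_copy",       # "shared_cache" would create a partially mounted index
    wait_for_completion=True,
)
```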
- ``_ + ``_ :param repository: The name of the repository containing the snapshot of the index to mount. @@ -265,7 +265,7 @@ def stats( """ Get searchable snapshot statistics. - ``_ + ``_ :param index: A comma-separated list of data streams and indices to retrieve statistics for. diff --git a/elasticsearch/_sync/client/security.py b/elasticsearch/_sync/client/security.py index a3d80d3f1..8c506ac40 100644 --- a/elasticsearch/_sync/client/security.py +++ b/elasticsearch/_sync/client/security.py @@ -60,7 +60,7 @@ def activate_user_profile( the document if it was disabled. Any updates do not change existing content for either the `labels` or `data` fields. - ``_ + ``_ :param grant_type: The type of grant. :param access_token: The user's Elasticsearch access token or JWT. Both `access` @@ -124,7 +124,7 @@ def authenticate( and information about the realms that authenticated and authorized the user. If the user cannot be authenticated, this API returns a 401 status code. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/_authenticate" @@ -168,7 +168,7 @@ def bulk_delete_role( manage roles, rather than using file-based role management. The bulk delete roles API cannot delete roles that are defined in roles files. - ``_ + ``_ :param names: An array of role names to delete :param refresh: If `true` (the default) then refresh the affected shards to make @@ -226,7 +226,7 @@ def bulk_put_role( way to manage roles, rather than using file-based role management. The bulk create or update roles API cannot update roles that are defined in roles files. - ``_ + ``_ :param roles: A dictionary of role name to RoleDescriptor objects to add or update :param refresh: If `true` (the default) then refresh the affected shards to make @@ -298,7 +298,7 @@ def bulk_update_api_keys( the requested changes and did not require an update, and error details for any failed update. - ``_ + ``_ :param ids: The API key identifiers. :param expiration: Expiration time for the API keys. By default, API keys never @@ -373,7 +373,7 @@ def change_password( Change passwords. Change the passwords of users in the native realm and built-in users. - ``_ + ``_ :param username: The user whose password you want to change. If you do not specify this parameter, the password is changed for the current user. @@ -436,7 +436,7 @@ def clear_api_key_cache( Clear the API key cache. Evict a subset of all entries from the API key cache. The cache is also automatically cleared on state changes of the security index. - ``_ + ``_ :param ids: Comma-separated list of API key IDs to evict from the API key cache. To evict all API keys, use `*`. Does not support other wildcard patterns. @@ -479,9 +479,10 @@ def clear_cached_privileges( cache. The cache is also automatically cleared for applications that have their privileges updated. - ``_ + ``_ - :param application: A comma-separated list of application names + :param application: A comma-separated list of applications. To clear all applications, + use an asterisk (`*`). It does not support other wildcard patterns. """ if application in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'application'") @@ -519,12 +520,19 @@ def clear_cached_realms( ) -> ObjectApiResponse[t.Any]: """ Clear the user cache. Evict users from the user cache. You can completely clear - the cache or evict specific users.
User credentials are cached in memory on each + node to avoid connecting to a remote authentication service or hitting the disk + for every incoming request. There are realm settings that you can use to configure + the user cache. For more information, refer to the documentation about controlling + the user cache. - ``_ + ``_ - :param realms: Comma-separated list of realms to clear - :param usernames: Comma-separated list of usernames to clear from the cache + :param realms: A comma-separated list of realms. To clear all realms, use an + asterisk (`*`). It does not support other wildcard patterns. + :param usernames: A comma-separated list of the users to clear from the cache. + If you do not specify this parameter, the API evicts all users from the user + cache. """ if realms in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'realms'") @@ -564,9 +572,11 @@ def clear_cached_roles( """ Clear the roles cache. Evict roles from the native role cache. - ``_ + ``_ - :param name: Role name + :param name: A comma-separated list of roles to evict from the role cache. To + evict all roles, use an asterisk (`*`). It does not support other wildcard + patterns. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -605,13 +615,20 @@ def clear_cached_service_tokens( ) -> ObjectApiResponse[t.Any]: """ Clear service account token caches. Evict a subset of all entries from the service - account token caches. + account token caches. Two separate caches exist for service account tokens: one + cache for tokens backed by the `service_tokens` file, and another for tokens + backed by the `.security` index. This API clears matching entries from both caches. + The cache for service account tokens backed by the `.security` index is cleared + automatically on state changes of the security index. The cache for tokens backed + by the `service_tokens` file is cleared automatically on file changes. - ``_ + ``_ - :param namespace: An identifier for the namespace - :param service: An identifier for the service name - :param name: A comma-separated list of service token names + :param namespace: The namespace, which is a top-level grouping of service accounts. + :param service: The name of the service, which must be unique within its namespace. + :param name: A comma-separated list of token names to evict from the service + account token caches. Use a wildcard (`*`) to evict all tokens that belong + to a service account. It does not support other wildcard patterns. """ if namespace in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'namespace'") @@ -665,30 +682,40 @@ def create_api_key( ) -> ObjectApiResponse[t.Any]: """ Create an API key. Create an API key for access without requiring basic authentication. - A successful request returns a JSON structure that contains the API key, its - unique id, and its name. If applicable, it also returns expiration information - for the API key in milliseconds. NOTE: By default, API keys never expire. You - can specify expiration information when you create the API keys. + IMPORTANT: If the credential that is used to authenticate this request is an + API key, the derived API key cannot have any privileges. If you specify privileges, + the API returns an error. A successful request returns a JSON structure that + contains the API key, its unique id, and its name. If applicable, it also returns + expiration information for the API key in milliseconds. NOTE: By default, API + keys never expire. 
You can specify expiration information when you create the + API keys. The API keys are created by the Elasticsearch API key service, which + is automatically enabled. To configure or turn off the API key service, refer + to API key service setting documentation. + + ``_ - ``_ - - :param expiration: Expiration time for the API key. By default, API keys never - expire. + :param expiration: The expiration time for the API key. By default, API keys + never expire. :param metadata: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. - :param name: Specifies the name for this API key. + :param name: A name for the API key. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - :param role_descriptors: An array of role descriptors for this API key. This - parameter is optional. When it is not specified or is an empty array, then - the API key will have a point in time snapshot of permissions of the authenticated - user. If you supply role descriptors then the resultant permissions would - be an intersection of API keys permissions and authenticated user’s permissions - thereby limiting the access scope for API keys. The structure of role descriptor - is the same as the request for create role API. For more details, see create - or update roles API. + :param role_descriptors: An array of role descriptors for this API key. When + it is not specified or it is an empty array, the API key will have a point + in time snapshot of permissions of the authenticated user. If you supply + role descriptors, the resultant permissions are an intersection of API keys + permissions and the authenticated user's permissions thereby limiting the + access scope for API keys. The structure of role descriptor is the same as + the request for the create role API. For more details, refer to the create + or update roles API. NOTE: Due to the way in which this permission intersection + is calculated, it is not possible to create an API key that is a child of + another API key, unless the derived key is created without any privileges. + In this case, you must explicitly specify a role descriptor with no privileges. + The derived API key can be used for authentication; it will not have authority + to call Elasticsearch APIs. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/api_key" @@ -757,7 +784,7 @@ def create_cross_cluster_api_key( API key API. Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error. - ``_ + ``_ :param access: The access to be granted to this API key. The access is composed of permissions for cross-cluster search and cross-cluster replication. At @@ -825,13 +852,21 @@ def create_service_token( ) -> ObjectApiResponse[t.Any]: """ Create a service account token. Create a service accounts token for access without - requiring basic authentication. - - ``_ - - :param namespace: An identifier for the namespace - :param service: An identifier for the service name - :param name: An identifier for the token name + requiring basic authentication. NOTE: Service account tokens never expire. You + must actively delete them if they are no longer needed. 
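Not part of the patch — a hedged sketch of the create API key call documented above; the key name, expiration, role descriptor, and credentials are illustrative only, but the parameter names follow the docstring:

from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", basic_auth=("elastic", "<password>"))  # placeholders

resp = client.security.create_api_key(
    name="logs-read-key",
    expiration="30d",   # omit to create a key that never expires
    role_descriptors={
        "logs-read": {"indices": [{"names": ["logs-*"], "privileges": ["read"]}]}
    },
    metadata={"env": "dev"},  # keys beginning with "_" are reserved for system usage
)
print(resp["encoded"])  # credential suitable for an Authorization: ApiKey header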
+ + ``_ + + :param namespace: The name of the namespace, which is a top-level grouping of + service accounts. + :param service: The name of the service. + :param name: The name for the service account token. If omitted, a random name + will be generated. Token names must be at least one and no more than 256 + characters. They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes + (`-`), and underscores (`_`), but cannot begin with an underscore. NOTE: + Token names must be unique in the context of the associated service account. + They must also be globally unique with their fully qualified names, which + are comprised of the service account principal and token name, such as `//`. :param refresh: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. @@ -909,7 +944,7 @@ def delegate_pki( the TLS authentication and this API translates that authentication into an Elasticsearch access token. - ``_ + ``_ :param x509_certificate_chain: The X509Certificate chain, which is represented as an ordered string array. Each string in the array is a base64-encoded @@ -963,12 +998,16 @@ def delete_privileges( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete application privileges. + Delete application privileges. To use this API, you must have one of the following + privileges: * The `manage_security` cluster privilege (or a greater privilege + such as `all`). * The "Manage Application Privileges" global privilege for the + application being referenced in the request. - ``_ + ``_ - :param application: Application name - :param name: Privilege name + :param application: The name of the application. Application privileges are always + associated with exactly one application. + :param name: The name of the privilege. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. @@ -1019,11 +1058,14 @@ def delete_role( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete roles. Delete roles in the native realm. + Delete roles. Delete roles in the native realm. The role management APIs are + generally the preferred way to manage roles, rather than using file-based role + management. The delete roles API cannot remove roles that are defined in roles + files. - ``_ + ``_ - :param name: Role name + :param name: The name of the role. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. @@ -1067,11 +1109,16 @@ def delete_role_mapping( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete role mappings. + Delete role mappings. Role mappings define which roles are assigned to each user. + The role mapping APIs are generally the preferred way to manage role mappings + rather than using role mapping files. The delete role mappings API cannot remove + role mappings that are defined in role mapping files. - ``_ + ``_ - :param name: Role-mapping name + :param name: The distinct name that identifies the role mapping. The name is + used solely as an identifier to facilitate interaction via the API; it does + not affect the behavior of the mapping in any way. 
:param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. @@ -1120,11 +1167,11 @@ def delete_service_token( Delete service account tokens. Delete service account tokens for a service in a specified namespace. - ``_ + ``_ - :param namespace: An identifier for the namespace - :param service: An identifier for the service name - :param name: An identifier for the token name + :param namespace: The namespace, which is a top-level grouping of service accounts. + :param service: The service name. + :param name: The name of the service account token. :param refresh: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. @@ -1178,9 +1225,9 @@ def delete_user( """ Delete users. Delete users from the native realm. - ``_ + ``_ - :param username: username + :param username: An identifier for the user. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. @@ -1224,11 +1271,12 @@ def disable_user( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Disable users. Disable users in the native realm. + Disable users. Disable users in the native realm. By default, when you create + users, they are enabled. You can use this API to revoke a user's access to Elasticsearch. - ``_ + ``_ - :param username: The username of the user to disable + :param username: An identifier for the user. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. @@ -1282,7 +1330,7 @@ def disable_user_profile( API to disable a user profile so it’s not visible in these searches. To re-enable a disabled user profile, use the enable user profile API . - ``_ + ``_ :param uid: Unique identifier for the user profile. :param refresh: If 'true', Elasticsearch refreshes the affected shards to make @@ -1328,11 +1376,12 @@ def enable_user( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Enable users. Enable users in the native realm. + Enable users. Enable users in the native realm. By default, when you create users, + they are enabled. - ``_ + ``_ - :param username: The username of the user to enable + :param username: An identifier for the user. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. @@ -1385,7 +1434,7 @@ def enable_user_profile( profile searches. If you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again. - ``_ + ``_ :param uid: A unique identifier for the user profile. :param refresh: If 'true', Elasticsearch refreshes the affected shards to make @@ -1428,9 +1477,12 @@ def enroll_kibana( ) -> ObjectApiResponse[t.Any]: """ Enroll Kibana. Enable a Kibana instance to configure itself for communication - with a secured Elasticsearch cluster. + with a secured Elasticsearch cluster. 
NOTE: This API is currently intended for + internal use only by Kibana. Kibana uses this API internally to configure itself + for communications with an Elasticsearch cluster that already has security features + enabled. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/enroll/kibana" @@ -1464,9 +1516,13 @@ def enroll_node( ) -> ObjectApiResponse[t.Any]: """ Enroll a node. Enroll a new node to allow it to join an existing cluster with - security features enabled. + security features enabled. The response contains all the necessary information + for the joining node to bootstrap discovery and security related settings so + that it can successfully join the cluster. The response contains key and certificate + material that allows the caller to generate valid signed certificates for the + HTTP layer of all nodes in the cluster. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/enroll/node" @@ -1513,7 +1569,7 @@ def get_api_key( privileges (including `manage_security`), this API returns all API keys regardless of ownership. - ``_ + ``_ :param active_only: A boolean flag that can be used to query API keys that are currently active. An API key is considered active if it is neither invalidated, @@ -1588,7 +1644,7 @@ def get_builtin_privileges( Get builtin privileges. Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch. - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/privilege/_builtin" @@ -1623,12 +1679,18 @@ def get_privileges( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get application privileges. + Get application privileges. To use this API, you must have one of the following + privileges: * The `read_security` cluster privilege (or a greater privilege such + as `manage_security` or `all`). * The "Manage Application Privileges" global + privilege for the application being referenced in the request. - ``_ + ``_ - :param application: Application name - :param name: Privilege name + :param application: The name of the application. Application privileges are always + associated with exactly one application. If you do not specify this parameter, + the API returns information about all privileges for all applications. + :param name: The name of the privilege. If you do not specify this parameter, + the API returns information about all privileges for the requested application. """ __path_parts: t.Dict[str, str] if application not in SKIP_IN_PATH and name not in SKIP_IN_PATH: @@ -1674,7 +1736,7 @@ def get_role( the preferred way to manage roles, rather than using file-based role management. The get roles API cannot retrieve roles that are defined in roles files. - ``_ + ``_ :param name: The name of the role. You can specify multiple roles as a comma-separated list. If you do not specify this parameter, the API returns information about @@ -1722,7 +1784,7 @@ def get_role_mapping( rather than using role mapping files. The get role mappings API cannot retrieve role mappings that are defined in role mapping files. - ``_ + ``_ :param name: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does @@ -1769,14 +1831,15 @@ def get_service_accounts( ) -> ObjectApiResponse[t.Any]: """ Get service accounts. Get a list of service accounts that match the provided - path parameters. + path parameters. NOTE: Currently, only the `elastic/fleet-server` service account + is available. 
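Not part of the patch — for example, listing service accounts as described above (assuming the built-in `elastic/fleet-server` account and a placeholder connection):

from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")  # placeholder endpoint/credentials

print(client.security.get_service_accounts())  # all service accounts
print(client.security.get_service_accounts(namespace="elastic", service="fleet-server"))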
- ``_ + ``_ - :param namespace: Name of the namespace. Omit this parameter to retrieve information - about all service accounts. If you omit this parameter, you must also omit - the `service` parameter. - :param service: Name of the service name. Omit this parameter to retrieve information + :param namespace: The name of the namespace. Omit this parameter to retrieve + information about all service accounts. If you omit this parameter, you must + also omit the `service` parameter. + :param service: The service name. Omit this parameter to retrieve information about all service accounts that belong to the specified `namespace`. """ __path_parts: t.Dict[str, str] @@ -1820,12 +1883,19 @@ def get_service_credentials( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get service account credentials. + Get service account credentials. To use this API, you must have at least the + `read_security` cluster privilege (or a greater privilege such as `manage_service_account` + or `manage_security`). The response includes service account tokens that were + created with the create service account tokens API as well as file-backed tokens + from all nodes of the cluster. NOTE: For tokens backed by the `service_tokens` + file, the API collects them from all nodes of the cluster. Tokens with the same + name from different nodes are assumed to be the same token and are only counted + once towards the total number of service tokens. - ``_ + ``_ - :param namespace: Name of the namespace. - :param service: Name of the service name. + :param namespace: The name of the namespace. + :param service: The service name. """ if namespace in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'namespace'") @@ -1867,9 +1937,11 @@ def get_settings( ) -> ObjectApiResponse[t.Any]: """ Get security index settings. Get the user-configurable settings for the security - internal index (`.security` and associated indices). + internal index (`.security` and associated indices). Only a subset of the index + settings — those that are user-configurable—will be shown. This includes: * `index.auto_expand_replicas` + * `index.number_of_replicas` - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and @@ -1932,15 +2004,39 @@ def get_token( ) -> ObjectApiResponse[t.Any]: """ Get a token. Create a bearer token for access without requiring basic authentication. - - ``_ - - :param grant_type: - :param kerberos_ticket: - :param password: - :param refresh_token: - :param scope: - :param username: + The tokens are created by the Elasticsearch Token Service, which is automatically + enabled when you configure TLS on the HTTP interface. Alternatively, you can + explicitly enable the `xpack.security.authc.token.enabled` setting. When you + are running in production mode, a bootstrap check prevents you from enabling + the token service unless you also enable TLS on the HTTP interface. The get token + API takes the same parameters as a typical OAuth 2.0 token API except for the + use of a JSON request body. A successful get token API call returns a JSON structure + that contains the access token, the amount of time (seconds) that the token expires + in, the type, and the scope if available. The tokens returned by the get token + API have a finite period of time for which they are valid and after that time + period, they can no longer be used. 
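Not part of the patch — a minimal sketch of the get token flow described above, assuming the `password` grant type and placeholder credentials:

from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", basic_auth=("elastic", "<password>"))  # placeholders

resp = client.security.get_token(grant_type="password", username="elastic", password="<password>")
access_token = resp["access_token"]        # valid until xpack.security.authc.token.timeout elapses
refresh_token = resp.get("refresh_token")  # usable exactly once, within 24 hours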
That time period is defined by the `xpack.security.authc.token.timeout` + setting. If you want to invalidate a token immediately, you can do so by using + the invalidate token API. + + ``_ + + :param grant_type: The type of grant. Supported grant types are: `password`, + `_kerberos`, `client_credentials`, and `refresh_token`. + :param kerberos_ticket: The base64 encoded kerberos ticket. If you specify the + `_kerberos` grant type, this parameter is required. This parameter is not + valid with any other supported grant type. + :param password: The user's password. If you specify the `password` grant type, + this parameter is required. This parameter is not valid with any other supported + grant type. + :param refresh_token: The string that was returned when you created the token, + which enables you to extend its life. If you specify the `refresh_token` + grant type, this parameter is required. This parameter is not valid with + any other supported grant type. + :param scope: The scope of the token. Currently tokens are only issued for a + scope of FULL regardless of the value sent with the request. + :param username: The username that identifies the user. If you specify the `password` + grant type, this parameter is required. This parameter is not valid with + any other supported grant type. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/oauth2/token" @@ -1992,13 +2088,13 @@ def get_user( """ Get users. Get information about users in the native realm and built-in users. - ``_ + ``_ :param username: An identifier for the user. You can specify multiple usernames as a comma-separated list. If you omit this parameter, the API retrieves information about all users. - :param with_profile_uid: If true will return the User Profile ID for a user, - if any. + :param with_profile_uid: Determines whether to retrieve the user profile UID, + if it exists, for the users. """ __path_parts: t.Dict[str, str] if username not in SKIP_IN_PATH: @@ -2041,9 +2137,12 @@ def get_user_privileges( username: t.Optional[t.Union[None, str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get user privileges. + Get user privileges. Get the security privileges for the logged in user. All + users can use this API, but only to determine their own privileges. To check + the privileges of other users, you must use the run as feature. To check whether + a user has a specific list of privileges, use the has privileges API. - ``_ + ``_ :param application: The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, @@ -2097,7 +2196,7 @@ def get_user_profile( applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. - ``_ + ``_ :param uid: A unique identifier for the user profile. :param data: A comma-separated list of filters for the `data` field of the profile @@ -2162,28 +2261,30 @@ def grant_api_key( Grant an API key. Create an API key on behalf of another user. This API is similar to the create API keys API, however it creates the API key for a user that is different than the user that runs the API. The caller must have authentication - credentials (either an access token, or a username and password) for the user - on whose behalf the API key will be created. It is not possible to use this API - to create an API key without that user’s credentials. 
The user, for whom the - authentication credentials is provided, can optionally "run as" (impersonate) - another user. In this case, the API key will be created on behalf of the impersonated - user. This API is intended be used by applications that need to create and manage - API keys for end users, but cannot guarantee that those users have permission - to create API keys on their own behalf. A successful grant API key API call returns - a JSON structure that contains the API key, its unique id, and its name. If applicable, + credentials for the user on whose behalf the API key will be created. It is not + possible to use this API to create an API key without that user's credentials. + The supported user authentication credential types are: * username and password + * Elasticsearch access tokens * JWTs The user, for whom the authentication credentials + is provided, can optionally "run as" (impersonate) another user. In this case, + the API key will be created on behalf of the impersonated user. This API is intended + be used by applications that need to create and manage API keys for end users, + but cannot guarantee that those users have permission to create API keys on their + own behalf. The API keys are created by the Elasticsearch API key service, which + is automatically enabled. A successful grant API key API call returns a JSON + structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds. By default, API keys never expire. You can specify expiration information when you create the API keys. - ``_ + ``_ - :param api_key: Defines the API key. + :param api_key: The API key. :param grant_type: The type of grant. Supported grant types are: `access_token`, `password`. - :param access_token: The user’s access token. If you specify the `access_token` + :param access_token: The user's access token. If you specify the `access_token` grant type, this parameter is required. It is not valid with other grant types. - :param password: The user’s password. If you specify the `password` grant type, + :param password: The user's password. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. :param run_as: The name of the user to be impersonated. :param username: The user name that identifies the user. If you specify the `password` @@ -2315,9 +2416,10 @@ def has_privileges( ) -> ObjectApiResponse[t.Any]: """ Check user privileges. Determine whether the specified user has a specified list - of privileges. + of privileges. All users can use this API, but only to determine their own privileges. + To check the privileges of other users, you must use the run as feature. - ``_ + ``_ :param user: Username :param application: @@ -2381,7 +2483,7 @@ def has_privileges_user_profile( applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice. - ``_ + ``_ :param privileges: An object containing all the privileges to be checked. :param uids: A list of profile IDs. The privileges are checked for associated @@ -2442,29 +2544,33 @@ def invalidate_api_key( key or grant API key APIs. Invalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically - deleted. The `manage_api_key` privilege allows deleting any API keys. 
The `manage_own_api_key` - only allows deleting API keys that are owned by the user. In addition, with the - `manage_own_api_key` privilege, an invalidation request must be issued in one - of the three formats: - Set the parameter `owner=true`. - Or, set both `username` - and `realm_name` to match the user’s identity. - Or, if the request is issued - by an API key, that is to say an API key invalidates itself, specify its ID in - the `ids` field. - - ``_ + deleted. To use this API, you must have at least the `manage_security`, `manage_api_key`, + or `manage_own_api_key` cluster privileges. The `manage_security` privilege allows + deleting any API key, including both REST and cross cluster API keys. The `manage_api_key` + privilege allows deleting any REST API key, but not cross cluster API keys. The + `manage_own_api_key` only allows deleting REST API keys that are owned by the + user. In addition, with the `manage_own_api_key` privilege, an invalidation request + must be issued in one of the three formats: - Set the parameter `owner=true`. + - Or, set both `username` and `realm_name` to match the user's identity. - Or, + if the request is issued by an API key, that is to say an API key invalidates + itself, specify its ID in the `ids` field. + + ``_ :param id: :param ids: A list of API key ids. This parameter cannot be used with any of `name`, `realm_name`, or `username`. :param name: An API key name. This parameter cannot be used with any of `ids`, `realm_name` or `username`. - :param owner: Can be used to query API keys owned by the currently authenticated - user. The `realm_name` or `username` parameters cannot be specified when - this parameter is set to `true` as they are assumed to be the currently authenticated - ones. + :param owner: Query API keys owned by the currently authenticated user. The `realm_name` + or `username` parameters cannot be specified when this parameter is set to + `true` as they are assumed to be the currently authenticated ones. NOTE: + At least one of `ids`, `name`, `username`, and `realm_name` must be specified + if `owner` is `false`. :param realm_name: The name of an authentication realm. This parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`. :param username: The username of a user. This parameter cannot be used with either - `ids` or `name`, or when `owner` flag is set to `true`. + `ids` or `name` or when `owner` flag is set to `true`. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/api_key" @@ -2524,14 +2630,21 @@ def invalidate_token( longer be used. The time period is defined by the `xpack.security.authc.token.timeout` setting. The refresh tokens returned by the get token API are only valid for 24 hours. They can also be used exactly once. If you want to invalidate one or - more access or refresh tokens immediately, use this invalidate token API. + more access or refresh tokens immediately, use this invalidate token API. NOTE: + While all parameters are optional, at least one of them is required. More specifically, + either one of `token` or `refresh_token` parameters is required. If none of these + two are specified, then `realm_name` and/or `username` need to be specified. - ``_ + ``_ - :param realm_name: - :param refresh_token: - :param token: - :param username: + :param realm_name: The name of an authentication realm. This parameter cannot + be used with either `refresh_token` or `token`. + :param refresh_token: A refresh token. 
This parameter cannot be used if any of + `refresh_token`, `realm_name`, or `username` are used. + :param token: An access token. This parameter cannot be used if any of `refresh_token`, + `realm_name`, or `username` are used. + :param username: The username of a user. This parameter cannot be used with either + `refresh_token` or `token`. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/oauth2/token" @@ -2589,7 +2702,7 @@ def oidc_authenticate( are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients. - ``_ + ``_ :param nonce: Associate a client session with an ID token and mitigate replay attacks. This value needs to be the same as the one that was provided to @@ -2670,7 +2783,7 @@ def oidc_logout( Connect based authentication, but can also be used by other, custom web applications or other clients. - ``_ + ``_ :param access_token: The access token to be invalidated. :param refresh_token: The refresh token to be invalidated. @@ -2733,7 +2846,7 @@ def oidc_prepare_authentication( Connect based authentication, but can also be used by other, custom web applications or other clients. - ``_ + ``_ :param iss: In the case of a third party initiated single sign on, this is the issuer identifier for the OP that the RP is to send the authentication request @@ -2808,9 +2921,22 @@ def put_privileges( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update application privileges. - - ``_ + Create or update application privileges. To use this API, you must have one of + the following privileges: * The `manage_security` cluster privilege (or a greater + privilege such as `all`). * The "Manage Application Privileges" global privilege + for the application being referenced in the request. Application names are formed + from a prefix, with an optional suffix that conform to the following rules: * + The prefix must begin with a lowercase ASCII letter. * The prefix must contain + only ASCII letters or digits. * The prefix must be at least 3 characters long. + * If the suffix exists, it must begin with either a dash `-` or `_`. * The suffix + cannot contain any of the following characters: `\\`, `/`, `*`, `?`, `"`, `<`, + `>`, `|`, `,`, `*`. * No part of the name can contain whitespace. Privilege names + must begin with a lowercase ASCII letter and must contain only ASCII letters + and digits along with the characters `_`, `-`, and `.`. Action names can contain + any number of printable ASCII characters and must contain at least one of the + following characters: `/`, `*`, `:`. + + ``_ :param privileges: :param refresh: If `true` (the default) then refresh the affected shards to make @@ -2959,7 +3085,7 @@ def put_role( The create or update roles API cannot update roles that are defined in roles files. File-based role management is not available in Elastic Serverless. - ``_ + ``_ :param name: The name of the role. :param applications: A list of application privilege entries. @@ -2976,7 +3102,10 @@ def put_role( this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. :param remote_cluster: A list of remote cluster permissions entries. - :param remote_indices: A list of remote indices permissions entries. + :param remote_indices: A list of remote indices permissions entries. NOTE: Remote + indices are effective for remote clusters configured with the API key based + model. 
They have no effect for remote clusters configured with the certificate + based model. :param run_as: A list of users that the owners of this role can impersonate. *Note*: in Serverless, the run-as feature is disabled. For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will @@ -3071,21 +3200,45 @@ def put_role_mapping( that are granted to those users. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role - mapping files. This API does not create roles. Rather, it maps users to existing - roles. Roles can be created by using the create or update roles API or roles - files. + mapping files. NOTE: This API does not create roles. Rather, it maps users to + existing roles. Roles can be created by using the create or update roles API + or roles files. **Role templates** The most common use for role mappings is to + create a mapping from a known value on the user to a fixed role name. For example, + all users in the `cn=admin,dc=example,dc=com` LDAP group should be given the + superuser role in Elasticsearch. The `roles` field is used for this purpose. + For more complex needs, it is possible to use Mustache templates to dynamically + determine the names of the roles that should be granted to the user. The `role_templates` + field is used for this purpose. NOTE: To use role templates successfully, the + relevant scripting feature must be enabled. Otherwise, all attempts to create + a role mapping with role templates fail. All of the user fields that are available + in the role mapping rules are also available in the role templates. Thus it is + possible to assign a user to a role that reflects their username, their groups, + or the name of the realm to which they authenticated. By default a template is + evaluated to produce a single string that is the name of the role which should + be assigned to the user. If the format of the template is set to "json" then + the template is expected to produce a JSON string or an array of JSON strings + for the role names. + + ``_ - ``_ - - :param name: Role-mapping name - :param enabled: - :param metadata: + :param name: The distinct name that identifies the role mapping. The name is + used solely as an identifier to facilitate interaction via the API; it does + not affect the behavior of the mapping in any way. + :param enabled: Mappings that have `enabled` set to `false` are ignored when + role mapping is performed. + :param metadata: Additional metadata that helps define which roles are assigned + to each user. Within the metadata object, keys beginning with `_` are reserved + for system usage. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. - :param role_templates: - :param roles: - :param rules: + :param role_templates: A list of Mustache templates that will be evaluated to + determine the role names that should be granted to the users that match the + role mapping rules. Exactly one of `roles` or `role_templates` must be specified. + :param roles: A list of role names that are granted to the users that match the + role mapping rules. Exactly one of `roles` or `role_templates` must be specified.
+ :param rules: The rules that determine which users should be matched by the mapping. + A rule is a logical condition that is expressed by using a JSON DSL. :param run_as: """ if name in SKIP_IN_PATH: @@ -3160,23 +3313,38 @@ def put_user( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update users. A password is required for adding a new user but is optional - when updating an existing user. To change a user’s password without updating - any other fields, use the change password API. - - ``_ - - :param username: The username of the User - :param email: - :param enabled: - :param full_name: - :param metadata: - :param password: - :param password_hash: - :param refresh: If `true` (the default) then refresh the affected shards to make - this operation visible to search, if `wait_for` then wait for a refresh to - make this operation visible to search, if `false` then do nothing with refreshes. - :param roles: + Create or update users. Add and update users in the native realm. A password + is required for adding a new user but is optional when updating an existing user. + To change a user's password without updating any other fields, use the change + password API. + + ``_ + + :param username: An identifier for the user. NOTE: Usernames must be at least + 1 and no more than 507 characters. They can contain alphanumeric characters + (a-z, A-Z, 0-9), spaces, punctuation, and printable symbols in the Basic + Latin (ASCII) block. Leading or trailing whitespace is not allowed. + :param email: The email of the user. + :param enabled: Specifies whether the user is enabled. + :param full_name: The full name of the user. + :param metadata: Arbitrary metadata that you want to associate with the user. + :param password: The user's password. Passwords must be at least 6 characters + long. When adding a user, one of `password` or `password_hash` is required. + When updating an existing user, the password is optional, so that other fields + on the user (such as their roles) may be updated without modifying the user's + password + :param password_hash: A hash of the user's password. This must be produced using + the same hashing algorithm as has been configured for password storage. For + more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` + setting in the user cache and password hash algorithm documentation. Using + this parameter allows the client to pre-hash the password for performance + and/or confidentiality reasons. The `password` parameter and the `password_hash` + parameter cannot be used in the same request. + :param refresh: Valid values are `true`, `false`, and `wait_for`. These values + have the same meaning as in the index API, but the default value for this + API is true. + :param roles: A set of roles the user has. The roles determine the user's access + permissions. To create a user without any roles, specify an empty list (`[]`). """ if username in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'username'") @@ -3260,9 +3428,14 @@ def query_api_keys( ) -> ObjectApiResponse[t.Any]: """ Find API keys with a query. Get a paginated list of API keys and their information. - You can optionally filter the results with a query. + You can optionally filter the results with a query. To use this API, you must + have at least the `manage_own_api_key` or the `read_security` cluster privileges. + If you have only the `manage_own_api_key` privilege, this API returns only the + API keys that you own. 
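Not part of the patch — following the LDAP example from the role mapping docstring above, a sketch that grants `superuser` to members of that group (the mapping name and connection are placeholders):

from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")  # placeholder endpoint/credentials

client.security.put_role_mapping(
    name="ldap-admins",
    enabled=True,
    roles=["superuser"],  # exactly one of roles or role_templates
    rules={"field": {"groups": "cn=admin,dc=example,dc=com"}},  # JSON DSL rule from the docstring
)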
If you have the `read_security`, `manage_api_key`, or + greater privileges (including `manage_security`), this API returns all API keys + regardless of ownership. - ``_ + ``_ :param aggregations: Any aggregations to run over the corpus of returned API keys. Aggregations and queries work together. Aggregations are computed only @@ -3276,30 +3449,39 @@ def query_api_keys( `terms`, `range`, `date_range`, `missing`, `cardinality`, `value_count`, `composite`, `filter`, and `filters`. Additionally, aggregations only run over the same subset of fields that query works with. - :param from_: Starting document offset. By default, you cannot page through more - than 10,000 hits using the from and size parameters. To page through more - hits, use the `search_after` parameter. + :param from_: The starting document offset. It must not be negative. By default, + you cannot page through more than 10,000 hits using the `from` and `size` + parameters. To page through more hits, use the `search_after` parameter. :param query: A query to filter which API keys to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following public information associated with an API key: `id`, `type`, `name`, `creation`, `expiration`, `invalidated`, `invalidation`, - `username`, `realm`, and `metadata`. - :param search_after: Search after definition - :param size: The number of hits to return. By default, you cannot page through - more than 10,000 hits using the `from` and `size` parameters. To page through - more hits, use the `search_after` parameter. - :param sort: Other than `id`, all public fields of an API key are eligible for - sorting. In addition, sort can also be applied to the `_doc` field to sort - by index order. + `username`, `realm`, and `metadata`. NOTE: The queryable string values associated + with API keys are internally mapped as keywords. Consequently, if no `analyzer` + parameter is specified for a `match` query, then the provided match query + string is interpreted as a single keyword value. Such a match query is hence + equivalent to a `term` query. + :param search_after: The search after definition. + :param size: The number of hits to return. It must not be negative. The `size` + parameter can be set to `0`, in which case no API key matches are returned, + only the aggregation results. By default, you cannot page through more than + 10,000 hits using the `from` and `size` parameters. To page through more + hits, use the `search_after` parameter. + :param sort: The sort definition. Other than `id`, all public fields of an API + key are eligible for sorting. In addition, sort can also be applied to the + `_doc` field to sort by index order. :param typed_keys: Determines whether aggregation names are prefixed by their respective types in the response. :param with_limited_by: Return the snapshot of the owner user's role descriptors associated with the API key. An API key's actual permission is the intersection - of its assigned role descriptors and the owner user's role descriptors. - :param with_profile_uid: Determines whether to also retrieve the profile uid, - for the API key owner principal, if it exists. + of its assigned role descriptors and the owner user's role descriptors (effectively + limited by it). 
An API key cannot retrieve any API key’s limited-by role + descriptors (including itself) unless it has `manage_api_key` or higher privileges. + :param with_profile_uid: Determines whether to also retrieve the profile UID + for the API key owner principal. If it exists, the profile UID is returned + under the `profile_uid` response field for each API key. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/_query/api_key" @@ -3386,26 +3568,30 @@ def query_role( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Find roles with a query. Get roles in a paginated manner. You can optionally - filter the results with a query. + Find roles with a query. Get roles in a paginated manner. The role management + APIs are generally the preferred way to manage roles, rather than using file-based + role management. The query roles API does not retrieve roles that are defined + in roles files, nor built-in ones. You can optionally filter the results with + a query. Also, the results can be paginated and sorted. - ``_ + ``_ - :param from_: Starting document offset. By default, you cannot page through more - than 10,000 hits using the from and size parameters. To page through more - hits, use the `search_after` parameter. + :param from_: The starting document offset. It must not be negative. By default, + you cannot page through more than 10,000 hits using the `from` and `size` + parameters. To page through more hits, use the `search_after` parameter. :param query: A query to filter which roles to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following information associated with roles: `name`, `description`, - `metadata`, `applications.application`, `applications.privileges`, `applications.resources`. - :param search_after: Search after definition - :param size: The number of hits to return. By default, you cannot page through - more than 10,000 hits using the `from` and `size` parameters. To page through - more hits, use the `search_after` parameter. - :param sort: All public fields of a role are eligible for sorting. In addition, - sort can also be applied to the `_doc` field to sort by index order. + `metadata`, `applications.application`, `applications.privileges`, and `applications.resources`. + :param search_after: The search after definition. + :param size: The number of hits to return. It must not be negative. By default, + you cannot page through more than 10,000 hits using the `from` and `size` + parameters. To page through more hits, use the `search_after` parameter. + :param sort: The sort definition. You can sort on `username`, `roles`, or `enabled`. + In addition, sort can also be applied to the `_doc` field to sort by index + order. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/_query/role" @@ -3473,27 +3659,30 @@ def query_user( ) -> ObjectApiResponse[t.Any]: """ Find users with a query. Get information for users in a paginated manner. You - can optionally filter the results with a query. + can optionally filter the results with a query. NOTE: As opposed to the get user + API, built-in users are excluded from the result. This API is only for native + users. - ``_ + ``_ - :param from_: Starting document offset. By default, you cannot page through more - than 10,000 hits using the from and size parameters. 
To page through more - hits, use the `search_after` parameter. + :param from_: The starting document offset. It must not be negative. By default, + you cannot page through more than 10,000 hits using the `from` and `size` + parameters. To page through more hits, use the `search_after` parameter. :param query: A query to filter which users to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following information associated with user: `username`, - `roles`, `enabled` - :param search_after: Search after definition - :param size: The number of hits to return. By default, you cannot page through - more than 10,000 hits using the `from` and `size` parameters. To page through - more hits, use the `search_after` parameter. - :param sort: Fields eligible for sorting are: username, roles, enabled In addition, - sort can also be applied to the `_doc` field to sort by index order. - :param with_profile_uid: If true will return the User Profile ID for the users - in the query result, if any. + `roles`, `enabled`, `full_name`, and `email`. + :param search_after: The search after definition + :param size: The number of hits to return. It must not be negative. By default, + you cannot page through more than 10,000 hits using the `from` and `size` + parameters. To page through more hits, use the `search_after` parameter. + :param sort: The sort definition. Fields eligible for sorting are: `username`, + `roles`, `enabled`. In addition, sort can also be applied to the `_doc` field + to sort by index order. + :param with_profile_uid: Determines whether to retrieve the user profile UID, + if it exists, for the users. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/_query/user" @@ -3565,7 +3754,7 @@ def saml_authenticate( Elasticsearch access and refresh tokens, which can be used for authentication against Elasticsearch. - ``_ + ``_ :param content: The SAML response as it was sent by the user's browser, usually a Base64 encoded XML document. @@ -3636,7 +3825,7 @@ def saml_complete_logout( of this API must prepare the request accordingly so that this API can handle either of them. - ``_ + ``_ :param ids: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. @@ -3710,7 +3899,7 @@ def saml_invalidate( to that specific SAML principal and provides a URL that contains a SAML LogoutResponse message. Thus the user can be redirected back to their IdP. - ``_ + ``_ :param query_string: The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout. This query should include @@ -3784,7 +3973,7 @@ def saml_logout( a URL to redirect the user to the IdP that contains a SAML logout request (starting an SP-initiated SAML Single Logout). - ``_ + ``_ :param token: The access token that was returned as a response to calling the SAML authenticate API. Alternatively, the most recent token that was received @@ -3854,7 +4043,7 @@ def saml_prepare_authentication( request. The caller of this API needs to store this identifier as it needs to be used in a following step of the authentication process. - ``_ + ``_ :param acs: The Assertion Consumer Service URL that matches the one of the SAML realms in Elasticsearch. 
The realm is used to generate the authentication @@ -3913,7 +4102,7 @@ def saml_service_provider_metadata( This API generates Service Provider metadata based on the configuration of a SAML realm in Elasticsearch. - ``_ + ``_ :param realm_name: The name of the SAML realm in Elasticsearch. """ @@ -3964,7 +4153,7 @@ def suggest_user_profiles( Elastic reserves the right to change or remove this feature in future releases without prior notice. - ``_ + ``_ :param data: A comma-separated list of filters for the `data` field of the profile document. To return all content use `data=*`. To return a subset of content, @@ -4033,38 +4222,44 @@ def update_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update an API key. Updates attributes of an existing API key. Users can only - update API keys that they created or that were granted to them. Use this API - to update API keys created by the create API Key or grant API Key APIs. If you - need to apply the same update to many API keys, you can use bulk update API Keys - to reduce overhead. It’s not possible to update expired API keys, or API keys - that have been invalidated by invalidate API Key. This API supports updates to - an API key’s access scope and metadata. The access scope of an API key is derived - from the `role_descriptors` you specify in the request, and a snapshot of the - owner user’s permissions at the time of the request. The snapshot of the owner’s - permissions is updated automatically on every call. If you don’t specify `role_descriptors` - in the request, a call to this API might still change the API key’s access scope. - This change can occur if the owner user’s permissions have changed since the - API key was created or last modified. To update another user’s API key, use the - `run_as` feature to submit a request on behalf of another user. IMPORTANT: It’s - not possible to use an API key as the authentication credential for this API. - To update an API key, the owner user’s credentials are required. - - ``_ + Update an API key. Update attributes of an existing API key. This API supports + updates to an API key's access scope, expiration, and metadata. To use this API, + you must have at least the `manage_own_api_key` cluster privilege. Users can + only update API keys that they created or that were granted to them. To update + another user’s API key, use the `run_as` feature to submit a request on behalf + of another user. IMPORTANT: It's not possible to use an API key as the authentication + credential for this API. The owner user’s credentials are required. Use this + API to update API keys created by the create API key or grant API Key APIs. If + you need to apply the same update to many API keys, you can use the bulk update + API keys API to reduce overhead. It's not possible to update expired API keys + or API keys that have been invalidated by the invalidate API key API. The access + scope of an API key is derived from the `role_descriptors` you specify in the + request and a snapshot of the owner user's permissions at the time of the request. + The snapshot of the owner's permissions is updated automatically on every call. + IMPORTANT: If you don't specify `role_descriptors` in the request, a call to + this API might still change the API key's access scope. This change can occur + if the owner user's permissions have changed since the API key was created or + last modified. + + ``_ :param id: The ID of the API key to update. - :param expiration: Expiration time for the API key. 
+ :param expiration: The expiration time for the API key. By default, API keys + never expire. This property can be omitted to leave the expiration unchanged. :param metadata: Arbitrary metadata that you want to associate with the API key. - It supports nested data structure. Within the metadata object, keys beginning - with _ are reserved for system usage. - :param role_descriptors: An array of role descriptors for this API key. This - parameter is optional. When it is not specified or is an empty array, then - the API key will have a point in time snapshot of permissions of the authenticated - user. If you supply role descriptors then the resultant permissions would - be an intersection of API keys permissions and authenticated user’s permissions - thereby limiting the access scope for API keys. The structure of role descriptor - is the same as the request for create role API. For more details, see create - or update roles API. + It supports a nested data structure. Within the metadata object, keys beginning + with `_` are reserved for system usage. When specified, this value fully + replaces the metadata previously associated with the API key. + :param role_descriptors: The role descriptors to assign to this API key. The + API key's effective permissions are an intersection of its assigned privileges + and the point in time snapshot of permissions of the owner user. You can + assign new privileges by specifying them in this parameter. To remove assigned + privileges, you can supply an empty `role_descriptors` parameter, that is + to say, an empty object `{}`. If an API key has no assigned privileges, it + inherits the owner user's full permissions. The snapshot of the owner's permissions + is always updated, whether you supply the `role_descriptors` parameter or + not. The structure of a role descriptor is the same as the request for the + create API keys API. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -4133,7 +4328,7 @@ def update_cross_cluster_api_key( API keys, which should be updated by either the update API key or bulk update API keys API. - ``_ + ``_ :param id: The ID of the cross-cluster API key to update. :param access: The access to be granted to this API key. The access is composed @@ -4205,12 +4400,14 @@ def update_settings( """ Update security index settings. Update the user-configurable settings for the security internal index (`.security` and associated indices). Only a subset of - settings are allowed to be modified, for example `index.auto_expand_replicas` - and `index.number_of_replicas`. If a specific index is not in use on the system - and settings are provided for it, the request will be rejected. This API does - not yet support configuring the settings for indices before they are in use. + settings are allowed to be modified. This includes `index.auto_expand_replicas` + and `index.number_of_replicas`. NOTE: If `index.auto_expand_replicas` is set, + `index.number_of_replicas` will be ignored during updates. If a specific index + is not in use on the system and settings are provided for it, the request will + be rejected. This API does not yet support configuring the settings for indices + before they are in use. - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails @@ -4291,7 +4488,7 @@ def update_user_profile_data( data, content is namespaced by the top-level fields. 
The `update_profile_data` global privilege grants privileges for updating only the allowed namespaces. - ``_ + ``_ :param uid: A unique identifier for the user profile. :param data: Non-searchable data that you want to associate with the user profile. diff --git a/elasticsearch/_sync/client/shutdown.py b/elasticsearch/_sync/client/shutdown.py index 29cdf5ff2..573ba579c 100644 --- a/elasticsearch/_sync/client/shutdown.py +++ b/elasticsearch/_sync/client/shutdown.py @@ -50,7 +50,7 @@ def delete_node( and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator privileges feature is enabled, you must be an operator to use this API. - ``_ + ``_ :param node_id: The node id of node to be removed from the shutdown state :param master_timeout: Period to wait for a connection to the master node. If @@ -108,7 +108,7 @@ def get_node( the operator privileges feature is enabled, you must be an operator to use this API. - ``_ + ``_ :param node_id: Which node for which to retrieve the shutdown status :param master_timeout: Period to wait for a connection to the master node. If @@ -182,7 +182,7 @@ def put_node( IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the node shutdown status to determine when it is safe to stop Elasticsearch. - ``_ + ``_ :param node_id: The node identifier. This parameter is not validated against the cluster's active nodes. This enables you to register a node for shut diff --git a/elasticsearch/_sync/client/simulate.py b/elasticsearch/_sync/client/simulate.py index 9d8dfd544..0139e229f 100644 --- a/elasticsearch/_sync/client/simulate.py +++ b/elasticsearch/_sync/client/simulate.py @@ -87,7 +87,7 @@ def ingest( This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request. - ``_ + ``_ :param docs: Sample documents to test in the pipeline. :param index: The index to simulate ingesting into. This value can be overridden diff --git a/elasticsearch/_sync/client/slm.py b/elasticsearch/_sync/client/slm.py index c28277489..ff7c59c8d 100644 --- a/elasticsearch/_sync/client/slm.py +++ b/elasticsearch/_sync/client/slm.py @@ -42,7 +42,7 @@ def delete_lifecycle( prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots. - ``_ + ``_ :param policy_id: The id of the snapshot lifecycle policy to remove :param master_timeout: The period to wait for a connection to the master node. @@ -96,7 +96,7 @@ def execute_lifecycle( applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance. - ``_ + ``_ :param policy_id: The id of the snapshot lifecycle policy to be executed :param master_timeout: The period to wait for a connection to the master node. @@ -148,7 +148,7 @@ def execute_retention( removal of snapshots that are expired according to the snapshot lifecycle policy retention rules. The retention policy is normally applied according to its schedule. - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails @@ -197,7 +197,7 @@ def get_lifecycle( Get policy information. Get snapshot lifecycle policy definitions and information about the latest snapshot attempts. - ``_ + ``_ :param policy_id: Comma-separated list of snapshot lifecycle policies to retrieve :param master_timeout: The period to wait for a connection to the master node. 
@@ -251,7 +251,7 @@ def get_stats( Get snapshot lifecycle management statistics. Get global and policy-level statistics about actions taken by snapshot lifecycle management. - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and @@ -298,7 +298,7 @@ def get_status( """ Get the snapshot lifecycle management status. - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails @@ -358,7 +358,7 @@ def put_lifecycle( policy already exists, this request increments the policy version. Only the latest version of a policy is stored. - ``_ + ``_ :param policy_id: The identifier for the snapshot lifecycle policy you want to create or update. @@ -441,7 +441,7 @@ def start( automatically when a cluster is formed. Manually starting SLM is necessary only if it has been stopped using the stop SLM API. - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails @@ -498,7 +498,7 @@ def stop( complete and it can be safely stopped. Use the get snapshot lifecycle management status API to see if SLM is running. - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails diff --git a/elasticsearch/_sync/client/snapshot.py b/elasticsearch/_sync/client/snapshot.py index c604be816..3e86e25df 100644 --- a/elasticsearch/_sync/client/snapshot.py +++ b/elasticsearch/_sync/client/snapshot.py @@ -47,7 +47,7 @@ def cleanup_repository( Clean up the snapshot repository. Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots. - ``_ + ``_ :param name: Snapshot repository to clean up. :param master_timeout: Period to wait for a connection to the master node. @@ -101,7 +101,7 @@ def clone( Clone a snapshot. Clone part of all of a snapshot into another snapshot in the same repository. - ``_ + ``_ :param repository: A repository name :param snapshot: The name of the snapshot to clone from @@ -181,7 +181,7 @@ def create( """ Create a snapshot. Take a snapshot of a cluster or of data streams and indices. - ``_ + ``_ :param repository: Repository for the snapshot. :param snapshot: Name of the snapshot. Must be unique in the repository. @@ -289,7 +289,7 @@ def create_repository( be writeable. Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` and `clsuter.blocks.read_only_allow_delete` settings) that prevent write access. - ``_ + ``_ :param name: A repository name :param repository: @@ -349,7 +349,7 @@ def delete( """ Delete snapshots. - ``_ + ``_ :param repository: A repository name :param snapshot: A comma-separated list of snapshot names @@ -402,7 +402,7 @@ def delete_repository( removes only the reference to the location where the repository is storing the snapshots. The snapshots themselves are left untouched and in place. - ``_ + ``_ :param name: Name of the snapshot repository to unregister. Wildcard (`*`) patterns are supported. @@ -476,7 +476,7 @@ def get( """ Get snapshot information. - ``_ + ``_ :param repository: Comma-separated list of snapshot repository names used to limit the request. Wildcard (*) expressions are supported. @@ -588,7 +588,7 @@ def get_repository( """ Get snapshot repository information. 
- ``_ + ``_ :param name: A comma-separated list of repository names :param local: Return local information, do not retrieve the state from master @@ -763,7 +763,7 @@ def repository_analyze( Some operations also verify the behavior on small blobs with sizes other than 8 bytes. - ``_ + ``_ :param name: The name of the repository. :param blob_count: The total number of blobs to write to the repository during @@ -899,7 +899,7 @@ def repository_verify_integrity( in future versions. NOTE: This API may not work correctly in a mixed-version cluster. - ``_ + ``_ :param name: A repository name :param blob_thread_pool_concurrency: Number of threads to use for reading blob @@ -1009,7 +1009,7 @@ def restore( or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot. - ``_ + ``_ :param repository: A repository name :param snapshot: A snapshot name @@ -1113,7 +1113,7 @@ def status( These requests can also tax machine resources and, when using cloud storage, incur high processing costs. - ``_ + ``_ :param repository: A repository name :param snapshot: A comma-separated list of snapshot names @@ -1173,7 +1173,7 @@ def verify_repository( Verify a snapshot repository. Check for common misconfigurations in a snapshot repository. - ``_ + ``_ :param name: A repository name :param master_timeout: Explicit operation timeout for connection to master node diff --git a/elasticsearch/_sync/client/sql.py b/elasticsearch/_sync/client/sql.py index e8de30d55..d56edbd03 100644 --- a/elasticsearch/_sync/client/sql.py +++ b/elasticsearch/_sync/client/sql.py @@ -41,7 +41,7 @@ def clear_cursor( """ Clear an SQL search cursor. - ``_ + ``_ :param cursor: Cursor to clear. """ @@ -90,7 +90,7 @@ def delete_async( a search: * Users with the `cancel_task` cluster privilege. * The user who first submitted the search. - ``_ + ``_ :param id: The identifier for the search. """ @@ -139,7 +139,7 @@ def get_async( features are enabled, only the user who first submitted the SQL search can retrieve the search using this API. - ``_ + ``_ :param id: The identifier for the search. :param delimiter: The separator for CSV results. The API supports this parameter @@ -198,7 +198,7 @@ def get_async_status( Get the async SQL search status. Get the current status of an async SQL search or a stored synchronous SQL search. - ``_ + ``_ :param id: The identifier for the search. """ @@ -283,7 +283,7 @@ def query( """ Get SQL search results. Run an SQL request. - ``_ + ``_ :param allow_partial_search_results: If `true`, the response has partial results when there are shard request timeouts or shard failures. If `false`, the @@ -406,7 +406,7 @@ def translate( API request containing Query DSL. It accepts the same request body parameters as the SQL search API, excluding `cursor`. - ``_ + ``_ :param query: The SQL query to run. :param fetch_size: The maximum number of rows (or entries) to return in one response. diff --git a/elasticsearch/_sync/client/ssl.py b/elasticsearch/_sync/client/ssl.py index 9faa52fad..1f3cb3bed 100644 --- a/elasticsearch/_sync/client/ssl.py +++ b/elasticsearch/_sync/client/ssl.py @@ -53,7 +53,7 @@ def certificates( the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster. 
- ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ssl/certificates" diff --git a/elasticsearch/_sync/client/synonyms.py b/elasticsearch/_sync/client/synonyms.py index 606a85b04..92639b50c 100644 --- a/elasticsearch/_sync/client/synonyms.py +++ b/elasticsearch/_sync/client/synonyms.py @@ -52,7 +52,7 @@ def delete_synonym( finished, you can delete the index. When the synonyms set is not used in analyzers, you will be able to delete it. - ``_ + ``_ :param id: The synonyms set identifier to delete. """ @@ -93,7 +93,7 @@ def delete_synonym_rule( """ Delete a synonym rule. Delete a synonym rule from a synonym set. - ``_ + ``_ :param set_id: The ID of the synonym set to update. :param rule_id: The ID of the synonym rule to delete. @@ -143,7 +143,7 @@ def get_synonym( """ Get a synonym set. - ``_ + ``_ :param id: The synonyms set identifier to retrieve. :param from_: The starting offset for query rules to retrieve. @@ -190,7 +190,7 @@ def get_synonym_rule( """ Get a synonym rule. Get a synonym rule from a synonym set. - ``_ + ``_ :param set_id: The ID of the synonym set to retrieve the synonym rule from. :param rule_id: The ID of the synonym rule to retrieve. @@ -239,7 +239,7 @@ def get_synonyms_sets( """ Get all synonym sets. Get a summary of all defined synonym sets. - ``_ + ``_ :param from_: The starting offset for synonyms sets to retrieve. :param size: The maximum number of synonyms sets to retrieve. @@ -293,7 +293,7 @@ def put_synonym( equivalent to invoking the reload search analyzers API for all indices that use the synonyms set. - ``_ + ``_ :param id: The ID of the synonyms set to be created or updated. :param synonyms_set: The synonym rules definitions for the synonyms set. @@ -349,7 +349,7 @@ def put_synonym_rule( When you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule. - ``_ + ``_ :param set_id: The ID of the synonym set. :param rule_id: The ID of the synonym rule to be updated or created. diff --git a/elasticsearch/_sync/client/tasks.py b/elasticsearch/_sync/client/tasks.py index 09d2c6be3..fe0fd20be 100644 --- a/elasticsearch/_sync/client/tasks.py +++ b/elasticsearch/_sync/client/tasks.py @@ -61,7 +61,7 @@ def cancel( threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task. - ``_ + ``_ :param task_id: The task identifier. :param actions: A comma-separated list or wildcard expression of actions that @@ -126,7 +126,7 @@ def get( task identifier is not found, a 404 response code indicates that there are no resources that match the request. - ``_ + ``_ :param task_id: The task identifier. :param timeout: The period to wait for a response. If no response is received @@ -203,7 +203,7 @@ def list( initiated by the REST request. The `X-Opaque-Id` in the children `headers` is the child task of the task that was initiated by the REST request. - ``_ + ``_ :param actions: A comma-separated list or wildcard expression of actions used to limit the request. For example, you can use `cluser:*` to retrieve all diff --git a/elasticsearch/_sync/client/text_structure.py b/elasticsearch/_sync/client/text_structure.py index bdfe65747..2acc56893 100644 --- a/elasticsearch/_sync/client/text_structure.py +++ b/elasticsearch/_sync/client/text_structure.py @@ -70,7 +70,7 @@ def find_field_structure( `explain` query parameter and an explanation will appear in the response. It helps determine why the returned structure was chosen. 
- ``_ + ``_ :param field: The field that should be analyzed. :param index: The name of the index that contains the analyzed field. @@ -255,7 +255,7 @@ def find_message_structure( an explanation will appear in the response. It helps determine why the returned structure was chosen. - ``_ + ``_ :param messages: The list of messages you want to analyze. :param column_names: If the format is `delimited`, you can specify the column @@ -427,7 +427,7 @@ def find_structure( However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters. - ``_ + ``_ :param text_files: :param charset: The text's character set. It must be a character set that is @@ -611,7 +611,7 @@ def test_grok_pattern( indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings. - ``_ + ``_ :param grok_pattern: The Grok pattern to run on the text. :param text: The lines of text to run the Grok pattern on. diff --git a/elasticsearch/_sync/client/transform.py b/elasticsearch/_sync/client/transform.py index 2faf60167..a94fca7b4 100644 --- a/elasticsearch/_sync/client/transform.py +++ b/elasticsearch/_sync/client/transform.py @@ -41,7 +41,7 @@ def delete_transform( """ Delete a transform. Deletes a transform. - ``_ + ``_ :param transform_id: Identifier for the transform. :param delete_dest_index: If this value is true, the destination index is deleted @@ -101,7 +101,7 @@ def get_transform( """ Get transforms. Retrieves configuration information for transforms. - ``_ + ``_ :param transform_id: Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using @@ -170,7 +170,7 @@ def get_transform_stats( """ Get transform stats. Retrieves usage information for transforms. - ``_ + ``_ :param transform_id: Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using @@ -256,7 +256,7 @@ def preview_transform( These values are determined based on the field types of the source index and the transform aggregations. - ``_ + ``_ :param transform_id: Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform configuration details in @@ -393,7 +393,7 @@ def put_transform( If you used transforms prior to 7.5, also do not give users any privileges on `.data-frame-internal*` indices. - ``_ + ``_ :param transform_id: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -496,7 +496,7 @@ def reset_transform( it; alternatively, use the `force` query parameter. If the destination index was created by the transform, it is deleted. - ``_ + ``_ :param transform_id: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -552,7 +552,7 @@ def schedule_now_transform( the transform will be processed again at now + frequency unless _schedule_now API is called again in the meantime. - ``_ + ``_ :param transform_id: Identifier for the transform. :param timeout: Controls the time to wait for the scheduling to take place @@ -616,7 +616,7 @@ def start_transform( privileges on the source and destination indices, the transform fails when it attempts unauthorized operations. - ``_ + ``_ :param transform_id: Identifier for the transform. 
:param from_: Restricts the set of transformed entities to those changed after @@ -670,7 +670,7 @@ def stop_transform( """ Stop transforms. Stops one or more transforms. - ``_ + ``_ :param transform_id: Identifier for the transform. To stop multiple transforms, use a comma-separated list or a wildcard expression. To stop all transforms, @@ -770,7 +770,7 @@ def update_transform( which roles the user who updated it had at the time of update and runs with those privileges. - ``_ + ``_ :param transform_id: Identifier for the transform. :param defer_validation: When true, deferrable validations are not run. This @@ -864,7 +864,7 @@ def upgrade_transforms( example, from 7.16 to 8.0 – it is recommended to upgrade transforms before upgrading the cluster. You may want to perform a recent cluster backup prior to the upgrade. - ``_ + ``_ :param dry_run: When true, the request checks for updates but does not run them. :param timeout: Period to wait for a response. If no response is received before diff --git a/elasticsearch/_sync/client/watcher.py b/elasticsearch/_sync/client/watcher.py index cc84cf9e4..d6025c923 100644 --- a/elasticsearch/_sync/client/watcher.py +++ b/elasticsearch/_sync/client/watcher.py @@ -46,7 +46,7 @@ def ack_watch( `ack.state` is reset to `awaits_successful_execution`. This happens when the condition of the watch is not met (the condition evaluates to false). - ``_ + ``_ :param watch_id: The watch identifier. :param action_id: A comma-separated list of the action identifiers to acknowledge. @@ -98,7 +98,7 @@ def activate_watch( """ Activate a watch. A watch can be either active or inactive. - ``_ + ``_ :param watch_id: The watch identifier. """ @@ -138,7 +138,7 @@ def deactivate_watch( """ Deactivate a watch. A watch can be either active or inactive. - ``_ + ``_ :param watch_id: The watch identifier. """ @@ -184,7 +184,7 @@ def delete_watch( delete document API When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the `.watches` index. - ``_ + ``_ :param id: The watch identifier. """ @@ -267,7 +267,7 @@ def execute_watch( that called the API will be used as a base, instead of the information who stored the watch. - ``_ + ``_ :param id: The watch identifier. :param action_modes: Determines how to handle the watch actions as part of the @@ -352,7 +352,7 @@ def get_settings( Only a subset of settings are shown, for example `index.auto_expand_replicas` and `index.number_of_replicas`. - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails @@ -394,7 +394,7 @@ def get_watch( """ Get a watch. - ``_ + ``_ :param id: The watch identifier. """ @@ -468,7 +468,7 @@ def put_watch( for which the user that stored the watch has privileges. If the user is able to read index `a`, but not index `b`, the same will apply when the watch runs. - ``_ + ``_ :param id: The identifier for the watch. :param actions: The list of actions that will be run if the condition matches. @@ -578,7 +578,7 @@ def query_watches( filter watches by a query. Note that only the `_id` and `metadata.*` fields are queryable or sortable. - ``_ + ``_ :param from_: The offset from the first result to fetch. It must be non-negative. :param query: A query that filters the watches to be returned. @@ -649,7 +649,7 @@ def start( """ Start the watch service. Start the Watcher service if it is not already running. 
- ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. """ @@ -711,7 +711,7 @@ def stats( Get Watcher statistics. This API always returns basic metrics. You retrieve more metrics by using the metric parameter. - ``_ + ``_ :param metric: Defines which additional metrics are included in the response. :param emit_stacktraces: Defines whether stack traces are generated for each @@ -758,7 +758,7 @@ def stop( """ Stop the watch service. Stop the Watcher service if it is running. - ``_ + ``_ :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns @@ -812,7 +812,7 @@ def update_settings( (`.watches`). Only a subset of settings can be modified. This includes `index.auto_expand_replicas` and `index.number_of_replicas`. - ``_ + ``_ :param index_auto_expand_replicas: :param index_number_of_replicas: diff --git a/elasticsearch/_sync/client/xpack.py b/elasticsearch/_sync/client/xpack.py index 50a085f57..a2f26ab91 100644 --- a/elasticsearch/_sync/client/xpack.py +++ b/elasticsearch/_sync/client/xpack.py @@ -48,7 +48,7 @@ def info( installed license. * Feature information for the features that are currently enabled and available under the current license. - ``_ + ``_ :param accept_enterprise: If this param is used it must be set to true :param categories: A comma-separated list of the information categories to include @@ -94,7 +94,7 @@ def usage( enabled and available under the current license. The API also provides some usage statistics. - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails From 0566caab7756eaeba58b29777cdb1deff59419c3 Mon Sep 17 00:00:00 2001 From: Quentin Pradet Date: Tue, 21 Jan 2025 17:59:25 +0400 Subject: [PATCH 24/65] Format endpoint descriptions as Markdown (#2760) --- elasticsearch/_async/client/__init__.py | 1358 +++++++++-------- elasticsearch/_async/client/async_search.py | 49 +- elasticsearch/_async/client/autoscaling.py | 48 +- elasticsearch/_async/client/cat.py | 337 ++-- elasticsearch/_async/client/ccr.py | 166 +- elasticsearch/_async/client/cluster.py | 292 ++-- elasticsearch/_async/client/connector.py | 268 ++-- .../_async/client/dangling_indices.py | 32 +- elasticsearch/_async/client/enrich.py | 31 +- elasticsearch/_async/client/eql.py | 31 +- elasticsearch/_async/client/esql.py | 43 +- elasticsearch/_async/client/features.py | 50 +- elasticsearch/_async/client/fleet.py | 24 +- elasticsearch/_async/client/graph.py | 17 +- elasticsearch/_async/client/ilm.py | 140 +- elasticsearch/_async/client/indices.py | 1236 ++++++++------- elasticsearch/_async/client/inference.py | 60 +- elasticsearch/_async/client/ingest.py | 83 +- elasticsearch/_async/client/license.py | 79 +- elasticsearch/_async/client/logstash.py | 23 +- elasticsearch/_async/client/migration.py | 37 +- elasticsearch/_async/client/ml.py | 816 ++++++---- elasticsearch/_async/client/monitoring.py | 7 +- elasticsearch/_async/client/nodes.py | 70 +- elasticsearch/_async/client/query_rules.py | 76 +- elasticsearch/_async/client/rollup.py | 184 ++- .../_async/client/search_application.py | 70 +- .../_async/client/searchable_snapshots.py | 29 +- elasticsearch/_async/client/security.py | 1065 +++++++------ elasticsearch/_async/client/shutdown.py | 57 +- elasticsearch/_async/client/simulate.py | 38 +- elasticsearch/_async/client/slm.py | 85 +- elasticsearch/_async/client/snapshot.py 
| 365 +++-- elasticsearch/_async/client/sql.py | 53 +- elasticsearch/_async/client/ssl.py | 33 +- elasticsearch/_async/client/synonyms.py | 79 +- elasticsearch/_async/client/tasks.py | 113 +- elasticsearch/_async/client/text_structure.py | 113 +- elasticsearch/_async/client/transform.py | 187 ++- elasticsearch/_async/client/watcher.py | 150 +- elasticsearch/_async/client/xpack.py | 23 +- elasticsearch/_sync/client/__init__.py | 1358 +++++++++-------- elasticsearch/_sync/client/async_search.py | 49 +- elasticsearch/_sync/client/autoscaling.py | 48 +- elasticsearch/_sync/client/cat.py | 337 ++-- elasticsearch/_sync/client/ccr.py | 166 +- elasticsearch/_sync/client/cluster.py | 292 ++-- elasticsearch/_sync/client/connector.py | 268 ++-- .../_sync/client/dangling_indices.py | 32 +- elasticsearch/_sync/client/enrich.py | 31 +- elasticsearch/_sync/client/eql.py | 31 +- elasticsearch/_sync/client/esql.py | 43 +- elasticsearch/_sync/client/features.py | 50 +- elasticsearch/_sync/client/fleet.py | 24 +- elasticsearch/_sync/client/graph.py | 17 +- elasticsearch/_sync/client/ilm.py | 140 +- elasticsearch/_sync/client/indices.py | 1236 ++++++++------- elasticsearch/_sync/client/inference.py | 60 +- elasticsearch/_sync/client/ingest.py | 83 +- elasticsearch/_sync/client/license.py | 79 +- elasticsearch/_sync/client/logstash.py | 23 +- elasticsearch/_sync/client/migration.py | 37 +- elasticsearch/_sync/client/ml.py | 816 ++++++---- elasticsearch/_sync/client/monitoring.py | 7 +- elasticsearch/_sync/client/nodes.py | 70 +- elasticsearch/_sync/client/query_rules.py | 76 +- elasticsearch/_sync/client/rollup.py | 184 ++- .../_sync/client/search_application.py | 70 +- .../_sync/client/searchable_snapshots.py | 29 +- elasticsearch/_sync/client/security.py | 1065 +++++++------ elasticsearch/_sync/client/shutdown.py | 57 +- elasticsearch/_sync/client/simulate.py | 38 +- elasticsearch/_sync/client/slm.py | 85 +- elasticsearch/_sync/client/snapshot.py | 365 +++-- elasticsearch/_sync/client/sql.py | 53 +- elasticsearch/_sync/client/ssl.py | 33 +- elasticsearch/_sync/client/synonyms.py | 79 +- elasticsearch/_sync/client/tasks.py | 113 +- elasticsearch/_sync/client/text_structure.py | 113 +- elasticsearch/_sync/client/transform.py | 187 ++- elasticsearch/_sync/client/watcher.py | 150 +- elasticsearch/_sync/client/xpack.py | 23 +- 82 files changed, 9310 insertions(+), 6724 deletions(-) diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py index 7920715f4..1c966b828 100644 --- a/elasticsearch/_async/client/__init__.py +++ b/elasticsearch/_async/client/__init__.py @@ -646,83 +646,89 @@ async def bulk( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Bulk index or delete documents. Perform multiple `index`, `create`, `delete`, - and `update` actions in a single request. This reduces overhead and can greatly - increase indexing speed. If the Elasticsearch security features are enabled, - you must have the following index privileges for the target data stream, index, - or index alias: * To use the `create` action, you must have the `create_doc`, - `create`, `index`, or `write` index privilege. Data streams support only the - `create` action. * To use the `index` action, you must have the `create`, `index`, - or `write` index privilege. * To use the `delete` action, you must have the `delete` - or `write` index privilege. * To use the `update` action, you must have the `index` - or `write` index privilege. 
* To automatically create a data stream or index - with a bulk API request, you must have the `auto_configure`, `create_index`, - or `manage` index privilege. * To make the result of a bulk operation visible - to search using the `refresh` parameter, you must have the `maintenance` or `manage` - index privilege. Automatic data stream creation requires a matching index template - with data stream enabled. The actions are specified in the request body using - a newline delimited JSON (NDJSON) structure: ``` action_and_meta_data\\n optional_source\\n - action_and_meta_data\\n optional_source\\n .... action_and_meta_data\\n optional_source\\n - ``` The `index` and `create` actions expect a source on the next line and have - the same semantics as the `op_type` parameter in the standard index API. A `create` - action fails if a document with the same ID already exists in the target An `index` - action adds or replaces a document as necessary. NOTE: Data streams support only - the `create` action. To update or delete a document in a data stream, you must - target the backing index containing the document. An `update` action expects - that the partial doc, upsert, and script and its options are specified on the - next line. A `delete` action does not expect a source on the next line and has - the same semantics as the standard delete API. NOTE: The final line of data must - end with a newline character (`\\n`). Each newline character may be preceded - by a carriage return (`\\r`). When sending NDJSON data to the `_bulk` endpoint, - use a `Content-Type` header of `application/json` or `application/x-ndjson`. - Because this format uses literal newline characters (`\\n`) as delimiters, make - sure that the JSON actions and sources are not pretty printed. If you provide - a target in the request path, it is used for any actions that don't explicitly - specify an `_index` argument. A note on the format: the idea here is to make - processing as fast as possible. As some of the actions are redirected to other - shards on other nodes, only `action_meta_data` is parsed on the receiving node - side. Client libraries using this protocol should try and strive to do something - similar on the client side, and reduce buffering as much as possible. There is - no "correct" number of actions to perform in a single bulk request. Experiment - with different settings to find the optimal size for your particular workload. - Note that Elasticsearch limits the maximum size of a HTTP request to 100mb by - default so clients must ensure that no request exceeds this size. It is not possible - to index a single document that exceeds the size limit, so you must pre-process - any such documents into smaller pieces before sending them to Elasticsearch. - For instance, split documents into pages or chapters before indexing them, or - store raw binary data in a system outside Elasticsearch and replace the raw data - with a link to the external system in the documents that you send to Elasticsearch. - **Client suppport for bulk requests** Some of the officially supported clients - provide helpers to assist with bulk requests and reindexing: * Go: Check out - `esutil.BulkIndexer` * Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` - and `Search::Elasticsearch::Client::5_0::Scroll` * Python: Check out `elasticsearch.helpers.*` - * JavaScript: Check out `client.helpers.*` * .NET: Check out `BulkAllObservable` - * PHP: Check out bulk indexing. 
**Submitting bulk requests with cURL** If you're - providing text file input to `curl`, you must use the `--data-binary` flag instead - of plain `-d`. The latter doesn't preserve newlines. For example: ``` $ cat requests - { "index" : { "_index" : "test", "_id" : "1" } } { "field1" : "value1" } $ curl - -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary - "@requests"; echo {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]} - ``` **Optimistic concurrency control** Each `index` and `delete` action within - a bulk API call may include the `if_seq_no` and `if_primary_term` parameters - in their respective action and meta data lines. The `if_seq_no` and `if_primary_term` - parameters control how operations are run, based on the last modification to - existing documents. See Optimistic concurrency control for more details. **Versioning** - Each bulk item can include the version value using the `version` field. It automatically - follows the behavior of the index or delete operation based on the `_version` - mapping. It also support the `version_type`. **Routing** Each bulk item can include - the routing value using the `routing` field. It automatically follows the behavior - of the index or delete operation based on the `_routing` mapping. NOTE: Data - streams do not support custom routing unless they were created with the `allow_custom_routing` - setting enabled in the template. **Wait for active shards** When making bulk - calls, you can set the `wait_for_active_shards` parameter to require a minimum - number of shard copies to be active before starting to process the bulk request. - **Refresh** Control when the changes made by this request are visible to search. - NOTE: Only the shards that receive the bulk request will be affected by refresh. - Imagine a `_bulk?refresh=wait_for` request with three documents in it that happen - to be routed to different shards in an index with five shards. The request will - only wait for those three shards to refresh. The other two shards that make up - the index do not participate in the `_bulk` request at all. + .. raw:: html + +

Bulk index or delete documents. + Perform multiple index, create, delete, and update actions in a single request. + This reduces overhead and can greatly increase indexing speed.

+

If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:

+
    +
  • To use the create action, you must have the create_doc, create, index, or write index privilege. Data streams support only the create action.
  • +
  • To use the index action, you must have the create, index, or write index privilege.
  • +
  • To use the delete action, you must have the delete or write index privilege.
  • +
  • To use the update action, you must have the index or write index privilege.
  • +
  • To automatically create a data stream or index with a bulk API request, you must have the auto_configure, create_index, or manage index privilege.
  • +
  • To make the result of a bulk operation visible to search using the refresh parameter, you must have the maintenance or manage index privilege.
  • +
+

Automatic data stream creation requires a matching index template with data stream enabled.

+
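To make the template requirement above concrete, here is a small, hedged sketch of registering an index template with data streams enabled via the Python client; the template name, index pattern, and connection URL are invented for illustration and are not part of this patch.

```python
from elasticsearch import Elasticsearch

# Assumes a reachable local cluster; URL and credentials are placeholders.
es = Elasticsearch("http://localhost:9200")

# A matching index template with `data_stream` enabled must exist before a
# bulk `create` action can auto-create a data stream such as "logs-app-*".
es.indices.put_index_template(
    name="logs-app-template",       # hypothetical template name
    index_patterns=["logs-app-*"],  # hypothetical pattern
    data_stream={},                 # marks matching targets as data streams
    priority=500,
)
```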

The actions are specified in the request body using a newline delimited JSON (NDJSON) structure:

+
action_and_meta_data\\n
+          optional_source\\n
+          action_and_meta_data\\n
+          optional_source\\n
+          ....
+          action_and_meta_data\\n
+          optional_source\\n
+          
+
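As a hedged illustration of the action/source pairing shown above, the sketch below expresses the same NDJSON layout as a list of alternating dictionaries passed to the Python client's bulk call; the index name and documents are placeholders, not content from this patch.

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # placeholder connection

# Alternating action_and_meta_data / optional_source entries, mirroring the
# NDJSON structure above. "my-index" and the field values are invented.
operations = [
    {"index": {"_index": "my-index", "_id": "1"}},
    {"field1": "value1"},
    {"create": {"_index": "my-index", "_id": "2"}},
    {"field1": "value2"},
    {"delete": {"_index": "my-index", "_id": "3"}},  # delete has no source line
]

resp = es.bulk(operations=operations)
print(resp["errors"], len(resp["items"]))
```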

The index and create actions expect a source on the next line and have the same semantics as the op_type parameter in the standard index API. A create action fails if a document with the same ID already exists in the target. An index action adds or replaces a document as necessary.

+

NOTE: Data streams support only the create action. + To update or delete a document in a data stream, you must target the backing index containing the document.

+

An update action expects that the partial doc, upsert, and script and its options are specified on the next line.

+
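A brief, hedged sketch of what such an update action pair might look like when built in Python; the index name, document ID, and fields are illustrative only, and the pair would be appended to the operations list sent to the bulk API.

```python
# The `update` action line is followed by a line carrying the partial doc,
# upsert, and/or script options. All identifiers below are placeholders.
update_pair = [
    {"update": {"_index": "my-index", "_id": "42", "retry_on_conflict": 3}},
    {"doc": {"status": "published"}, "doc_as_upsert": True},
]
```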

A delete action does not expect a source on the next line and has the same semantics as the standard delete API.

+

NOTE: The final line of data must end with a newline character (\\n). + Each newline character may be preceded by a carriage return (\\r). + When sending NDJSON data to the _bulk endpoint, use a Content-Type header of application/json or application/x-ndjson. + Because this format uses literal newline characters (\\n) as delimiters, make sure that the JSON actions and sources are not pretty printed.

+

If you provide a target in the request path, it is used for any actions that don't explicitly specify an _index argument.

+

A note on the format: the idea here is to make processing as fast as possible. + As some of the actions are redirected to other shards on other nodes, only action_meta_data is parsed on the receiving node side.

+

Client libraries using this protocol should try and strive to do something similar on the client side, and reduce buffering as much as possible.

+

There is no "correct" number of actions to perform in a single bulk request. + Experiment with different settings to find the optimal size for your particular workload. + Note that Elasticsearch limits the maximum size of a HTTP request to 100mb by default so clients must ensure that no request exceeds this size. + It is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch. + For instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch.

+

Client support for bulk requests

+

Some of the officially supported clients provide helpers to assist with bulk requests and reindexing:

+
    +
  • Go: Check out esutil.BulkIndexer
  • +
  • Perl: Check out Search::Elasticsearch::Client::5_0::Bulk and Search::Elasticsearch::Client::5_0::Scroll
  • +
  • Python: Check out elasticsearch.helpers.*
  • +
  • JavaScript: Check out client.helpers.*
  • +
  • .NET: Check out BulkAllObservable
  • +
  • PHP: Check out bulk indexing.
  • +
+
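Since the list above points Python users at elasticsearch.helpers.*, here is a minimal, hedged example of the bulk helper; it assumes a local cluster and an invented index name and generator, and is a sketch rather than an excerpt from this patch.

```python
from elasticsearch import Elasticsearch, helpers

es = Elasticsearch("http://localhost:9200")  # placeholder connection

def generate_actions():
    # Each yielded dict becomes one action/source pair in the NDJSON payload.
    for i in range(1000):
        yield {"_index": "my-index", "_id": str(i), "field1": f"value-{i}"}

# helpers.bulk batches the actions into chunks and raises on errors by default.
ok, errors = helpers.bulk(es, generate_actions(), chunk_size=500)
print(f"indexed {ok} documents, {len(errors)} errors")
```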

Submitting bulk requests with cURL

+

If you're providing text file input to curl, you must use the --data-binary flag instead of plain -d. + The latter doesn't preserve newlines. For example:

+
$ cat requests
+          { "index" : { "_index" : "test", "_id" : "1" } }
+          { "field1" : "value1" }
+          $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo
+          {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
+          
+

Optimistic concurrency control

+

Each index and delete action within a bulk API call may include the if_seq_no and if_primary_term parameters in their respective action and meta data lines. + The if_seq_no and if_primary_term parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details.

+
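The following hedged sketch shows how those parameters might be attached to an individual bulk action in Python; the sequence number and primary term come from a prior read, and the index name and field values are placeholders.

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # placeholder connection

# Read the current document to learn its _seq_no and _primary_term.
current = es.get(index="my-index", id="1")

# The action line carries if_seq_no / if_primary_term; a concurrent change to
# the document makes this item fail with a version conflict (status 409).
resp = es.bulk(
    operations=[
        {
            "index": {
                "_index": "my-index",
                "_id": "1",
                "if_seq_no": current["_seq_no"],
                "if_primary_term": current["_primary_term"],
            }
        },
        {"field1": "updated-value"},
    ]
)
print(resp["items"][0]["index"].get("status"))
```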

Versioning

+

Each bulk item can include the version value using the version field. It automatically follows the behavior of the index or delete operation based on the _version mapping. It also supports the version_type.

+

Routing

+

Each bulk item can include the routing value using the routing field. + It automatically follows the behavior of the index or delete operation based on the _routing mapping.

+

NOTE: Data streams do not support custom routing unless they were created with the allow_custom_routing setting enabled in the template.

+

Wait for active shards

+

When making bulk calls, you can set the wait_for_active_shards parameter to require a minimum number of shard copies to be active before starting to process the bulk request.

+

Refresh

+

Control when the changes made by this request are visible to search.

+
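As a hedged illustration of controlling visibility, this sketch submits a small bulk request with refresh="wait_for", whose behaviour is elaborated in the note that follows; the connection URL, index name, and documents are assumptions made for the example.

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # placeholder connection

# refresh="wait_for" blocks until the shards that received these operations
# have refreshed, so the documents are visible to a following search.
es.bulk(
    operations=[
        {"index": {"_index": "my-index", "_id": "1"}},
        {"field1": "value1"},
    ],
    refresh="wait_for",
)
resp = es.search(index="my-index", query={"match": {"field1": "value1"}})
print(resp["hits"]["total"])
```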

NOTE: Only the shards that receive the bulk request will be affected by refresh. + Imagine a _bulk?refresh=wait_for request with three documents in it that happen to be routed to different shards in an index with five shards. + The request will only wait for those three shards to refresh. + The other two shards that make up the index do not participate in the _bulk request at all.

+ ``_ @@ -839,8 +845,11 @@ async def clear_scroll( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear a scrolling search. Clear the search context and results for a scrolling - search. + .. raw:: html + +

Clear a scrolling search. + Clear the search context and results for a scrolling search.

+ ``_ @@ -890,11 +899,14 @@ async def close_point_in_time( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Close a point in time. A point in time must be opened explicitly before being - used in search requests. The `keep_alive` parameter tells Elasticsearch how long - it should persist. A point in time is automatically closed when the `keep_alive` - period has elapsed. However, keeping points in time has a cost; close them as - soon as they are no longer required for search requests. + .. raw:: html + +

Close a point in time. + A point in time must be opened explicitly before being used in search requests. + The keep_alive parameter tells Elasticsearch how long it should persist. + A point in time is automatically closed when the keep_alive period has elapsed. + However, keeping points in time has a cost; close them as soon as they are no longer required for search requests.

+ ``_ @@ -968,14 +980,17 @@ async def count( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Count search results. Get the number of documents matching a query. The query - can either be provided using a simple query string as a parameter or using the - Query DSL defined within the request body. The latter must be nested in a `query` - key, which is the same as the search API. The count API supports multi-target - syntax. You can run a single count API search across multiple data streams and - indices. The operation is broadcast across all shards. For each shard ID group, - a replica is chosen and the search is run against it. This means that replicas - increase the scalability of the count. + .. raw:: html + +

Count search results. + Get the number of documents matching a query.

+

The query can either be provided using a simple query string as a parameter or using the Query DSL defined within the request body. + The latter must be nested in a query key, which is the same as the search API.

+

The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.

+
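A minimal, hedged example of the count API using multi-target syntax; the index names and query are placeholders chosen for illustration.

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # placeholder connection

# Multi-target syntax: count across two indices (a wildcard pattern also works),
# with the query nested under the `query` key, as in the search API.
resp = es.count(
    index="my-index-000001,my-index-000002",  # hypothetical indices
    query={"term": {"user.id": "kimchy"}},
)
print(resp["count"])
```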

The operation is broadcast across all shards. + For each shard ID group, a replica is chosen and the search is run against it. + This means that replicas increase the scalability of the count.

+ ``_ @@ -1117,80 +1132,61 @@ async def create( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a new document in the index. You can index a new JSON document with the - `//_doc/` or `//_create/<_id>` APIs Using `_create` guarantees - that the document is indexed only if it does not already exist. It returns a - 409 response when a document with a same ID already exists in the index. To update - an existing document, you must use the `//_doc/` API. If the Elasticsearch - security features are enabled, you must have the following index privileges for - the target data stream, index, or index alias: * To add a document using the - `PUT //_create/<_id>` or `POST //_create/<_id>` request formats, - you must have the `create_doc`, `create`, `index`, or `write` index privilege. - * To automatically create a data stream or index with this API request, you must - have the `auto_configure`, `create_index`, or `manage` index privilege. Automatic - data stream creation requires a matching index template with data stream enabled. - **Automatically create data streams and indices** If the request's target doesn't - exist and matches an index template with a `data_stream` definition, the index - operation automatically creates the data stream. If the target doesn't exist - and doesn't match a data stream template, the operation automatically creates - the index and applies any matching index templates. NOTE: Elasticsearch includes - several built-in index templates. To avoid naming collisions with these templates, - refer to index pattern documentation. If no mapping exists, the index operation - creates a dynamic mapping. By default, new fields and objects are automatically - added to the mapping if needed. Automatic index creation is controlled by the - `action.auto_create_index` setting. If it is `true`, any index can be created - automatically. You can modify this setting to explicitly allow or block automatic - creation of indices that match specified patterns or set it to `false` to turn - off automatic index creation entirely. Specify a comma-separated list of patterns - you want to allow or prefix each pattern with `+` or `-` to indicate whether - it should be allowed or blocked. When a list is specified, the default behaviour - is to disallow. NOTE: The `action.auto_create_index` setting affects the automatic - creation of indices only. It does not affect the creation of data streams. **Routing** - By default, shard placement — or routing — is controlled by using a hash of the - document's ID value. For more explicit control, the value fed into the hash function - used by the router can be directly specified on a per-operation basis using the - `routing` parameter. When setting up explicit mapping, you can also use the `_routing` - field to direct the index operation to extract the routing value from the document - itself. This does come at the (very minimal) cost of an additional document parsing - pass. If the `_routing` mapping is defined and set to be required, the index - operation will fail if no routing value is provided or extracted. NOTE: Data - streams do not support custom routing unless they were created with the `allow_custom_routing` - setting enabled in the template. **Distributed** The index operation is directed - to the primary shard based on its route and performed on the actual node containing - this shard. After the primary shard completes the operation, if needed, the update - is distributed to applicable replicas. 
**Active shards** To improve the resiliency - of writes to the system, indexing operations can be configured to wait for a - certain number of active shard copies before proceeding with the operation. If - the requisite number of active shard copies are not available, then the write - operation must wait and retry, until either the requisite shard copies have started - or a timeout occurs. By default, write operations only wait for the primary shards - to be active before proceeding (that is to say `wait_for_active_shards` is `1`). - This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. - To alter this behavior per operation, use the `wait_for_active_shards request` - parameter. Valid values are all or any positive integer up to the total number - of configured copies per shard in the index (which is `number_of_replicas`+1). - Specifying a negative value or a number greater than the number of shard copies - will throw an error. For example, suppose you have a cluster of three nodes, - A, B, and C and you create an index index with the number of replicas set to - 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt - an indexing operation, by default the operation will only ensure the primary - copy of each shard is available before proceeding. This means that even if B - and C went down and A hosted the primary shard copies, the indexing operation - would still proceed with only one copy of the data. If `wait_for_active_shards` - is set on the request to `3` (and all three nodes are up), the indexing operation - will require 3 active shard copies before proceeding. This requirement should - be met because there are 3 active nodes in the cluster, each one holding a copy - of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, - which is the same in this situation), the indexing operation will not proceed - as you do not have all 4 copies of each shard active in the index. The operation - will timeout unless a new node is brought up in the cluster to host the fourth - copy of the shard. It is important to note that this setting greatly reduces - the chances of the write operation not writing to the requisite number of shard - copies, but it does not completely eliminate the possibility, because this check - occurs before the write operation starts. After the write operation is underway, - it is still possible for replication to fail on any number of shard copies but - still succeed on the primary. The `_shards` section of the API response reveals - the number of shard copies on which replication succeeded and failed. + .. raw:: html + +

Create a new document in the index.

+

You can index a new JSON document with the /<target>/_doc/ or /<target>/_create/<_id> APIs. Using _create guarantees that the document is indexed only if it does not already exist. It returns a 409 response when a document with the same ID already exists in the index. To update an existing document, you must use the /<target>/_doc/ API.

+
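To make the 409 behaviour above concrete, here is a hedged sketch using the Python client's create method and catching the resulting conflict; the index name and document are invented, and the error handling reflects the client's public exception types as commonly used.

```python
from elasticsearch import Elasticsearch, ConflictError

es = Elasticsearch("http://localhost:9200")  # placeholder connection

doc = {"title": "hello"}  # placeholder document

es.create(index="my-index", id="1", document=doc)  # succeeds if id "1" is new

try:
    # A second create with the same ID is rejected with a 409 response.
    es.create(index="my-index", id="1", document=doc)
except ConflictError as err:
    print("document already exists:", err.meta.status)
```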

If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:

+
    +
  • To add a document using the PUT /<target>/_create/<_id> or POST /<target>/_create/<_id> request formats, you must have the create_doc, create, index, or write index privilege.
  • +
  • To automatically create a data stream or index with this API request, you must have the auto_configure, create_index, or manage index privilege.
  • +
+

Automatic data stream creation requires a matching index template with data stream enabled.

+

Automatically create data streams and indices

+

If the request's target doesn't exist and matches an index template with a data_stream definition, the index operation automatically creates the data stream.

+

If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.

+

NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.

+

If no mapping exists, the index operation creates a dynamic mapping. + By default, new fields and objects are automatically added to the mapping if needed.

+

Automatic index creation is controlled by the action.auto_create_index setting. + If it is true, any index can be created automatically. + You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to false to turn off automatic index creation entirely. + Specify a comma-separated list of patterns you want to allow or prefix each pattern with + or - to indicate whether it should be allowed or blocked. + When a list is specified, the default behaviour is to disallow.

+
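A hedged sketch of adjusting that setting with the Python client; the pattern list is an invented example that allows some index names and blocks others, and once a list is set anything unmatched is disallowed by default.

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # placeholder connection

# Allow automatic creation for "my-index-*" and "logs-*", explicitly block
# "test-*"; unmatched names are disallowed once a list is specified.
es.cluster.put_settings(
    persistent={"action.auto_create_index": "my-index-*,logs-*,-test-*"}
)
```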

NOTE: The action.auto_create_index setting affects the automatic creation of indices only. + It does not affect the creation of data streams.

+

Routing

+

By default, shard placement — or routing — is controlled by using a hash of the document's ID value. + For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the routing parameter.

+
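A short, hedged example of supplying an explicit routing value on a per-operation basis; the routing key, index name, and document are placeholders.

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # placeholder connection

# The routing value, rather than a hash of the document ID, decides which
# shard receives this document. "user-123" is an invented routing key.
es.index(
    index="my-index",
    id="1",
    routing="user-123",
    document={"user": "user-123", "message": "hello"},
)

# Reads of that document must supply the same routing value.
doc = es.get(index="my-index", id="1", routing="user-123")
print(doc["_source"])
```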

When setting up explicit mapping, you can also use the _routing field to direct the index operation to extract the routing value from the document itself. + This does come at the (very minimal) cost of an additional document parsing pass. + If the _routing mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.

+

NOTE: Data streams do not support custom routing unless they were created with the allow_custom_routing setting enabled in the template.

+

Distributed

+

The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. + After the primary shard completes the operation, if needed, the update is distributed to applicable replicas.

+

Active shards

+

To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. + If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. + By default, write operations only wait for the primary shards to be active before proceeding (that is to say wait_for_active_shards is 1). + This default can be overridden in the index settings dynamically by setting index.write.wait_for_active_shards. + To alter this behavior per operation, use the wait_for_active_shards request parameter.

+

Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is number_of_replicas+1). + Specifying a negative value or a number greater than the number of shard copies will throw an error.

+
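A hedged sketch of setting wait_for_active_shards both as an index-level default and per request; the index name and values are examples only, and the settings call assumes the 8.x client's keyword-style body parameters.

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # placeholder connection

# Index-level default: require 2 active shard copies before a write proceeds.
es.indices.put_settings(
    index="my-index",
    settings={"index.write.wait_for_active_shards": "2"},
)

# Per-request override: "all" requires every configured copy to be active.
es.index(
    index="my-index",
    id="1",
    document={"field1": "value1"},
    wait_for_active_shards="all",
)
```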

For example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). + If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. + This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. + If wait_for_active_shards is set on the request to 3 (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. + This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. + However, if you set wait_for_active_shards to all (or to 4, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. + The operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.

+

It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. + After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. + The _shards section of the API response reveals the number of shard copies on which replication succeeded and failed.
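For illustration only (hypothetical index name and shard count), the per-operation override and the _shards summary look roughly like this with the Python client:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    # Require three active shard copies before the write proceeds.
    resp = client.index(index="my-index-000001", document={"message": "hello"}, wait_for_active_shards=3)

    # The _shards section reports how many copies the write actually reached.
    print(resp["_shards"]["successful"], resp["_shards"]["failed"])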

+ ``_ @@ -1304,30 +1300,33 @@ async def delete( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a document. Remove a JSON document from the specified index. NOTE: You - cannot send deletion requests directly to a data stream. To delete a document - in a data stream, you must target the backing index containing the document. - **Optimistic concurrency control** Delete operations can be made conditional - and only be performed if the last modification to the document was assigned the - sequence number and primary term specified by the `if_seq_no` and `if_primary_term` - parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` - and a status code of `409`. **Versioning** Each document indexed is versioned. - When deleting a document, the version can be specified to make sure the relevant - document you are trying to delete is actually being deleted and it has not changed - in the meantime. Every write operation run on a document, deletes included, causes - its version to be incremented. The version number of a deleted document remains - available for a short time after deletion to allow for control of concurrent - operations. The length of time for which a deleted document's version remains - available is determined by the `index.gc_deletes` index setting. **Routing** - If routing is used during indexing, the routing value also needs to be specified - to delete a document. If the `_routing` mapping is set to `required` and no routing - value is specified, the delete API throws a `RoutingMissingException` and rejects - the request. For example: ``` DELETE /my-index-000001/_doc/1?routing=shard-1 - ``` This request deletes the document with ID 1, but it is routed based on the - user. The document is not deleted if the correct routing is not specified. **Distributed** - The delete operation gets hashed into a specific shard ID. It then gets redirected - into the primary shard within that ID group and replicated (if needed) to shard - replicas within that ID group. + .. raw:: html + +

Delete a document.

+

Remove a JSON document from the specified index.

+

NOTE: You cannot send deletion requests directly to a data stream. + To delete a document in a data stream, you must target the backing index containing the document.

+

Optimistic concurrency control

+

Delete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the if_seq_no and if_primary_term parameters. + If a mismatch is detected, the operation will result in a VersionConflictException and a status code of 409.
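A minimal sketch of a conditional delete with the Python client; the sequence number and primary term values are made up, and ConflictError is assumed to be raised on a mismatch:

    from elasticsearch import Elasticsearch, ConflictError

    client = Elasticsearch("http://localhost:9200")

    try:
        # Delete only if the document has not been modified since seq_no 5 / primary term 1.
        client.delete(index="my-index-000001", id="1", if_seq_no=5, if_primary_term=1)
    except ConflictError:
        print("document was modified concurrently; delete rejected with HTTP 409")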

+

Versioning

+

Each document indexed is versioned. + When deleting a document, the version can be specified to make sure the relevant document you are trying to delete is actually being deleted and it has not changed in the meantime. + Every write operation run on a document, deletes included, causes its version to be incremented. + The version number of a deleted document remains available for a short time after deletion to allow for control of concurrent operations. + The length of time for which a deleted document's version remains available is determined by the index.gc_deletes index setting.

+

Routing

+

If routing is used during indexing, the routing value also needs to be specified to delete a document.

+

If the _routing mapping is set to required and no routing value is specified, the delete API throws a RoutingMissingException and rejects the request.

+

For example:

+
DELETE /my-index-000001/_doc/1?routing=shard-1
+          
+

This request deletes the document with ID 1, but it is routed based on the user. + The document is not deleted if the correct routing is not specified.
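The equivalent call with the Python client might look like this sketch, reusing the routing value from the example above:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    # The routing value used at index time must be repeated here, or the document is not found.
    client.delete(index="my-index-000001", id="1", routing="shard-1")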

+

Distributed

+

The delete operation gets hashed into a specific shard ID. + It then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group.

+ ``_ @@ -1454,7 +1453,11 @@ async def delete_by_query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete documents. Deletes documents that match the specified query. + .. raw:: html + +

Delete documents. + Deletes documents that match the specified query.
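As a small, assumed example with the Python client (index name and query are hypothetical):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    # Remove every document whose user.id field matches "elkbee".
    resp = client.delete_by_query(index="my-index-000001", query={"match": {"user.id": "elkbee"}})
    print(resp["deleted"])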

+ ``_ @@ -1632,10 +1635,12 @@ async def delete_by_query_rethrottle( requests_per_second: t.Optional[float] = None, ) -> ObjectApiResponse[t.Any]: """ - Throttle a delete by query operation. Change the number of requests per second - for a particular delete by query operation. Rethrottling that speeds up the query - takes effect immediately but rethrotting that slows down the query takes effect - after completing the current batch to prevent scroll timeouts. + .. raw:: html + +

Throttle a delete by query operation.

+

Change the number of requests per second for a particular delete by query operation. + Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts.

+ ``_ @@ -1681,7 +1686,11 @@ async def delete_script( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a script or search template. Deletes a stored script or search template. + .. raw:: html + +

Delete a script or search template. + Deletes a stored script or search template.

+ ``_ @@ -1749,15 +1758,21 @@ async def exists( ] = None, ) -> HeadApiResponse: """ - Check a document. Verify that a document exists. For example, check to see if - a document with the `_id` 0 exists: ``` HEAD my-index-000001/_doc/0 ``` If the - document exists, the API returns a status code of `200 - OK`. If the document - doesn’t exist, the API returns `404 - Not Found`. **Versioning support** You - can use the `version` parameter to check the document only if its current version - is equal to the specified one. Internally, Elasticsearch has marked the old document - as deleted and added an entirely new document. The old version of the document - doesn't disappear immediately, although you won't be able to access it. Elasticsearch - cleans up deleted documents in the background as you continue to index more data. + .. raw:: html + +

Check a document.

+

Verify that a document exists. + For example, check to see if a document with the _id 0 exists:

+
HEAD my-index-000001/_doc/0
+          
+

If the document exists, the API returns a status code of 200 - OK. + If the document doesn’t exist, the API returns 404 - Not Found.
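With the Python client, the HEAD request shown above is wrapped by exists(), whose response is truthy on 200 and falsy on 404; a minimal sketch:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    if client.exists(index="my-index-000001", id="0"):
        print("document 0 exists")
    else:
        print("document 0 was not found")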

+

Versioning support

+

You can use the version parameter to check the document only if its current version is equal to the specified one.

+

Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. + The old version of the document doesn't disappear immediately, although you won't be able to access it. + Elasticsearch cleans up deleted documents in the background as you continue to index more data.

+ ``_ @@ -1872,9 +1887,15 @@ async def exists_source( ] = None, ) -> HeadApiResponse: """ - Check for a document source. Check whether a document source exists in an index. - For example: ``` HEAD my-index-000001/_source/1 ``` A document's source is not - available if it is disabled in the mapping. + .. raw:: html + +

Check for a document source.

+

Check whether a document source exists in an index. + For example:

+
HEAD my-index-000001/_source/1
+          
+

A document's source is not available if it is disabled in the mapping.

+ ``_ @@ -1975,8 +1996,11 @@ async def explain( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Explain a document match result. Returns information about why a specific document - matches, or doesn’t match, a query. + .. raw:: html + +

Explain a document match result. + Returns information about why a specific document matches, or doesn’t match, a query.

+ ``_ @@ -2095,11 +2119,14 @@ async def field_caps( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the field capabilities. Get information about the capabilities of fields - among multiple indices. For data streams, the API returns field capabilities - among the stream’s backing indices. It returns runtime fields like any other - field. For example, a runtime field with a type of keyword is returned the same - as any other field that belongs to the `keyword` family. + .. raw:: html + +

Get the field capabilities.

+

Get information about the capabilities of fields among multiple indices.

+

For data streams, the API returns field capabilities among the stream’s backing indices. + It returns runtime fields like any other field. + For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the keyword family.

+ ``_ @@ -2215,36 +2242,45 @@ async def get( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a document by its ID. Get a document and its source or stored fields from - an index. By default, this API is realtime and is not affected by the refresh - rate of the index (when data will become visible for search). In the case where - stored fields are requested with the `stored_fields` parameter and the document - has been updated but is not yet refreshed, the API will have to parse and analyze - the source to extract the stored fields. To turn off realtime behavior, set the - `realtime` parameter to false. **Source filtering** By default, the API returns - the contents of the `_source` field unless you have used the `stored_fields` - parameter or the `_source` field is turned off. You can turn off `_source` retrieval - by using the `_source` parameter: ``` GET my-index-000001/_doc/0?_source=false - ``` If you only need one or two fields from the `_source`, use the `_source_includes` - or `_source_excludes` parameters to include or filter out particular fields. - This can be helpful with large documents where partial retrieval can save on - network overhead Both parameters take a comma separated list of fields or wildcard - expressions. For example: ``` GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities - ``` If you only want to specify includes, you can use a shorter notation: ``` - GET my-index-000001/_doc/0?_source=*.id ``` **Routing** If routing is used during - indexing, the routing value also needs to be specified to retrieve a document. - For example: ``` GET my-index-000001/_doc/2?routing=user1 ``` This request gets - the document with ID 2, but it is routed based on the user. The document is not - fetched if the correct routing is not specified. **Distributed** The GET operation - is hashed into a specific shard ID. It is then redirected to one of the replicas - within that shard ID and returns the result. The replicas are the primary shard - and its replicas within that shard ID group. This means that the more replicas - you have, the better your GET scaling will be. **Versioning support** You can - use the `version` parameter to retrieve the document only if its current version - is equal to the specified one. Internally, Elasticsearch has marked the old document - as deleted and added an entirely new document. The old version of the document - doesn't disappear immediately, although you won't be able to access it. Elasticsearch - cleans up deleted documents in the background as you continue to index more data. + .. raw:: html + +

Get a document by its ID.

+

Get a document and its source or stored fields from an index.

+

By default, this API is realtime and is not affected by the refresh rate of the index (when data will become visible for search). + In the case where stored fields are requested with the stored_fields parameter and the document has been updated but is not yet refreshed, the API will have to parse and analyze the source to extract the stored fields. + To turn off realtime behavior, set the realtime parameter to false.

+

Source filtering

+

By default, the API returns the contents of the _source field unless you have used the stored_fields parameter or the _source field is turned off. + You can turn off _source retrieval by using the _source parameter:

+
GET my-index-000001/_doc/0?_source=false
+          
+

If you only need one or two fields from the _source, use the _source_includes or _source_excludes parameters to include or filter out particular fields. + This can be helpful with large documents where partial retrieval can save on network overhead. + Both parameters take a comma-separated list of fields or wildcard expressions. + For example:

+
GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
+          
+

If you only want to specify includes, you can use a shorter notation:

+
GET my-index-000001/_doc/0?_source=*.id
+          
+
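A rough equivalent of the source-filtering requests above with the Python client, using assumed field patterns:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    # Fetch only *.id fields from _source and leave out the entities field.
    doc = client.get(index="my-index-000001", id="0", source_includes="*.id", source_excludes="entities")
    print(doc["_source"])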

Routing

+

If routing is used during indexing, the routing value also needs to be specified to retrieve a document. + For example:

+
GET my-index-000001/_doc/2?routing=user1
+          
+

This request gets the document with ID 2, but it is routed based on the user. + The document is not fetched if the correct routing is not specified.

+

Distributed

+

The GET operation is hashed into a specific shard ID. + It is then redirected to one of the replicas within that shard ID and returns the result. + The replicas are the primary shard and its replicas within that shard ID group. + This means that the more replicas you have, the better your GET scaling will be.

+

Versioning support

+

You can use the version parameter to retrieve the document only if its current version is equal to the specified one.

+

Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. + The old version of the document doesn't disappear immediately, although you won't be able to access it. + Elasticsearch cleans up deleted documents in the background as you continue to index more data.

+ ``_ @@ -2347,7 +2383,11 @@ async def get_script( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a script or search template. Retrieves a stored script or search template. + .. raw:: html + +

Get a script or search template. + Retrieves a stored script or search template.

+ ``_ @@ -2389,7 +2429,11 @@ async def get_script_context( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get script contexts. Get a list of supported script contexts and their methods. + .. raw:: html + +

Get script contexts.

+

Get a list of supported script contexts and their methods.

+ ``_ """ @@ -2424,7 +2468,11 @@ async def get_script_languages( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get script languages. Get a list of available script types, languages, and contexts. + .. raw:: html + +

Get script languages.

+

Get a list of available script types, languages, and contexts.

+ ``_ """ @@ -2479,10 +2527,17 @@ async def get_source( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a document's source. Get the source of a document. For example: ``` GET my-index-000001/_source/1 - ``` You can use the source filtering parameters to control which parts of the - `_source` are returned: ``` GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities - ``` + .. raw:: html + +

Get a document's source.

+

Get the source of a document. + For example:

+
GET my-index-000001/_source/1
+          
+

You can use the source filtering parameters to control which parts of the _source are returned:

+
GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities
+          
+ ``_ @@ -2567,26 +2622,22 @@ async def health_report( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the cluster health. Get a report with the health status of an Elasticsearch - cluster. The report contains a list of indicators that compose Elasticsearch - functionality. Each indicator has a health status of: green, unknown, yellow - or red. The indicator will provide an explanation and metadata describing the - reason for its current health status. The cluster’s status is controlled by the - worst indicator status. In the event that an indicator’s status is non-green, - a list of impacts may be present in the indicator result which detail the functionalities - that are negatively affected by the health issue. Each impact carries with it - a severity level, an area of the system that is affected, and a simple description - of the impact on the system. Some health indicators can determine the root cause - of a health problem and prescribe a set of steps that can be performed in order - to improve the health of the system. The root cause and remediation steps are - encapsulated in a diagnosis. A diagnosis contains a cause detailing a root cause - analysis, an action containing a brief description of the steps to take to fix - the problem, the list of affected resources (if applicable), and a detailed step-by-step - troubleshooting guide to fix the diagnosed problem. NOTE: The health indicators - perform root cause analysis of non-green health statuses. This can be computationally - expensive when called frequently. When setting up automated polling of the API - for health status, set verbose to false to disable the more expensive analysis - logic. + .. raw:: html + +

Get the cluster health. + Get a report with the health status of an Elasticsearch cluster. + The report contains a list of indicators that compose Elasticsearch functionality.

+

Each indicator has a health status of: green, unknown, yellow or red. + The indicator will provide an explanation and metadata describing the reason for its current health status.

+

The cluster’s status is controlled by the worst indicator status.

+

In the event that an indicator’s status is non-green, a list of impacts may be present in the indicator result which detail the functionalities that are negatively affected by the health issue. + Each impact carries with it a severity level, an area of the system that is affected, and a simple description of the impact on the system.

+

Some health indicators can determine the root cause of a health problem and prescribe a set of steps that can be performed in order to improve the health of the system. + The root cause and remediation steps are encapsulated in a diagnosis. + A diagnosis contains a cause detailing a root cause analysis, an action containing a brief description of the steps to take to fix the problem, the list of affected resources (if applicable), and a detailed step-by-step troubleshooting guide to fix the diagnosed problem.

+

NOTE: The health indicators perform root cause analysis of non-green health statuses. This can be computationally expensive when called frequently. + When setting up automated polling of the API for health status, set verbose to false to disable the more expensive analysis logic.
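For automated polling, the verbose flag mentioned above maps to a keyword argument in the Python client; a minimal sketch, assuming a cluster recent enough to expose the health report API:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    # Skip the expensive root cause analysis when polling frequently.
    report = client.health_report(verbose=False)
    print(report["status"])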

+ ``_ @@ -2661,120 +2712,96 @@ async def index( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a document in an index. Add a JSON document to the specified - data stream or index and make it searchable. If the target is an index and the - document already exists, the request updates the document and increments its - version. NOTE: You cannot use this API to send update requests for existing documents - in a data stream. If the Elasticsearch security features are enabled, you must - have the following index privileges for the target data stream, index, or index - alias: * To add or overwrite a document using the `PUT //_doc/<_id>` - request format, you must have the `create`, `index`, or `write` index privilege. - * To add a document using the `POST //_doc/` request format, you must - have the `create_doc`, `create`, `index`, or `write` index privilege. * To automatically - create a data stream or index with this API request, you must have the `auto_configure`, - `create_index`, or `manage` index privilege. Automatic data stream creation requires - a matching index template with data stream enabled. NOTE: Replica shards might - not all be started when an indexing operation returns successfully. By default, - only the primary is required. Set `wait_for_active_shards` to change this default - behavior. **Automatically create data streams and indices** If the request's - target doesn't exist and matches an index template with a `data_stream` definition, - the index operation automatically creates the data stream. If the target doesn't - exist and doesn't match a data stream template, the operation automatically creates - the index and applies any matching index templates. NOTE: Elasticsearch includes - several built-in index templates. To avoid naming collisions with these templates, - refer to index pattern documentation. If no mapping exists, the index operation - creates a dynamic mapping. By default, new fields and objects are automatically - added to the mapping if needed. Automatic index creation is controlled by the - `action.auto_create_index` setting. If it is `true`, any index can be created - automatically. You can modify this setting to explicitly allow or block automatic - creation of indices that match specified patterns or set it to `false` to turn - off automatic index creation entirely. Specify a comma-separated list of patterns - you want to allow or prefix each pattern with `+` or `-` to indicate whether - it should be allowed or blocked. When a list is specified, the default behaviour - is to disallow. NOTE: The `action.auto_create_index` setting affects the automatic - creation of indices only. It does not affect the creation of data streams. **Optimistic - concurrency control** Index operations can be made conditional and only be performed - if the last modification to the document was assigned the sequence number and - primary term specified by the `if_seq_no` and `if_primary_term` parameters. If - a mismatch is detected, the operation will result in a `VersionConflictException` - and a status code of `409`. **Routing** By default, shard placement — or routing - — is controlled by using a hash of the document's ID value. For more explicit - control, the value fed into the hash function used by the router can be directly - specified on a per-operation basis using the `routing` parameter. When setting - up explicit mapping, you can also use the `_routing` field to direct the index - operation to extract the routing value from the document itself. 
This does come - at the (very minimal) cost of an additional document parsing pass. If the `_routing` - mapping is defined and set to be required, the index operation will fail if no - routing value is provided or extracted. NOTE: Data streams do not support custom - routing unless they were created with the `allow_custom_routing` setting enabled - in the template. **Distributed** The index operation is directed to the primary - shard based on its route and performed on the actual node containing this shard. - After the primary shard completes the operation, if needed, the update is distributed - to applicable replicas. **Active shards** To improve the resiliency of writes - to the system, indexing operations can be configured to wait for a certain number - of active shard copies before proceeding with the operation. If the requisite - number of active shard copies are not available, then the write operation must - wait and retry, until either the requisite shard copies have started or a timeout - occurs. By default, write operations only wait for the primary shards to be active - before proceeding (that is to say `wait_for_active_shards` is `1`). This default - can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. - To alter this behavior per operation, use the `wait_for_active_shards request` - parameter. Valid values are all or any positive integer up to the total number - of configured copies per shard in the index (which is `number_of_replicas`+1). - Specifying a negative value or a number greater than the number of shard copies - will throw an error. For example, suppose you have a cluster of three nodes, - A, B, and C and you create an index index with the number of replicas set to - 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt - an indexing operation, by default the operation will only ensure the primary - copy of each shard is available before proceeding. This means that even if B - and C went down and A hosted the primary shard copies, the indexing operation - would still proceed with only one copy of the data. If `wait_for_active_shards` - is set on the request to `3` (and all three nodes are up), the indexing operation - will require 3 active shard copies before proceeding. This requirement should - be met because there are 3 active nodes in the cluster, each one holding a copy - of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, - which is the same in this situation), the indexing operation will not proceed - as you do not have all 4 copies of each shard active in the index. The operation - will timeout unless a new node is brought up in the cluster to host the fourth - copy of the shard. It is important to note that this setting greatly reduces - the chances of the write operation not writing to the requisite number of shard - copies, but it does not completely eliminate the possibility, because this check - occurs before the write operation starts. After the write operation is underway, - it is still possible for replication to fail on any number of shard copies but - still succeed on the primary. The `_shards` section of the API response reveals - the number of shard copies on which replication succeeded and failed. **No operation - (noop) updates** When updating a document by using this API, a new version of - the document is always created even if the document hasn't changed. If this isn't - acceptable use the `_update` API with `detect_noop` set to `true`. 
The `detect_noop` - option isn't available on this API because it doesn’t fetch the old source and - isn't able to compare it against the new source. There isn't a definitive rule - for when noop updates aren't acceptable. It's a combination of lots of factors - like how frequently your data source sends updates that are actually noops and - how many queries per second Elasticsearch runs on the shard receiving the updates. - **Versioning** Each indexed document is given a version number. By default, internal - versioning is used that starts at 1 and increments with each update, deletes - included. Optionally, the version number can be set to an external value (for - example, if maintained in a database). To enable this functionality, `version_type` - should be set to `external`. The value provided must be a numeric, long value - greater than or equal to 0, and less than around `9.2e+18`. NOTE: Versioning - is completely real time, and is not affected by the near real time aspects of - search operations. If no version is provided, the operation runs without any - version checks. When using the external version type, the system checks to see - if the version number passed to the index request is greater than the version - of the currently stored document. If true, the document will be indexed and the - new version number used. If the value provided is less than or equal to the stored - document's version number, a version conflict will occur and the index operation - will fail. For example: ``` PUT my-index-000001/_doc/1?version=2&version_type=external - { "user": { "id": "elkbee" } } In this example, the operation will succeed since - the supplied version of 2 is higher than the current document version of 1. If - the document was already updated and its version was set to 2 or higher, the - indexing command will fail and result in a conflict (409 HTTP status code). A - nice side effect is that there is no need to maintain strict ordering of async - indexing operations run as a result of changes to a source database, as long - as version numbers from the source database are used. Even the simple case of - updating the Elasticsearch index using data from a database is simplified if - external versioning is used, as only the latest version will be used if the index - operations arrive out of order. + .. raw:: html + +

Create or update a document in an index.

+

Add a JSON document to the specified data stream or index and make it searchable. + If the target is an index and the document already exists, the request updates the document and increments its version.

+

NOTE: You cannot use this API to send update requests for existing documents in a data stream.

+

If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:

+
  • To add or overwrite a document using the PUT /<target>/_doc/<_id> request format, you must have the create, index, or write index privilege.
  • To add a document using the POST /<target>/_doc/ request format, you must have the create_doc, create, index, or write index privilege.
  • To automatically create a data stream or index with this API request, you must have the auto_configure, create_index, or manage index privilege.
+

Automatic data stream creation requires a matching index template with data stream enabled.

+

NOTE: Replica shards might not all be started when an indexing operation returns successfully. + By default, only the primary is required. Set wait_for_active_shards to change this default behavior.

+

Automatically create data streams and indices

+

If the request's target doesn't exist and matches an index template with a data_stream definition, the index operation automatically creates the data stream.

+

If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.

+

NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.

+

If no mapping exists, the index operation creates a dynamic mapping. + By default, new fields and objects are automatically added to the mapping if needed.

+

Automatic index creation is controlled by the action.auto_create_index setting. + If it is true, any index can be created automatically. + You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to false to turn off automatic index creation entirely. + Specify a comma-separated list of patterns you want to allow or prefix each pattern with + or - to indicate whether it should be allowed or blocked. + When a list is specified, the default behaviour is to disallow.

+

NOTE: The action.auto_create_index setting affects the automatic creation of indices only. + It does not affect the creation of data streams.

+

Optimistic concurrency control

+

Index operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the if_seq_no and if_primary_term parameters. + If a mismatch is detected, the operation will result in a VersionConflictException and a status code of 409.

+

Routing

+

By default, shard placement — or routing — is controlled by using a hash of the document's ID value. + For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the routing parameter.

+

When setting up explicit mapping, you can also use the _routing field to direct the index operation to extract the routing value from the document itself. + This does come at the (very minimal) cost of an additional document parsing pass. + If the _routing mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.

+

NOTE: Data streams do not support custom routing unless they were created with the allow_custom_routing setting enabled in the template.

+

Distributed

+

The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. + After the primary shard completes the operation, if needed, the update is distributed to applicable replicas.

+

Active shards

+

To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. + If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. + By default, write operations only wait for the primary shards to be active before proceeding (that is to say wait_for_active_shards is 1). + This default can be overridden in the index settings dynamically by setting index.write.wait_for_active_shards. + To alter this behavior per operation, use the wait_for_active_shards request parameter.

+

Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is number_of_replicas+1). + Specifying a negative value or a number greater than the number of shard copies will throw an error.

+

For example, suppose you have a cluster of three nodes, A, B, and C, and you create an index named index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). + If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. + This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. + If wait_for_active_shards is set on the request to 3 (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. + This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. + However, if you set wait_for_active_shards to all (or to 4, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. + The operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard.

+

It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. + After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. + The _shards section of the API response reveals the number of shard copies on which replication succeeded and failed.

+

No operation (noop) updates

+

When updating a document by using this API, a new version of the document is always created even if the document hasn't changed. + If this isn't acceptable, use the _update API with detect_noop set to true. + The detect_noop option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source.

+

There isn't a definitive rule for when noop updates aren't acceptable. + It's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.
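A sketch of the _update alternative mentioned above, with hypothetical index, ID, and field values; detect_noop makes Elasticsearch report "noop" when nothing changed:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    resp = client.update(index="my-index-000001", id="1", doc={"name": "unchanged value"}, detect_noop=True)
    print(resp["result"])  # "noop" if the document already contained these values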

+

Versioning

+

Each indexed document is given a version number. + By default, internal versioning is used that starts at 1 and increments with each update, deletes included. + Optionally, the version number can be set to an external value (for example, if maintained in a database). + To enable this functionality, version_type should be set to external. + The value provided must be a numeric, long value greater than or equal to 0, and less than around 9.2e+18.

+

NOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations. + If no version is provided, the operation runs without any version checks.

+

When using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document. + If true, the document will be indexed and the new version number used. + If the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. For example:

+
PUT my-index-000001/_doc/1?version=2&version_type=external
+          {
+            "user": {
+              "id": "elkbee"
+            }
+          }
+
+          In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.
+          If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).
+
+          A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.
+          Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.
+          
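The same external-versioning request, sketched with the Python client using the values from the example above:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    # Succeeds only if 2 is greater than the currently stored version; otherwise a 409 conflict is returned.
    client.index(index="my-index-000001", id="1", document={"user": {"id": "elkbee"}}, version=2, version_type="external")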
+ ``_ @@ -2898,7 +2925,11 @@ async def info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get cluster info. Get basic build, version, and cluster information. + .. raw:: html + +

Get cluster info. + Get basic build, version, and cluster information.

+ ``_ """ @@ -2955,15 +2986,18 @@ async def knn_search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Run a knn search. NOTE: The kNN search API has been replaced by the `knn` option - in the search API. Perform a k-nearest neighbor (kNN) search on a dense_vector - field and return the matching documents. Given a query vector, the API finds - the k closest vectors and returns those documents as search hits. Elasticsearch - uses the HNSW algorithm to support efficient kNN search. Like most kNN algorithms, - HNSW is an approximate method that sacrifices result accuracy for improved search - speed. This means the results returned are not always the true k closest neighbors. - The kNN search API supports restricting the search using a filter. The search - will return the top k documents that also match the filter query. + .. raw:: html + +

Run a knn search.

+

NOTE: The kNN search API has been replaced by the knn option in the search API.

+

Perform a k-nearest neighbor (kNN) search on a dense_vector field and return the matching documents. + Given a query vector, the API finds the k closest vectors and returns those documents as search hits.

+

Elasticsearch uses the HNSW algorithm to support efficient kNN search. + Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed. + This means the results returned are not always the true k closest neighbors.

+

The kNN search API supports restricting the search using a filter. + The search will return the top k documents that also match the filter query.
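Since this endpoint is superseded by the knn option of the search API, a hedged sketch of that option with the Python client follows; the field name, query vector, and sizes are made up:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    resp = client.search(
        index="my-index-000001",
        knn={"field": "my_vector", "query_vector": [0.1, 0.2, 0.3], "k": 5, "num_candidates": 50},
    )
    print(resp["hits"]["hits"])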

+ ``_ @@ -3064,10 +3098,13 @@ async def mget( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get multiple documents. Get multiple JSON documents by ID from one or more indices. - If you specify an index in the request URI, you only need to specify the document - IDs in the request body. To ensure fast responses, this multi get (mget) API - responds with partial results if one or more shards fail. + .. raw:: html + +

Get multiple documents.

+

Get multiple JSON documents by ID from one or more indices. + If you specify an index in the request URI, you only need to specify the document IDs in the request body. + To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.
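A minimal sketch with the Python client, assuming the index is given in the URI and only document IDs in the body:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    resp = client.mget(index="my-index-000001", ids=["1", "2"])
    for doc in resp["docs"]:
        print(doc["_id"], doc.get("found"))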

+ ``_ @@ -3188,13 +3225,21 @@ async def msearch( typed_keys: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Run multiple searches. The format of the request is similar to the bulk API format - and makes use of the newline delimited JSON (NDJSON) format. The structure is - as follows: ``` header\\n body\\n header\\n body\\n ``` This structure is specifically - optimized to reduce parsing if a specific search ends up redirected to another - node. IMPORTANT: The final line of data must end with a newline character `\\n`. - Each newline character may be preceded by a carriage return `\\r`. When sending - requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`. + .. raw:: html + +

Run multiple searches.

+

The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format. + The structure is as follows:

+
header\\n
+          body\\n
+          header\\n
+          body\\n
+          
+

This structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node.

+

IMPORTANT: The final line of data must end with a newline character \\n. + Each newline character may be preceded by a carriage return \\r. + When sending requests to this endpoint, the Content-Type header should be set to application/x-ndjson.
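With the Python client the header/body pairs are passed as a flat list and the NDJSON encoding is handled for you; a small assumed example:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    resp = client.msearch(
        index="my-index-000001",
        searches=[
            {},  # header: empty, falls back to the index given above
            {"query": {"match_all": {}}},
            {"index": "my-other-index"},  # header: overrides the target index
            {"query": {"match": {"message": "test"}}},
        ],
    )
    for item in resp["responses"]:
        print(item.get("hits", {}).get("total"))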

+ ``_ @@ -3326,7 +3371,10 @@ async def msearch_template( typed_keys: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Run multiple templated searches. + .. raw:: html + +

Run multiple templated searches.

+ ``_ @@ -3421,11 +3469,14 @@ async def mtermvectors( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get multiple term vectors. You can specify existing documents by index and ID - or provide artificial documents in the body of the request. You can specify the - index in the request body or request URI. The response contains a `docs` array - with all the fetched termvectors. Each element has the structure provided by - the termvectors API. + .. raw:: html + +

Get multiple term vectors.

+

You can specify existing documents by index and ID or provide artificial documents in the body of the request. + You can specify the index in the request body or request URI. + The response contains a docs array with all the fetched termvectors. + Each element has the structure provided by the termvectors API.

+ ``_ @@ -3537,15 +3588,18 @@ async def open_point_in_time( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Open a point in time. A search request by default runs against the most recent - visible data of the target indices, which is called point in time. Elasticsearch - pit (point in time) is a lightweight view into the state of the data as it existed - when initiated. In some cases, it’s preferred to perform multiple search requests - using the same point in time. For example, if refreshes happen between `search_after` - requests, then the results of those requests might not be consistent as changes - happening between searches are only visible to the more recent point in time. - A point in time must be opened explicitly before being used in search requests. - The `keep_alive` parameter tells Elasticsearch how long it should persist. + .. raw:: html + +

Open a point in time.

+

A search request by default runs against the most recent visible data of the target indices, + which is called point in time. Elasticsearch pit (point in time) is a lightweight view into the + state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple + search requests using the same point in time. For example, if refreshes happen between + search_after requests, then the results of those requests might not be consistent as changes happening + between searches are only visible to the more recent point in time.

+

A point in time must be opened explicitly before being used in search requests. + The keep_alive parameter tells Elasticsearch how long it should persist.
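A minimal end-to-end sketch with the Python client; the index name and keep-alive value are assumptions:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    pit = client.open_point_in_time(index="my-index-000001", keep_alive="1m")

    # Searches referencing the PIT see the data as it existed when the PIT was opened.
    resp = client.search(pit={"id": pit["id"], "keep_alive": "1m"}, query={"match_all": {}}, size=10)
    print(resp["hits"]["total"])

    client.close_point_in_time(id=pit["id"])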

+ ``_ @@ -3632,8 +3686,11 @@ async def put_script( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a script or search template. Creates or updates a stored script - or search template. + .. raw:: html + +

Create or update a script or search template. + Creates or updates a stored script or search template.

+ ``_ @@ -3718,8 +3775,11 @@ async def rank_eval( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Evaluate ranked search results. Evaluate the quality of ranked search results - over a set of typical search queries. + .. raw:: html + +

Evaluate ranked search results.

+

Evaluate the quality of ranked search results over a set of typical search queries.

+ ``_ @@ -3813,149 +3873,145 @@ async def reindex( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Reindex documents. Copy documents from a source to a destination. You can copy - all documents to the destination index or reindex a subset of the documents. - The source can be any existing index, alias, or data stream. The destination - must differ from the source. For example, you cannot reindex a data stream into - itself. IMPORTANT: Reindex requires `_source` to be enabled for all documents - in the source. The destination should be configured as wanted before calling - the reindex API. Reindex does not copy the settings from the source or its associated - template. Mappings, shard counts, and replicas, for example, must be configured - ahead of time. If the Elasticsearch security features are enabled, you must have - the following security privileges: * The `read` index privilege for the source - data stream, index, or alias. * The `write` index privilege for the destination - data stream, index, or index alias. * To automatically create a data stream or - index with a reindex API request, you must have the `auto_configure`, `create_index`, - or `manage` index privilege for the destination data stream, index, or alias. - * If reindexing from a remote cluster, the `source.remote.user` must have the - `monitor` cluster privilege and the `read` index privilege for the source data - stream, index, or alias. If reindexing from a remote cluster, you must explicitly - allow the remote host in the `reindex.remote.whitelist` setting. Automatic data - stream creation requires a matching index template with data stream enabled. - The `dest` element can be configured like the index API to control optimistic - concurrency control. Omitting `version_type` or setting it to `internal` causes - Elasticsearch to blindly dump documents into the destination, overwriting any - that happen to have the same ID. Setting `version_type` to `external` causes - Elasticsearch to preserve the `version` from the source, create any documents - that are missing, and update any documents that have an older version in the - destination than they do in the source. Setting `op_type` to `create` causes - the reindex API to create only missing documents in the destination. All existing - documents will cause a version conflict. IMPORTANT: Because data streams are - append-only, any reindex request to a destination data stream must have an `op_type` - of `create`. A reindex can only add new documents to a destination data stream. - It cannot update existing documents in a destination data stream. By default, - version conflicts abort the reindex process. To continue reindexing if there - are conflicts, set the `conflicts` request body property to `proceed`. In this - case, the response includes a count of the version conflicts that were encountered. - Note that the handling of other error types is unaffected by the `conflicts` - property. Additionally, if you opt to count version conflicts, the operation - could attempt to reindex more documents from the source than `max_docs` until - it has successfully indexed `max_docs` documents into the target or it has gone - through every document in the source query. NOTE: The reindex API makes no effort - to handle ID collisions. The last document written will "win" but the order isn't - usually predictable so it is not a good idea to rely on this behavior. Instead, - make sure that IDs are unique by using a script. 
**Running reindex asynchronously** - If the request contains `wait_for_completion=false`, Elasticsearch performs some - preflight checks, launches the request, and returns a task you can use to cancel - or get the status of the task. Elasticsearch creates a record of this task as - a document at `_tasks/`. **Reindex from multiple sources** If you have - many sources to reindex it is generally better to reindex them one at a time - rather than using a glob pattern to pick up multiple sources. That way you can - resume the process if there are any errors by removing the partially completed - source and starting over. It also makes parallelizing the process fairly simple: - split the list of sources to reindex and run each list in parallel. For example, - you can use a bash script like this: ``` for index in i1 i2 i3 i4 i5; do curl - -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{ "source": - { "index": "'$index'" }, "dest": { "index": "'$index'-reindexed" } }' done ``` - **Throttling** Set `requests_per_second` to any positive decimal number (`1.4`, - `6`, `1000`, for example) to throttle the rate at which reindex issues batches - of index operations. Requests are throttled by padding each batch with a wait - time. To turn off throttling, set `requests_per_second` to `-1`. The throttling - is done by waiting between batches so that the scroll that reindex uses internally - can be given a timeout that takes into account the padding. The padding time - is the difference between the batch size divided by the `requests_per_second` - and the time spent writing. By default the batch size is `1000`, so if `requests_per_second` - is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time - = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` Since the - batch is issued as a single bulk request, large batch sizes cause Elasticsearch - to create many requests and then wait for a while before starting the next set. - This is "bursty" instead of "smooth". **Slicing** Reindex supports sliced scroll - to parallelize the reindexing process. This parallelization can improve efficiency - and provide a convenient way to break the request down into smaller parts. NOTE: - Reindexing from remote clusters does not support manual or automatic slicing. - You can slice a reindex request manually by providing a slice ID and total number - of slices to each request. You can also let reindex automatically parallelize - by using sliced scroll to slice on `_id`. The `slices` parameter specifies the - number of slices to use. Adding `slices` to the reindex request just automates - the manual process, creating sub-requests which means it has some quirks: * You - can see these requests in the tasks API. These sub-requests are "child" tasks - of the task for the request with slices. * Fetching the status of the task for - the request with `slices` only contains the status of completed slices. * These - sub-requests are individually addressable for things like cancellation and rethrottling. - * Rethrottling the request with `slices` will rethrottle the unfinished sub-request - proportionally. * Canceling the request with `slices` will cancel each sub-request. - * Due to the nature of `slices`, each sub-request won't get a perfectly even - portion of the documents. All documents will be addressed, but some slices may - be larger than others. Expect larger slices to have a more even distribution. 
- * Parameters like `requests_per_second` and `max_docs` on a request with `slices` - are distributed proportionally to each sub-request. Combine that with the previous - point about distribution being uneven and you should conclude that using `max_docs` - with `slices` might not result in exactly `max_docs` documents being reindexed. - * Each sub-request gets a slightly different snapshot of the source, though these - are all taken at approximately the same time. If slicing automatically, setting - `slices` to `auto` will choose a reasonable number for most indices. If slicing - manually or otherwise tuning automatic slicing, use the following guidelines. - Query performance is most efficient when the number of slices is equal to the - number of shards in the index. If that number is large (for example, `500`), - choose a lower number as too many slices will hurt performance. Setting slices - higher than the number of shards generally does not improve efficiency and adds - overhead. Indexing performance scales linearly across available resources with - the number of slices. Whether query or indexing performance dominates the runtime - depends on the documents being reindexed and cluster resources. **Modify documents - during reindexing** Like `_update_by_query`, reindex operations support a script - that modifies the document. Unlike `_update_by_query`, the script is allowed - to modify the document's metadata. Just as in `_update_by_query`, you can set - `ctx.op` to change the operation that is run on the destination. For example, - set `ctx.op` to `noop` if your script decides that the document doesn’t have - to be indexed in the destination. This "no operation" will be reported in the - `noop` counter in the response body. Set `ctx.op` to `delete` if your script - decides that the document must be deleted from the destination. The deletion - will be reported in the `deleted` counter in the response body. Setting `ctx.op` - to anything else will return an error, as will setting any other field in `ctx`. - Think of the possibilities! Just be careful; you are able to change: * `_id` - * `_index` * `_version` * `_routing` Setting `_version` to `null` or clearing - it from the `ctx` map is just like not sending the version in an indexing request. - It will cause the document to be overwritten in the destination regardless of - the version on the target or the version type you use in the reindex API. **Reindex - from remote** Reindex supports reindexing from a remote Elasticsearch cluster. - The `host` parameter must contain a scheme, host, port, and optional path. The - `username` and `password` parameters are optional and when they are present the - reindex operation will connect to the remote Elasticsearch node using basic authentication. - Be sure to use HTTPS when using basic authentication or the password will be - sent in plain text. There are a range of settings available to configure the - behavior of the HTTPS connection. When using Elastic Cloud, it is also possible - to authenticate against the remote cluster through the use of a valid API key. - Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting. - It can be set to a comma delimited list of allowed remote host and port combinations. - Scheme is ignored; only the host and port are used. For example: ``` reindex.remote.whitelist: - [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*"] ``` The list of - allowed hosts must be configured on any nodes that will coordinate the reindex. 
- This feature should work with remote clusters of any version of Elasticsearch. - This should enable you to upgrade from any version of Elasticsearch to the current - version by reindexing from a cluster of the old version. WARNING: Elasticsearch - does not support forward compatibility across major versions. For example, you - cannot reindex from a 7.x cluster into a 6.x cluster. To enable queries sent - to older versions of Elasticsearch, the `query` parameter is sent directly to - the remote host without validation or modification. NOTE: Reindexing from remote - clusters does not support manual or automatic slicing. Reindexing from a remote - server uses an on-heap buffer that defaults to a maximum size of 100mb. If the - remote index includes very large documents you'll need to use a smaller batch - size. It is also possible to set the socket read timeout on the remote connection - with the `socket_timeout` field and the connection timeout with the `connect_timeout` - field. Both default to 30 seconds. **Configuring SSL parameters** Reindex from - remote supports configurable SSL settings. These must be specified in the `elasticsearch.yml` - file, with the exception of the secure settings, which you add in the Elasticsearch - keystore. It is not possible to configure SSL in the body of the reindex request. + .. raw:: html + +

Reindex documents.

+

Copy documents from a source to a destination. + You can copy all documents to the destination index or reindex a subset of the documents. + The source can be any existing index, alias, or data stream. + The destination must differ from the source. + For example, you cannot reindex a data stream into itself.

+

IMPORTANT: Reindex requires _source to be enabled for all documents in the source. + The destination should be configured as wanted before calling the reindex API. + Reindex does not copy the settings from the source or its associated template. + Mappings, shard counts, and replicas, for example, must be configured ahead of time.

+

If the Elasticsearch security features are enabled, you must have the following security privileges:

+
    +
  • The read index privilege for the source data stream, index, or alias.
  • +
  • The write index privilege for the destination data stream, index, or index alias.
  • +
  • To automatically create a data stream or index with a reindex API request, you must have the auto_configure, create_index, or manage index privilege for the destination data stream, index, or alias.
  • +
  • If reindexing from a remote cluster, the source.remote.user must have the monitor cluster privilege and the read index privilege for the source data stream, index, or alias.
  • +
+

If reindexing from a remote cluster, you must explicitly allow the remote host in the reindex.remote.whitelist setting. + Automatic data stream creation requires a matching index template with data stream enabled.

+

The dest element can be configured like the index API to control optimistic concurrency control. + Omitting version_type or setting it to internal causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID.

+

Setting version_type to external causes Elasticsearch to preserve the version from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source.

+

Setting op_type to create causes the reindex API to create only missing documents in the destination. + All existing documents will cause a version conflict.

+

IMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an op_type of create. + A reindex can only add new documents to a destination data stream. + It cannot update existing documents in a destination data stream.

+

By default, version conflicts abort the reindex process. + To continue reindexing if there are conflicts, set the conflicts request body property to proceed. + In this case, the response includes a count of the version conflicts that were encountered. + Note that the handling of other error types is unaffected by the conflicts property. + Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than max_docs until it has successfully indexed max_docs documents into the target or it has gone through every document in the source query.

+

NOTE: The reindex API makes no effort to handle ID collisions. + The last document written will "win" but the order isn't usually predictable so it is not a good idea to rely on this behavior. + Instead, make sure that IDs are unique by using a script.
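As a hedged sketch (not part of this patch), the options described above map onto the Python client's `reindex()` call roughly as follows; the index names and the `max_docs` value are placeholders:

```
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

# Create-only copy that continues past version conflicts instead of aborting.
resp = client.reindex(
    source={"index": "my-index"},
    dest={"index": "my-new-index", "op_type": "create"},
    conflicts="proceed",
    max_docs=10000,
    refresh=True,
)
print(resp["created"], resp["version_conflicts"])
```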

+

Running reindex asynchronously

+

If the request contains wait_for_completion=false, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. + Elasticsearch creates a record of this task as a document at _tasks/<task_id>.
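Assuming the `client` instance from the earlier sketch, launching the reindex as a task and polling it might look like this (index names are placeholders):

```
# Launch the reindex asynchronously and poll the resulting task.
resp = client.reindex(
    source={"index": "my-index"},
    dest={"index": "my-new-index"},
    wait_for_completion=False,
)
task_id = resp["task"]
status = client.tasks.get(task_id=task_id)
print(status["completed"], status["task"]["status"])
```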

+

Reindex from multiple sources

+

If you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources. + That way you can resume the process if there are any errors by removing the partially completed source and starting over. + It also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel.

+

For example, you can use a bash script like this:

+
for index in i1 i2 i3 i4 i5; do
+            curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{
+              "source": {
+                "index": "'$index'"
+              },
+              "dest": {
+                "index": "'$index'-reindexed"
+              }
+            }'
+          done
+          
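The same workflow with the Python client could be sketched like this, reusing the `client` from the first example; the index list is illustrative:

```
# Reindex several sources one at a time so a failure can be retried per index.
for index in ["i1", "i2", "i3", "i4", "i5"]:
    client.reindex(
        source={"index": index},
        dest={"index": f"{index}-reindexed"},
        wait_for_completion=True,
    )
```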
+

Throttling

+

Set requests_per_second to any positive decimal number (1.4, 6, 1000, for example) to throttle the rate at which reindex issues batches of index operations. + Requests are throttled by padding each batch with a wait time. + To turn off throttling, set requests_per_second to -1.

+

The throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding. + The padding time is the difference between the batch size divided by the requests_per_second and the time spent writing. + By default the batch size is 1000, so if requests_per_second is set to 500:

+
target_time = 1000 / 500 per second = 2 seconds
+          wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+          
+

Since the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set. + This is "bursty" instead of "smooth".
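A minimal throttling sketch with the Python client, assuming the `client` from the first example and placeholder index names:

```
# Throttle to roughly 500 documents per second; -1 disables throttling.
client.reindex(
    source={"index": "my-index"},
    dest={"index": "my-new-index"},
    requests_per_second=500,
)
```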

+

Slicing

+

Reindex supports sliced scroll to parallelize the reindexing process. + This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts.

+

NOTE: Reindexing from remote clusters does not support manual or automatic slicing.

+

You can slice a reindex request manually by providing a slice ID and total number of slices to each request. + You can also let reindex automatically parallelize by using sliced scroll to slice on _id. + The slices parameter specifies the number of slices to use.

+

Adding slices to the reindex request just automates the manual process, creating sub-requests which means it has some quirks:

+
    +
  • You can see these requests in the tasks API. These sub-requests are "child" tasks of the task for the request with slices.
  • +
  • Fetching the status of the task for the request with slices only contains the status of completed slices.
  • +
  • These sub-requests are individually addressable for things like cancellation and rethrottling.
  • +
  • Rethrottling the request with slices will rethrottle the unfinished sub-request proportionally.
  • +
  • Canceling the request with slices will cancel each sub-request.
  • +
  • Due to the nature of slices, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.
  • +
  • Parameters like requests_per_second and max_docs on a request with slices are distributed proportionally to each sub-request. Combine that with the previous point about distribution being uneven and you should conclude that using max_docs with slices might not result in exactly max_docs documents being reindexed.
  • +
  • Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time.
  • +
+

If slicing automatically, setting slices to auto will choose a reasonable number for most indices. + If slicing manually or otherwise tuning automatic slicing, use the following guidelines.

+

Query performance is most efficient when the number of slices is equal to the number of shards in the index. + If that number is large (for example, 500), choose a lower number as too many slices will hurt performance. + Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.

+

Indexing performance scales linearly across available resources with the number of slices.

+

Whether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources.
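A short sketch of automatic slicing with the Python client (placeholder index names, `client` as before):

```
# Let Elasticsearch pick the slice count, typically one slice per shard.
client.reindex(
    source={"index": "my-index"},
    dest={"index": "my-new-index"},
    slices="auto",
    wait_for_completion=False,
)
```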

+

Modify documents during reindexing

+

Like _update_by_query, reindex operations support a script that modifies the document. + Unlike _update_by_query, the script is allowed to modify the document's metadata.

+

Just as in _update_by_query, you can set ctx.op to change the operation that is run on the destination. + For example, set ctx.op to noop if your script decides that the document doesn’t have to be indexed in the destination. This "no operation" will be reported in the noop counter in the response body. + Set ctx.op to delete if your script decides that the document must be deleted from the destination. + The deletion will be reported in the deleted counter in the response body. + Setting ctx.op to anything else will return an error, as will setting any other field in ctx.

+

Think of the possibilities! Just be careful; you are able to change:

+
    +
  • _id
  • +
  • _index
  • +
  • _version
  • +
  • _routing
  • +
+

Setting _version to null or clearing it from the ctx map is just like not sending the version in an indexing request. + It will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API.
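A hedged example of a reindex script that skips some documents; the `archived` field and index names are hypothetical:

```
# Skip documents flagged as archived by setting ctx.op to noop during the copy.
client.reindex(
    source={"index": "my-index"},
    dest={"index": "my-new-index"},
    script={
        "lang": "painless",
        "source": "if (ctx._source.archived == true) { ctx.op = 'noop' }",
    },
)
```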

+

Reindex from remote

+

Reindex supports reindexing from a remote Elasticsearch cluster. + The host parameter must contain a scheme, host, port, and optional path. + The username and password parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication. + Be sure to use HTTPS when using basic authentication or the password will be sent in plain text. + There are a range of settings available to configure the behavior of the HTTPS connection.

+

When using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key. + Remote hosts must be explicitly allowed with the reindex.remote.whitelist setting. + It can be set to a comma delimited list of allowed remote host and port combinations. + Scheme is ignored; only the host and port are used. + For example:

+
reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*]
+          
+

The list of allowed hosts must be configured on any nodes that will coordinate the reindex. + This feature should work with remote clusters of any version of Elasticsearch. + This should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version.

+

WARNING: Elasticsearch does not support forward compatibility across major versions. + For example, you cannot reindex from a 7.x cluster into a 6.x cluster.

+

To enable queries sent to older versions of Elasticsearch, the query parameter is sent directly to the remote host without validation or modification.

+

NOTE: Reindexing from remote clusters does not support manual or automatic slicing.

+

Reindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb. + If the remote index includes very large documents you'll need to use a smaller batch size. + It is also possible to set the socket read timeout on the remote connection with the socket_timeout field and the connection timeout with the connect_timeout field. + Both default to 30 seconds.
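A sketch of a remote reindex request with the Python client; the host, credentials, and index names are placeholders, and the remote host must also be listed in `reindex.remote.whitelist` on the coordinating nodes:

```
client.reindex(
    source={
        "remote": {
            "host": "https://otherhost:9200",
            "username": "user",
            "password": "pass",
            "socket_timeout": "1m",
            "connect_timeout": "10s",
        },
        "index": "source-index",
        "size": 100,  # smaller batches help when remote documents are large
    },
    dest={"index": "dest-index"},
)
```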

+

Configuring SSL parameters

+

Reindex from remote supports configurable SSL settings. + These must be specified in the elasticsearch.yml file, with the exception of the secure settings, which you add in the Elasticsearch keystore. + It is not possible to configure SSL in the body of the reindex request.

+ ``_ @@ -4069,11 +4125,17 @@ async def reindex_rethrottle( requests_per_second: t.Optional[float] = None, ) -> ObjectApiResponse[t.Any]: """ - Throttle a reindex operation. Change the number of requests per second for a - particular reindex operation. For example: ``` POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 - ``` Rethrottling that speeds up the query takes effect immediately. Rethrottling - that slows down the query will take effect after completing the current batch. - This behavior prevents scroll timeouts. + .. raw:: html + +

Throttle a reindex operation.

+

Change the number of requests per second for a particular reindex operation. + For example:

+
POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1
+          
+

Rethrottling that speeds up the query takes effect immediately. + Rethrottling that slows down the query will take effect after completing the current batch. + This behavior prevents scroll timeouts.
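With the Python client this maps onto `reindex_rethrottle()`; the task ID below is the placeholder used in the example above:

```
# Remove the throttle entirely on a running reindex task.
client.reindex_rethrottle(
    task_id="r1A2WoRbTwKZ516z6NEs5A:36619",
    requests_per_second=-1,
)
```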

+ ``_ @@ -4125,7 +4187,11 @@ async def render_search_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Render a search template. Render a search template as a search request body. + .. raw:: html + +

Render a search template.

+

Render a search template as a search request body.
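A brief sketch with the Python client, assuming a stored template id `my-template` and illustrative parameters:

```
# Render the template into a concrete search body without executing it.
client.render_search_template(
    id="my-template",
    params={"query_string": "hello world", "from": 0, "size": 10},
)
```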

+ ``_ @@ -4194,7 +4260,11 @@ async def scripts_painless_execute( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Run a script. Runs a script and returns a result. + .. raw:: html + +

Run a script. + Runs a script and returns a result.
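A minimal sketch of evaluating a Painless expression with the Python client; the script and parameters are illustrative:

```
# Execute a small Painless expression outside of any index.
client.scripts_painless_execute(
    script={
        "source": "params.count / params.total",
        "params": {"count": 100.0, "total": 1000.0},
    }
)
```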

+ ``_ @@ -4252,22 +4322,19 @@ async def scroll( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Run a scrolling search. IMPORTANT: The scroll API is no longer recommend for - deep pagination. If you need to preserve the index state while paging through - more than 10,000 hits, use the `search_after` parameter with a point in time - (PIT). The scroll API gets large sets of results from a single scrolling search - request. To get the necessary scroll ID, submit a search API request that includes - an argument for the `scroll` query parameter. The `scroll` parameter indicates - how long Elasticsearch should retain the search context for the request. The - search response returns a scroll ID in the `_scroll_id` response body parameter. - You can then use the scroll ID with the scroll API to retrieve the next batch - of results for the request. If the Elasticsearch security features are enabled, - the access to the results of a specific scroll ID is restricted to the user or - API key that submitted the search. You can also use the scroll API to specify - a new scroll parameter that extends or shortens the retention period for the - search context. IMPORTANT: Results from a scrolling search reflect the state - of the index at the time of the initial search request. Subsequent indexing or - document changes only affect later search and scroll requests. + .. raw:: html + +

Run a scrolling search.

+

IMPORTANT: The scroll API is no longer recommended for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the search_after parameter with a point in time (PIT).


+

The scroll API gets large sets of results from a single scrolling search request. + To get the necessary scroll ID, submit a search API request that includes an argument for the scroll query parameter. + The scroll parameter indicates how long Elasticsearch should retain the search context for the request. + The search response returns a scroll ID in the _scroll_id response body parameter. + You can then use the scroll ID with the scroll API to retrieve the next batch of results for the request. + If the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search.

+

You can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context.

+

IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.
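A hedged paging sketch with the Python client (placeholder index name, `client` as before):

```
# Open a scroll with the initial search, then page until no hits remain.
resp = client.search(index="my-index", scroll="1m", size=1000, query={"match_all": {}})
scroll_id = resp["_scroll_id"]
while resp["hits"]["hits"]:
    resp = client.scroll(scroll_id=scroll_id, scroll="1m")
    scroll_id = resp["_scroll_id"]
# Release the search context once paging is done.
client.clear_scroll(scroll_id=scroll_id)
```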

+ ``_ @@ -4457,9 +4524,13 @@ async def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Run a search. Get search hits that match the query defined in the request. You - can provide search queries using the `q` query string parameter or the request - body. If both are specified, only the query parameter is used. + .. raw:: html + +

Run a search.

+

Get search hits that match the query defined in the request. + You can provide search queries using the q query string parameter or the request body. + If both are specified, only the query parameter is used.
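A minimal request-body search with the Python client; the index and field names are placeholders:

```
resp = client.search(
    index="my-index",
    query={"match": {"title": "search keywords"}},
    size=10,
)
for hit in resp["hits"]["hits"]:
    print(hit["_id"], hit["_score"])
```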

+ ``_ @@ -4889,7 +4960,11 @@ async def search_mvt( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> BinaryApiResponse: """ - Search a vector tile. Search a vector tile for geospatial values. + .. raw:: html + +

Search a vector tile.

+

Search a vector tile for geospatial values.
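A sketch of requesting a vector tile with the Python client; the index, geo field, and tile coordinates are illustrative:

```
# Returns a binary Mapbox vector tile for the given zoom/x/y coordinates.
tile = client.search_mvt(
    index="my-geo-index",
    field="location",
    zoom=13,
    x=4207,
    y=2692,
)
```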

+ ``_ @@ -5044,10 +5119,13 @@ async def search_shards( routing: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the search shards. Get the indices and shards that a search request would - be run against. This information can be useful for working out issues or planning - optimizations with routing and shard preferences. When filtered aliases are used, - the filter is returned as part of the indices section. + .. raw:: html + +

Get the search shards.

+

Get the indices and shards that a search request would be run against. + This information can be useful for working out issues or planning optimizations with routing and shard preferences. + When filtered aliases are used, the filter is returned as part of the indices section.
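A short sketch with the Python client (placeholder index and routing value):

```
# Inspect which shards a routed search would target.
resp = client.search_shards(index="my-index", routing="user_1")
print(resp["indices"], len(resp["shards"]))
```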

+ ``_ @@ -5151,7 +5229,10 @@ async def search_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Run a search with a search template. + .. raw:: html + +

Run a search with a search template.
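A minimal sketch with the Python client, assuming a stored template id `my-template`:

```
client.search_template(
    index="my-index",
    id="my-template",
    params={"query_string": "hello world"},
)
```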

+ ``_ @@ -5283,15 +5364,15 @@ async def terms_enum( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get terms in an index. Discover terms that match a partial string in an index. - This "terms enum" API is designed for low-latency look-ups used in auto-complete - scenarios. If the `complete` property in the response is false, the returned - terms set may be incomplete and should be treated as approximate. This can occur - due to a few reasons, such as a request timeout or a node error. NOTE: The terms - enum API may return terms from deleted documents. Deleted documents are initially - only marked as deleted. It is not until their segments are merged that documents - are actually deleted. Until that happens, the terms enum API will return terms - from these documents. + .. raw:: html + +

Get terms in an index.

+

Discover terms that match a partial string in an index. + This "terms enum" API is designed for low-latency look-ups used in auto-complete scenarios.

+

If the complete property in the response is false, the returned terms set may be incomplete and should be treated as approximate. + This can occur due to a few reasons, such as a request timeout or a node error.

+

NOTE: The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents.
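A brief sketch with the Python client; the index and keyword field are hypothetical:

```
# Suggest terms starting with a partial string; check "complete" before trusting the list.
resp = client.terms_enum(index="my-index", field="tags", string="kib")
print(resp["terms"], resp["complete"])
```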

+ ``_ @@ -5389,8 +5470,11 @@ async def termvectors( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get term vector information. Get information and statistics about terms in the - fields of a particular document. + .. raw:: html + +

Get term vector information.

+

Get information and statistics about terms in the fields of a particular document.
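A minimal sketch with the Python client (placeholder index, document id, and field):

```
client.termvectors(
    index="my-index",
    id="1",
    fields=["text"],
    term_statistics=True,
)
```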

+ ``_ @@ -5532,19 +5616,24 @@ async def update( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update a document. Update a document by running a script or passing a partial - document. If the Elasticsearch security features are enabled, you must have the - `index` or `write` index privilege for the target index or index alias. The script - can update, delete, or skip modifying the document. The API also supports passing - a partial document, which is merged into the existing document. To fully replace - an existing document, use the index API. This operation: * Gets the document - (collocated with the shard) from the index. * Runs the specified script. * Indexes - the result. The document must still be reindexed, but using this API removes - some network roundtrips and reduces chances of version conflicts between the - GET and the index operation. The `_source` field must be enabled to use this - API. In addition to `_source`, you can access the following variables through - the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the - current timestamp). + .. raw:: html + +

Update a document.

+

Update a document by running a script or passing a partial document.

+

If the Elasticsearch security features are enabled, you must have the index or write index privilege for the target index or index alias.

+

The script can update, delete, or skip modifying the document. + The API also supports passing a partial document, which is merged into the existing document. + To fully replace an existing document, use the index API. + This operation:

+
    +
  • Gets the document (collocated with the shard) from the index.
  • +
  • Runs the specified script.
  • +
  • Indexes the result.
  • +
+

The document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation.

+

The _source field must be enabled to use this API. + In addition to _source, you can access the following variables through the ctx map: _index, _type, _id, _version, _routing, and _now (the current timestamp).
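Two hedged variants with the Python client, one scripted and one partial-document; index, id, and field names are placeholders:

```
# Scripted counter increment, falling back to an upsert if the document is missing.
client.update(
    index="my-index",
    id="1",
    script={"source": "ctx._source.counter += params.count", "params": {"count": 4}},
    upsert={"counter": 1},
)

# Partial-document merge into the existing source.
client.update(index="my-index", id="1", doc={"tags": ["updated"]})
```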

+ ``_ @@ -5711,9 +5800,12 @@ async def update_by_query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update documents. Updates documents that match the specified query. If no query - is specified, performs an update on every document in the data stream or index - without modifying the source, which is useful for picking up mapping changes. + .. raw:: html + +

Update documents. + Updates documents that match the specified query. + If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.
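A sketch with the Python client; query, script, and index name are illustrative:

```
# Run a script on every matching document, continuing past version conflicts.
client.update_by_query(
    index="my-index",
    query={"term": {"user.id": "kimchy"}},
    script={"source": "ctx._source.count++", "lang": "painless"},
    conflicts="proceed",
)
```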

+ ``_ @@ -5909,10 +6001,12 @@ async def update_by_query_rethrottle( requests_per_second: t.Optional[float] = None, ) -> ObjectApiResponse[t.Any]: """ - Throttle an update by query operation. Change the number of requests per second - for a particular update by query operation. Rethrottling that speeds up the query - takes effect immediately but rethrotting that slows down the query takes effect - after completing the current batch to prevent scroll timeouts. + .. raw:: html + +

Throttle an update by query operation.

+

Change the number of requests per second for a particular update by query operation. + Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts.
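A one-line sketch with the Python client; the task ID is a placeholder:

```
# Remove the throttle on a running update-by-query task.
client.update_by_query_rethrottle(
    task_id="r1A2WoRbTwKZ516z6NEs5A:36619",
    requests_per_second=-1,
)
```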

+ ``_ diff --git a/elasticsearch/_async/client/async_search.py b/elasticsearch/_async/client/async_search.py index c2c3f9526..ab8421898 100644 --- a/elasticsearch/_async/client/async_search.py +++ b/elasticsearch/_async/client/async_search.py @@ -36,11 +36,13 @@ async def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete an async search. If the asynchronous search is still running, it is cancelled. - Otherwise, the saved search results are deleted. If the Elasticsearch security - features are enabled, the deletion of a specific async search is restricted to: - the authenticated user that submitted the original search request; users that - have the `cancel_task` cluster privilege. + .. raw:: html + +

Delete an async search.

+

If the asynchronous search is still running, it is cancelled. + Otherwise, the saved search results are deleted. + If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the cancel_task cluster privilege.

+ ``_ @@ -85,10 +87,12 @@ async def get( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Get async search results. Retrieve the results of a previously submitted asynchronous - search request. If the Elasticsearch security features are enabled, access to - the results of a specific async search is restricted to the user or API key that - submitted it. + .. raw:: html + +

Get async search results.

+

Retrieve the results of a previously submitted asynchronous search request. + If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it.

+ ``_ @@ -149,10 +153,12 @@ async def status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the async search status. Get the status of a previously submitted async search - request given its identifier, without retrieving search results. If the Elasticsearch - security features are enabled, use of this API is restricted to the `monitoring_user` - role. + .. raw:: html + +

Get the async search status.

+

Get the status of a previously submitted async search request given its identifier, without retrieving search results. + If the Elasticsearch security features are enabled, use of this API is restricted to the monitoring_user role.

+ ``_ @@ -326,15 +332,14 @@ async def submit( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Run an async search. When the primary sort of the results is an indexed field, - shards get sorted based on minimum and maximum value that they hold for that - field. Partial results become available following the sort criteria that was - requested. Warning: Asynchronous search does not support scroll or search requests - that include only the suggest section. By default, Elasticsearch does not allow - you to store an async search response larger than 10Mb and an attempt to do this - results in an error. The maximum allowed size for a stored async search response - can be set by changing the `search.max_async_search_response_size` cluster level - setting. + .. raw:: html + +

Run an async search.

+

When the primary sort of the results is an indexed field, shards get sorted based on minimum and maximum value that they hold for that field. Partial results become available following the sort criteria that was requested.

+

Warning: Asynchronous search does not support scroll or search requests that include only the suggest section.

+

By default, Elasticsearch does not allow you to store an async search response larger than 10Mb and an attempt to do this results in an error. + The maximum allowed size for a stored async search response can be set by changing the search.max_async_search_response_size cluster level setting.
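A hedged end-to-end sketch with the Python client; index name, query, and timeout values are illustrative:

```
# Submit, poll, fetch, and clean up an async search.
resp = client.async_search.submit(
    index="my-index",
    query={"match_all": {}},
    wait_for_completion_timeout="1s",
    keep_on_completion=True,
)
search_id = resp["id"]
if client.async_search.status(id=search_id)["is_running"] is False:
    results = client.async_search.get(id=search_id)
client.async_search.delete(id=search_id)
```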

+ ``_ diff --git a/elasticsearch/_async/client/autoscaling.py b/elasticsearch/_async/client/autoscaling.py index 82e0e6d8c..9999f82e8 100644 --- a/elasticsearch/_async/client/autoscaling.py +++ b/elasticsearch/_async/client/autoscaling.py @@ -38,9 +38,11 @@ async def delete_autoscaling_policy( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete an autoscaling policy. NOTE: This feature is designed for indirect use - by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. - Direct use is not supported. + .. raw:: html + +

Delete an autoscaling policy.

+

NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

+ ``_ @@ -89,18 +91,18 @@ async def get_autoscaling_capacity( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the autoscaling capacity. NOTE: This feature is designed for indirect use - by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. - Direct use is not supported. This API gets the current autoscaling capacity based - on the configured autoscaling policy. It will return information to size the - cluster appropriately to the current workload. The `required_capacity` is calculated - as the maximum of the `required_capacity` result of all individual deciders that - are enabled for the policy. The operator should verify that the `current_nodes` - match the operator’s knowledge of the cluster to avoid making autoscaling decisions - based on stale or incomplete information. The response contains decider-specific - information you can use to diagnose how and why autoscaling determined a certain - capacity was required. This information is provided for diagnosis only. Do not - use this information to make autoscaling decisions. + .. raw:: html + +

Get the autoscaling capacity.

+

NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

+

This API gets the current autoscaling capacity based on the configured autoscaling policy. + It will return information to size the cluster appropriately to the current workload.

+

The required_capacity is calculated as the maximum of the required_capacity result of all individual deciders that are enabled for the policy.

+

The operator should verify that the current_nodes match the operator’s knowledge of the cluster to avoid making autoscaling decisions based on stale or incomplete information.

+

The response contains decider-specific information you can use to diagnose how and why autoscaling determined a certain capacity was required. + This information is provided for diagnosis only. + Do not use this information to make autoscaling decisions.
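A minimal sketch with the Python client, for diagnosis only as noted above:

```
# Inspect the calculated autoscaling capacity per policy.
capacity = client.autoscaling.get_autoscaling_capacity()
print(capacity["policies"])
```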

+ ``_ @@ -143,9 +145,11 @@ async def get_autoscaling_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get an autoscaling policy. NOTE: This feature is designed for indirect use by - Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. - Direct use is not supported. + .. raw:: html + +

Get an autoscaling policy.

+

NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

+ ``_ @@ -196,9 +200,11 @@ async def put_autoscaling_policy( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update an autoscaling policy. NOTE: This feature is designed for indirect - use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on - Kubernetes. Direct use is not supported. + .. raw:: html + +

Create or update an autoscaling policy.

+

NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

+ ``_ diff --git a/elasticsearch/_async/client/cat.py b/elasticsearch/_async/client/cat.py index f51f75373..6ffa36b68 100644 --- a/elasticsearch/_async/client/cat.py +++ b/elasticsearch/_async/client/cat.py @@ -57,11 +57,13 @@ async def aliases( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get aliases. Get the cluster's index aliases, including filter and routing information. - This API does not return data stream aliases. IMPORTANT: CAT APIs are only intended - for human consumption using the command line or the Kibana console. They are - not intended for use by applications. For application consumption, use the aliases - API. + .. raw:: html + +

Get aliases.

+

Get the cluster's index aliases, including filter and routing information. + This API does not return data stream aliases.

+

IMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.

+ ``_ @@ -152,10 +154,12 @@ async def allocation( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get shard allocation information. Get a snapshot of the number of shards allocated - to each data node and their disk space. IMPORTANT: CAT APIs are only intended - for human consumption using the command line or Kibana console. They are not - intended for use by applications. + .. raw:: html + +

Get shard allocation information.

+

Get a snapshot of the number of shards allocated to each data node and their disk space.

+

IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.

+ ``_ @@ -237,12 +241,14 @@ async def component_templates( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get component templates. Get information about component templates in a cluster. - Component templates are building blocks for constructing index templates that - specify index mappings, settings, and aliases. IMPORTANT: CAT APIs are only intended - for human consumption using the command line or Kibana console. They are not - intended for use by applications. For application consumption, use the get component - template API. + .. raw:: html + +

Get component templates.

+

Get information about component templates in a cluster. + Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.

+

IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. + They are not intended for use by applications. For application consumption, use the get component template API.

+ ``_ @@ -319,12 +325,14 @@ async def count( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get a document count. Get quick access to a document count for a data stream, - an index, or an entire cluster. The document count only includes live documents, - not deleted documents which have not yet been removed by the merge process. IMPORTANT: - CAT APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. For application consumption, - use the count API. + .. raw:: html + +

Get a document count.

+

Get quick access to a document count for a data stream, an index, or an entire cluster. + The document count only includes live documents, not deleted documents which have not yet been removed by the merge process.

+

IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. + They are not intended for use by applications. For application consumption, use the count API.

+ ``_ @@ -396,11 +404,13 @@ async def fielddata( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get field data cache information. Get the amount of heap memory currently used - by the field data cache on every data node in the cluster. IMPORTANT: cat APIs - are only intended for human consumption using the command line or Kibana console. - They are not intended for use by applications. For application consumption, use - the nodes stats API. + .. raw:: html + +

Get field data cache information.

+

Get the amount of heap memory currently used by the field data cache on every data node in the cluster.

+

IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. + They are not intended for use by applications. For application consumption, use the nodes stats API.

+ ``_ @@ -474,17 +484,19 @@ async def health( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get the cluster health status. IMPORTANT: CAT APIs are only intended for human - consumption using the command line or Kibana console. They are not intended for - use by applications. For application consumption, use the cluster health API. - This API is often used to check malfunctioning clusters. To help you track cluster - health alongside log files and alerting systems, the API returns timestamps in - two formats: `HH:MM:SS`, which is human-readable but includes no date information; - `Unix epoch time`, which is machine-sortable and includes date information. The - latter format is useful for cluster recoveries that take multiple days. You can - use the cat health API to verify cluster health across multiple nodes. You also - can use the API to track the recovery of a large cluster over a longer period - of time. + .. raw:: html + +

Get the cluster health status.

+

IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. + They are not intended for use by applications. For application consumption, use the cluster health API. + This API is often used to check malfunctioning clusters. + To help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats: + HH:MM:SS, which is human-readable but includes no date information; + Unix epoch time, which is machine-sortable and includes date information. + The latter format is useful for cluster recoveries that take multiple days. + You can use the cat health API to verify cluster health across multiple nodes. + You also can use the API to track the recovery of a large cluster over a longer period of time.

+ ``_ @@ -538,7 +550,11 @@ async def health( @_rewrite_parameters() async def help(self) -> TextApiResponse: """ - Get CAT help. Get help for the CAT APIs. + .. raw:: html + +

Get CAT help.

+

Get help for the CAT APIs.

+ ``_ """ @@ -589,16 +605,23 @@ async def indices( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get index information. Get high-level information about indices in a cluster, - including backing indices for data streams. Use this request to get the following - information for each index in a cluster: - shard count - document count - deleted - document count - primary store size - total store size of all shards, including - shard replicas These metrics are retrieved directly from Lucene, which Elasticsearch - uses internally to power indexing and search. As a result, all document counts - include hidden nested documents. To get an accurate count of Elasticsearch documents, - use the cat count or count APIs. CAT APIs are only intended for human consumption - using the command line or Kibana console. They are not intended for use by applications. - For application consumption, use an index endpoint. + .. raw:: html + +

Get index information.

+

Get high-level information about indices in a cluster, including backing indices for data streams.

+

Use this request to get the following information for each index in a cluster:

+
    +
  • shard count
  • +
  • document count
  • +
  • deleted document count
  • +
  • primary store size
  • +
  • total store size of all shards, including shard replicas
  • +
+

These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents. + To get an accurate count of Elasticsearch documents, use the cat count or count APIs.

+

CAT APIs are only intended for human consumption using the command line or Kibana console. + They are not intended for use by applications. For application consumption, use an index endpoint.
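For ad hoc inspection from Python, a hedged sketch (placeholder index pattern; applications should prefer the count or index APIs):

```
# Request JSON output and a few columns from the cat indices API.
client.cat.indices(
    index="my-*",
    format="json",
    v=True,
    h=["index", "docs.count", "store.size"],
)
```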

+ ``_ @@ -691,10 +714,12 @@ async def master( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get master node information. Get information about the master node, including - the ID, bound IP address, and name. IMPORTANT: cat APIs are only intended for - human consumption using the command line or Kibana console. They are not intended - for use by applications. For application consumption, use the nodes info API. + .. raw:: html + +

Get master node information.

+

Get information about the master node, including the ID, bound IP address, and name.

+

IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.

+ ``_ @@ -865,11 +890,14 @@ async def ml_data_frame_analytics( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get data frame analytics jobs. Get configuration and usage information about - data frame analytics jobs. IMPORTANT: CAT APIs are only intended for human consumption - using the Kibana console or command line. They are not intended for use by applications. - For application consumption, use the get data frame analytics jobs statistics - API. + .. raw:: html + +

Get data frame analytics jobs.

+

Get configuration and usage information about data frame analytics jobs.

+

IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + console or command line. They are not intended for use by applications. For + application consumption, use the get data frame analytics jobs statistics API.

+ ``_ @@ -1027,13 +1055,17 @@ async def ml_datafeeds( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get datafeeds. Get configuration and usage information about datafeeds. This - API returns a maximum of 10,000 datafeeds. If the Elasticsearch security features - are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` - cluster privileges to use this API. IMPORTANT: CAT APIs are only intended for - human consumption using the Kibana console or command line. They are not intended - for use by applications. For application consumption, use the get datafeed statistics - API. + .. raw:: html + +

Get datafeeds.

+

Get configuration and usage information about datafeeds. + This API returns a maximum of 10,000 datafeeds. + If the Elasticsearch security features are enabled, you must have monitor_ml, monitor, manage_ml, or manage + cluster privileges to use this API.

+

IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + console or command line. They are not intended for use by applications. For + application consumption, use the get datafeed statistics API.

+ ``_ @@ -1389,13 +1421,17 @@ async def ml_jobs( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get anomaly detection jobs. Get configuration and usage information for anomaly - detection jobs. This API returns a maximum of 10,000 jobs. If the Elasticsearch - security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, - or `manage` cluster privileges to use this API. IMPORTANT: CAT APIs are only - intended for human consumption using the Kibana console or command line. They - are not intended for use by applications. For application consumption, use the - get anomaly detection job statistics API. + .. raw:: html + +

Get anomaly detection jobs.

+

Get configuration and usage information for anomaly detection jobs. + This API returns a maximum of 10,000 jobs. + If the Elasticsearch security features are enabled, you must have monitor_ml, + monitor, manage_ml, or manage cluster privileges to use this API.

+

IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + console or command line. They are not intended for use by applications. For + application consumption, use the get anomaly detection job statistics API.

+ ``_ @@ -1573,10 +1609,14 @@ async def ml_trained_models( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get trained models. Get configuration and usage information about inference trained - models. IMPORTANT: CAT APIs are only intended for human consumption using the - Kibana console or command line. They are not intended for use by applications. - For application consumption, use the get trained models statistics API. + .. raw:: html + +

Get trained models.

+

Get configuration and usage information about inference trained models.

+

IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + console or command line. They are not intended for use by applications. For + application consumption, use the get trained models statistics API.

+ ``_ @@ -1664,10 +1704,12 @@ async def nodeattrs( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get node attribute information. Get information about custom node attributes. - IMPORTANT: cat APIs are only intended for human consumption using the command - line or Kibana console. They are not intended for use by applications. For application - consumption, use the nodes info API. + .. raw:: html + +

Get node attribute information.

+

Get information about custom node attributes. + IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.

+ ``_ @@ -1745,10 +1787,12 @@ async def nodes( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get node information. Get information about the nodes in a cluster. IMPORTANT: - cat APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. For application consumption, - use the nodes info API. + .. raw:: html + +

Get node information.

+

Get information about the nodes in a cluster. + IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.

+ ``_ @@ -1830,10 +1874,12 @@ async def pending_tasks( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get pending task information. Get information about cluster-level changes that - have not yet taken effect. IMPORTANT: cat APIs are only intended for human consumption - using the command line or Kibana console. They are not intended for use by applications. - For application consumption, use the pending cluster tasks API. + .. raw:: html + +

Get pending task information.

+

Get information about cluster-level changes that have not yet taken effect. + IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API.

+ ``_ @@ -1908,10 +1954,12 @@ async def plugins( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get plugin information. Get a list of plugins running on each node of a cluster. - IMPORTANT: cat APIs are only intended for human consumption using the command - line or Kibana console. They are not intended for use by applications. For application - consumption, use the nodes info API. + .. raw:: html + +

Get plugin information.

+

Get a list of plugins running on each node of a cluster. + IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.

+ ``_ @@ -1992,14 +2040,14 @@ async def recovery( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get shard recovery information. Get information about ongoing and completed shard - recoveries. Shard recovery is the process of initializing a shard copy, such - as restoring a primary shard from a snapshot or syncing a replica shard from - a primary shard. When a shard recovery completes, the recovered shard is available - for search and indexing. For data streams, the API returns information about - the stream’s backing indices. IMPORTANT: cat APIs are only intended for human - consumption using the command line or Kibana console. They are not intended for - use by applications. For application consumption, use the index recovery API. + .. raw:: html + +

Get shard recovery information.

+

Get information about ongoing and completed shard recoveries. + Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. + For data streams, the API returns information about the stream’s backing indices. + IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API.

+ ``_ @@ -2082,10 +2130,12 @@ async def repositories( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get snapshot repository information. Get a list of snapshot repositories for - a cluster. IMPORTANT: cat APIs are only intended for human consumption using - the command line or Kibana console. They are not intended for use by applications. - For application consumption, use the get snapshot repository API. + .. raw:: html + +

Get snapshot repository information.

+

Get a list of snapshot repositories for a cluster. + IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API.

+ ``_ @@ -2160,11 +2210,13 @@ async def segments( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get segment information. Get low-level information about the Lucene segments - in index shards. For data streams, the API returns information about the backing - indices. IMPORTANT: cat APIs are only intended for human consumption using the - command line or Kibana console. They are not intended for use by applications. - For application consumption, use the index segments API. + .. raw:: html + +

Get segment information.

+

Get low-level information about the Lucene segments in index shards. + For data streams, the API returns information about the backing indices. + IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API.

+ ``_ @@ -2252,10 +2304,13 @@ async def shards( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get shard information. Get information about the shards in a cluster. For data - streams, the API returns information about the backing indices. IMPORTANT: cat - APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. + .. raw:: html + +

Get shard information.

+

Get information about the shards in a cluster. + For data streams, the API returns information about the backing indices. + IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.

+ ``_ @@ -2338,11 +2393,13 @@ async def snapshots( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get snapshot information. Get information about the snapshots stored in one or - more repositories. A snapshot is a backup of an index or running Elasticsearch - cluster. IMPORTANT: cat APIs are only intended for human consumption using the - command line or Kibana console. They are not intended for use by applications. - For application consumption, use the get snapshot API. + .. raw:: html + +

Get snapshot information.

+

Get information about the snapshots stored in one or more repositories. + A snapshot is a backup of an index or running Elasticsearch cluster. + IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API.

+ ``_ @@ -2430,10 +2487,12 @@ async def tasks( wait_for_completion: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get task information. Get information about tasks currently running in the cluster. - IMPORTANT: cat APIs are only intended for human consumption using the command - line or Kibana console. They are not intended for use by applications. For application - consumption, use the task management API. + .. raw:: html + +

Get task information.

+

Get information about tasks currently running in the cluster. + IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API.

+ ``_ @@ -2521,11 +2580,13 @@ async def templates( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get index template information. Get information about the index templates in - a cluster. You can use index templates to apply index settings and field mappings - to new indices at creation. IMPORTANT: cat APIs are only intended for human consumption - using the command line or Kibana console. They are not intended for use by applications. - For application consumption, use the get index template API. + .. raw:: html + +

Get index template information.

+

Get information about the index templates in a cluster. + You can use index templates to apply index settings and field mappings to new indices at creation. + IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API.

+ ``_ @@ -2607,11 +2668,13 @@ async def thread_pool( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get thread pool statistics. Get thread pool statistics for each node in a cluster. - Returned information includes all built-in thread pools and custom thread pools. - IMPORTANT: cat APIs are only intended for human consumption using the command - line or Kibana console. They are not intended for use by applications. For application - consumption, use the nodes info API. + .. raw:: html + +

Get thread pool statistics.

+

Get thread pool statistics for each node in a cluster. + Returned information includes all built-in thread pools and custom thread pools. + IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.

+ ``_ @@ -2861,10 +2924,14 @@ async def transforms( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get transform information. Get configuration and usage information about transforms. - CAT APIs are only intended for human consumption using the Kibana console or - command line. They are not intended for use by applications. For application - consumption, use the get transform statistics API. + .. raw:: html + +

Get transform information.

+

Get configuration and usage information about transforms.

+

CAT APIs are only intended for human consumption using the Kibana + console or command line. They are not intended for use by applications. For + application consumption, use the get transform statistics API.

+ ``_ diff --git a/elasticsearch/_async/client/ccr.py b/elasticsearch/_async/client/ccr.py index 5dc4ae038..aa7b4d085 100644 --- a/elasticsearch/_async/client/ccr.py +++ b/elasticsearch/_async/client/ccr.py @@ -37,8 +37,11 @@ async def delete_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete auto-follow patterns. Delete a collection of cross-cluster replication - auto-follow patterns. + .. raw:: html + +

Delete auto-follow patterns. + Delete a collection of cross-cluster replication auto-follow patterns.

+ ``_ @@ -117,10 +120,12 @@ async def follow( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a follower. Create a cross-cluster replication follower index that follows - a specific leader index. When the API returns, the follower index exists and - cross-cluster replication starts replicating operations from the leader index - to the follower index. + .. raw:: html + +

Create a follower. + Create a cross-cluster replication follower index that follows a specific leader index. + When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index.
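Purely for orientation, one way this might be invoked from the async Python client; the remote cluster alias and index names are placeholders, and the keyword arguments assume the 8.x body-field style of this client rather than an explicit body dict:

    resp = await es.ccr.follow(
        index="follower-index",              # follower index created on the local cluster
        leader_index="leader-index",         # index being replicated on the remote cluster
        remote_cluster="my-remote-cluster",  # alias configured under cluster.remote.*
    )
    print(resp)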

+ ``_ @@ -244,10 +249,12 @@ async def follow_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get follower information. Get information about all cross-cluster replication - follower indices. For example, the results include follower index names, leader - index names, replication options, and whether the follower indices are active - or paused. + .. raw:: html + +

Get follower information. + Get information about all cross-cluster replication follower indices. + For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused.

+ ``_ @@ -292,9 +299,12 @@ async def follow_stats( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get follower stats. Get cross-cluster replication follower stats. The API returns - shard-level stats about the "following tasks" associated with each shard for - the specified indices. + .. raw:: html + +

Get follower stats. + Get cross-cluster replication follower stats. + The API returns shard-level stats about the "following tasks" associated with each shard for the specified indices.

+ ``_ @@ -352,23 +362,19 @@ async def forget_follower( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Forget a follower. Remove the cross-cluster replication follower retention leases - from the leader. A following index takes out retention leases on its leader index. - These leases are used to increase the likelihood that the shards of the leader - index retain the history of operations that the shards of the following index - need to run replication. When a follower index is converted to a regular index - by the unfollow API (either by directly calling the API or by index lifecycle - management tasks), these leases are removed. However, removal of the leases can - fail, for example when the remote cluster containing the leader index is unavailable. - While the leases will eventually expire on their own, their extended existence - can cause the leader index to hold more history than necessary and prevent index - lifecycle management from performing some operations on the leader index. This - API exists to enable manually removing the leases when the unfollow API is unable - to do so. NOTE: This API does not stop replication by a following index. If you - use this API with a follower index that is still actively following, the following - index will add back retention leases on the leader. The only purpose of this - API is to handle the case of failure to remove the following retention leases - after the unfollow API is invoked. + .. raw:: html + +

Forget a follower. + Remove the cross-cluster replication follower retention leases from the leader.

+

A following index takes out retention leases on its leader index. + These leases are used to increase the likelihood that the shards of the leader index retain the history of operations that the shards of the following index need to run replication. + When a follower index is converted to a regular index by the unfollow API (either by directly calling the API or by index lifecycle management tasks), these leases are removed. + However, removal of the leases can fail, for example when the remote cluster containing the leader index is unavailable. + While the leases will eventually expire on their own, their extended existence can cause the leader index to hold more history than necessary and prevent index lifecycle management from performing some operations on the leader index. + This API exists to enable manually removing the leases when the unfollow API is unable to do so.

+

NOTE: This API does not stop replication by a following index. If you use this API with a follower index that is still actively following, the following index will add back retention leases on the leader. + The only purpose of this API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked.

+ ``_ @@ -429,7 +435,11 @@ async def get_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. + .. raw:: html + +

Get auto-follow patterns. + Get cross-cluster replication auto-follow patterns.

+ ``_ @@ -477,14 +487,16 @@ async def pause_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Pause an auto-follow pattern. Pause a cross-cluster replication auto-follow pattern. - When the API returns, the auto-follow pattern is inactive. New indices that are - created on the remote cluster and match the auto-follow patterns are ignored. - You can resume auto-following with the resume auto-follow pattern API. When it - resumes, the auto-follow pattern is active again and automatically configures - follower indices for newly created indices on the remote cluster that match its - patterns. Remote indices that were created while the pattern was paused will - also be followed, unless they have been deleted or closed in the interim. + .. raw:: html + +

Pause an auto-follow pattern. + Pause a cross-cluster replication auto-follow pattern. + When the API returns, the auto-follow pattern is inactive. + New indices that are created on the remote cluster and match the auto-follow patterns are ignored.

+

You can resume auto-following with the resume auto-follow pattern API. + When it resumes, the auto-follow pattern is active again and automatically configures follower indices for newly created indices on the remote cluster that match its patterns. + Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim.

+ ``_ @@ -529,10 +541,14 @@ async def pause_follow( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Pause a follower. Pause a cross-cluster replication follower index. The follower - index will not fetch any additional operations from the leader index. You can - resume following with the resume follower API. You can pause and resume a follower - index to change the configuration of the following task. + .. raw:: html + +

Pause a follower. + Pause a cross-cluster replication follower index. + The follower index will not fetch any additional operations from the leader index. + You can resume following with the resume follower API. + You can pause and resume a follower index to change the configuration of the following task.
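A short sketch of the pause/resume round trip described above, with a placeholder index name and reusing the client es:

    # Pause replication, adjust the following task as needed, then resume it
    await es.ccr.pause_follow(index="follower-index")
    # ... change the configuration of the following task here ...
    await es.ccr.resume_follow(index="follower-index")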

+ ``_ @@ -611,14 +627,15 @@ async def put_auto_follow_pattern( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update auto-follow patterns. Create a collection of cross-cluster replication - auto-follow patterns for a remote cluster. Newly created indices on the remote - cluster that match any of the patterns are automatically configured as follower - indices. Indices on the remote cluster that were created before the auto-follow - pattern was created will not be auto-followed even if they match the pattern. - This API can also be used to update auto-follow patterns. NOTE: Follower indices - that were configured automatically before updating an auto-follow pattern will - remain unchanged even if they do not match against the new patterns. + .. raw:: html + +

Create or update auto-follow patterns. + Create a collection of cross-cluster replication auto-follow patterns for a remote cluster. + Newly created indices on the remote cluster that match any of the patterns are automatically configured as follower indices. + Indices on the remote cluster that were created before the auto-follow pattern was created will not be auto-followed even if they match the pattern.

+

This API can also be used to update auto-follow patterns. + NOTE: Follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns.

+ ``_ @@ -746,11 +763,13 @@ async def resume_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resume an auto-follow pattern. Resume a cross-cluster replication auto-follow - pattern that was paused. The auto-follow pattern will resume configuring following - indices for newly created indices that match its patterns on the remote cluster. - Remote indices created while the pattern was paused will also be followed unless - they have been deleted or closed in the interim. + .. raw:: html + +

Resume an auto-follow pattern. + Resume a cross-cluster replication auto-follow pattern that was paused. + The auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster. + Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim.

+ ``_ @@ -819,11 +838,14 @@ async def resume_follow( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Resume a follower. Resume a cross-cluster replication follower index that was - paused. The follower index could have been paused with the pause follower API. - Alternatively it could be paused due to replication that cannot be retried due - to failures during following tasks. When this API returns, the follower index - will resume fetching operations from the leader index. + .. raw:: html + +

Resume a follower. + Resume a cross-cluster replication follower index that was paused. + The follower index could have been paused with the pause follower API. + Alternatively it could be paused due to replication that cannot be retried due to failures during following tasks. + When this API returns, the follower index will resume fetching operations from the leader index.

+ ``_ @@ -910,8 +932,11 @@ async def stats( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get cross-cluster replication stats. This API returns stats about auto-following - and the same shard-level stats as the get follower stats API. + .. raw:: html + +

Get cross-cluster replication stats. + This API returns stats about auto-following and the same shard-level stats as the get follower stats API.

+ ``_ @@ -956,13 +981,14 @@ async def unfollow( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Unfollow an index. Convert a cross-cluster replication follower index to a regular - index. The API stops the following task associated with a follower index and - removes index metadata and settings associated with cross-cluster replication. - The follower index must be paused and closed before you call the unfollow API. - NOTE: Currently cross-cluster replication does not support converting an existing - regular index to a follower index. Converting a follower index to a regular index - is an irreversible operation. + .. raw:: html + +

Unfollow an index. + Convert a cross-cluster replication follower index to a regular index. + The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. + The follower index must be paused and closed before you call the unfollow API.

+

NOTE: Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation.

+ ``_ diff --git a/elasticsearch/_async/client/cluster.py b/elasticsearch/_async/client/cluster.py index 4fb033e73..e440392a4 100644 --- a/elasticsearch/_async/client/cluster.py +++ b/elasticsearch/_async/client/cluster.py @@ -45,13 +45,14 @@ async def allocation_explain( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Explain the shard allocations. Get explanations for shard allocations in the - cluster. For unassigned shards, it provides an explanation for why the shard - is unassigned. For assigned shards, it provides an explanation for why the shard - is remaining on its current node and has not moved or rebalanced to another node. - This API can be very useful when attempting to diagnose why a shard is unassigned - or why a shard continues to remain on its current node when you might expect - otherwise. + .. raw:: html + +

Explain the shard allocations. + Get explanations for shard allocations in the cluster. + For unassigned shards, it provides an explanation for why the shard is unassigned. + For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node. + This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise.
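For illustration, a minimal call to this API with the async client; the index name and shard number are placeholders (omit them entirely to let Elasticsearch pick the first unassigned shard):

    explanation = await es.cluster.allocation_explain(
        index="my-index",
        shard=0,
        primary=True,
    )
    print(explanation)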

+ ``_ @@ -123,8 +124,11 @@ async def delete_component_template( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete component templates. Component templates are building blocks for constructing - index templates that specify index mappings, settings, and aliases. + .. raw:: html + +

Delete component templates. + Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.

+ ``_ @@ -175,8 +179,11 @@ async def delete_voting_config_exclusions( wait_for_removal: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear cluster voting config exclusions. Remove master-eligible nodes from the - voting configuration exclusion list. + .. raw:: html + +

Clear cluster voting config exclusions. + Remove master-eligible nodes from the voting configuration exclusion list.

+ ``_ @@ -226,8 +233,11 @@ async def exists_component_template( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Check component templates. Returns information about whether a particular component - template exists. + .. raw:: html + +

Check component templates. + Returns information about whether a particular component template exists.

+ ``_ @@ -282,7 +292,11 @@ async def get_component_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get component templates. Get information about component templates. + .. raw:: html + +

Get component templates. + Get information about component templates.

+ ``_ @@ -345,8 +359,11 @@ async def get_settings( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get cluster-wide settings. By default, it returns only settings that have been - explicitly defined. + .. raw:: html + +

Get cluster-wide settings. + By default, it returns only settings that have been explicitly defined.

+ ``_ @@ -428,16 +445,17 @@ async def health( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the cluster health status. You can also use the API to get the health status - of only specified data streams and indices. For data streams, the API retrieves - the health status of the stream’s backing indices. The cluster health status - is: green, yellow or red. On the shard level, a red status indicates that the - specific shard is not allocated in the cluster. Yellow means that the primary - shard is allocated but replicas are not. Green means that all shards are allocated. - The index level status is controlled by the worst shard status. One of the main - benefits of the API is the ability to wait until the cluster reaches a certain - high watermark health level. The cluster status is controlled by the worst index - status. + .. raw:: html + +

Get the cluster health status. + You can also use the API to get the health status of only specified data streams and indices. + For data streams, the API retrieves the health status of the stream’s backing indices.

+

The cluster health status is: green, yellow or red. + On the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated. + The index level status is controlled by the worst shard status.

+

One of the main benefits of the API is the ability to wait until the cluster reaches a certain high watermark health level. + The cluster status is controlled by the worst index status.
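As a usage sketch only, waiting for at least yellow health from the async client; the timeout value is an arbitrary choice:

    health = await es.cluster.health(wait_for_status="yellow", timeout="30s")
    print(health["status"], health["number_of_nodes"])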

+ ``_ @@ -541,7 +559,11 @@ async def info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get cluster info. Returns basic information about the cluster. + .. raw:: html + +

Get cluster info. + Returns basic information about the cluster.

+ ``_ @@ -583,14 +605,14 @@ async def pending_tasks( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the pending cluster tasks. Get information about cluster-level changes (such - as create index, update mapping, allocate or fail shard) that have not yet taken - effect. NOTE: This API returns a list of any pending updates to the cluster state. - These are distinct from the tasks reported by the task management API which include - periodic tasks and tasks initiated by the user, such as node stats, search queries, - or create index requests. However, if a user-initiated task such as a create - index command causes a cluster state update, the activity of this task might - be reported by both task api and pending cluster tasks API. + .. raw:: html + +

Get the pending cluster tasks. + Get information about cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet taken effect.

+

NOTE: This API returns a list of any pending updates to the cluster state. + These are distinct from the tasks reported by the task management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. + However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both the task API and the pending cluster tasks API.

+ ``_ @@ -639,33 +661,24 @@ async def post_voting_config_exclusions( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update voting configuration exclusions. Update the cluster voting config exclusions - by node IDs or node names. By default, if there are more than three master-eligible - nodes in the cluster and you remove fewer than half of the master-eligible nodes - in the cluster at once, the voting configuration automatically shrinks. If you - want to shrink the voting configuration to contain fewer than three nodes or - to remove half or more of the master-eligible nodes in the cluster at once, use - this API to remove departing nodes from the voting configuration manually. The - API adds an entry for each specified node to the cluster’s voting configuration - exclusions list. It then waits until the cluster has reconfigured its voting - configuration to exclude the specified nodes. Clusters should have no voting - configuration exclusions in normal operation. Once the excluded nodes have stopped, - clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. - This API waits for the nodes to be fully removed from the cluster before it returns. - If your cluster has voting configuration exclusions for nodes that you no longer - intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` - to clear the voting configuration exclusions without waiting for the nodes to - leave the cluster. A response to `POST /_cluster/voting_config_exclusions` with - an HTTP status code of 200 OK guarantees that the node has been removed from - the voting configuration and will not be reinstated until the voting configuration - exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. - If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response - with an HTTP status code other than 200 OK then the node may not have been removed - from the voting configuration. In that case, you may safely retry the call. NOTE: - Voting exclusions are required only when you remove at least half of the master-eligible - nodes from a cluster in a short time period. They are not required when removing - master-ineligible nodes or when removing fewer than half of the master-eligible - nodes. + .. raw:: html + +

Update voting configuration exclusions. + Update the cluster voting config exclusions by node IDs or node names. + By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at once, the voting configuration automatically shrinks. + If you want to shrink the voting configuration to contain fewer than three nodes or to remove half or more of the master-eligible nodes in the cluster at once, use this API to remove departing nodes from the voting configuration manually. + The API adds an entry for each specified node to the cluster’s voting configuration exclusions list. + It then waits until the cluster has reconfigured its voting configuration to exclude the specified nodes.

+

Clusters should have no voting configuration exclusions in normal operation. + Once the excluded nodes have stopped, clear the voting configuration exclusions with DELETE /_cluster/voting_config_exclusions. + This API waits for the nodes to be fully removed from the cluster before it returns. + If your cluster has voting configuration exclusions for nodes that you no longer intend to remove, use DELETE /_cluster/voting_config_exclusions?wait_for_removal=false to clear the voting configuration exclusions without waiting for the nodes to leave the cluster.

+

A response to POST /_cluster/voting_config_exclusions with an HTTP status code of 200 OK guarantees that the node has been removed from the voting configuration and will not be reinstated until the voting configuration exclusions are cleared by calling DELETE /_cluster/voting_config_exclusions. + If the call to POST /_cluster/voting_config_exclusions fails or returns a response with an HTTP status code other than 200 OK then the node may not have been removed from the voting configuration. + In that case, you may safely retry the call.

+

NOTE: Voting exclusions are required only when you remove at least half of the master-eligible nodes from a cluster in a short time period. + They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes.
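A hedged sketch of the workflow described above, assuming a node named node-1 that is about to be decommissioned:

    # Exclude the departing master-eligible node from the voting configuration
    await es.cluster.post_voting_config_exclusions(node_names="node-1")
    # ... shut the node down ...
    # Clear the exclusion list once the node has stopped
    await es.cluster.delete_voting_config_exclusions()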

+ ``_ @@ -730,21 +743,23 @@ async def put_component_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a component template. Component templates are building blocks - for constructing index templates that specify index mappings, settings, and aliases. - An index template can be composed of multiple component templates. To use a component - template, specify it in an index template’s `composed_of` list. Component templates - are only applied to new data streams and indices as part of a matching index - template. Settings and mappings specified directly in the index template or the - create index request override any settings or mappings specified in a component - template. Component templates are only used during index creation. For data streams, - this includes data stream creation and the creation of a stream’s backing indices. - Changes to component templates do not affect existing indices, including a stream’s - backing indices. You can use C-style `/* *\\/` block comments in component templates. - You can include comments anywhere in the request body except before the opening - curly bracket. **Applying component templates** You cannot directly apply a component - template to a data stream or index. To be applied, a component template must - be included in an index template's `composed_of` list. + .. raw:: html + +

Create or update a component template. + Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.

+

An index template can be composed of multiple component templates. + To use a component template, specify it in an index template’s composed_of list. + Component templates are only applied to new data streams and indices as part of a matching index template.

+

Settings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template.

+

Component templates are only used during index creation. + For data streams, this includes data stream creation and the creation of a stream’s backing indices. + Changes to component templates do not affect existing indices, including a stream’s backing indices.

+

You can use C-style /* *\\/ block comments in component templates. + You can include comments anywhere in the request body except before the opening curly bracket.

+

Applying component templates

+

You cannot directly apply a component template to a data stream or index. + To be applied, a component template must be included in an index template's composed_of list.
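A minimal sketch of creating a component template with the async client; the template name, settings, and mapping are illustrative:

    await es.cluster.put_component_template(
        name="my-settings-component",
        template={
            "settings": {"index.number_of_replicas": 1},
            "mappings": {"properties": {"created_at": {"type": "date"}}},
        },
    )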

+ ``_ @@ -833,26 +848,23 @@ async def put_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the cluster settings. Configure and update dynamic settings on a running - cluster. You can also configure dynamic settings locally on an unstarted or shut - down node in `elasticsearch.yml`. Updates made with this API can be persistent, - which apply across cluster restarts, or transient, which reset after a cluster - restart. You can also reset transient or persistent settings by assigning them - a null value. If you configure the same setting using multiple methods, Elasticsearch - applies the settings in following order of precedence: 1) Transient setting; - 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value. - For example, you can apply a transient setting to override a persistent setting - or `elasticsearch.yml` setting. However, a change to an `elasticsearch.yml` setting - will not override a defined transient or persistent setting. TIP: In Elastic - Cloud, use the user settings feature to configure all cluster settings. This - method automatically rejects unsafe settings that could break your cluster. If - you run Elasticsearch on your own hardware, use this API to configure dynamic - cluster settings. Only use `elasticsearch.yml` for static cluster settings and - node settings. The API doesn’t require a restart and ensures a setting’s value - is the same on all nodes. WARNING: Transient cluster settings are no longer recommended. - Use persistent cluster settings instead. If a cluster becomes unstable, transient - settings can clear unexpectedly, resulting in a potentially undesired cluster - configuration. + .. raw:: html + +

Update the cluster settings. + Configure and update dynamic settings on a running cluster. + You can also configure dynamic settings locally on an unstarted or shut down node in elasticsearch.yml.

+

Updates made with this API can be persistent, which apply across cluster restarts, or transient, which reset after a cluster restart. + You can also reset transient or persistent settings by assigning them a null value.

+

If you configure the same setting using multiple methods, Elasticsearch applies the settings in the following order of precedence: 1) Transient setting; 2) Persistent setting; 3) elasticsearch.yml setting; 4) Default setting value. + For example, you can apply a transient setting to override a persistent setting or elasticsearch.yml setting. + However, a change to an elasticsearch.yml setting will not override a defined transient or persistent setting.

+

TIP: In Elastic Cloud, use the user settings feature to configure all cluster settings. This method automatically rejects unsafe settings that could break your cluster. + If you run Elasticsearch on your own hardware, use this API to configure dynamic cluster settings. + Only use elasticsearch.yml for static cluster settings and node settings. + The API doesn’t require a restart and ensures a setting’s value is the same on all nodes.

+

WARNING: Transient cluster settings are no longer recommended. Use persistent cluster settings instead. + If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration.
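For orientation, a persistent settings update as a sketch; the setting key and value are examples only:

    # Persistent settings survive a full cluster restart; assign None/null to reset a key
    await es.cluster.put_settings(
        persistent={"cluster.routing.allocation.enable": "all"}
    )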

+ ``_ @@ -906,9 +918,12 @@ async def remote_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get remote cluster information. Get all of the configured remote cluster information. - This API returns connection and endpoint information keyed by the configured - remote cluster alias. + .. raw:: html + +

Get remote cluster information. + Get all of the configured remote cluster information. + This API returns connection and endpoint information keyed by the configured remote cluster alias.

+ ``_ """ @@ -953,25 +968,19 @@ async def reroute( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Reroute the cluster. Manually change the allocation of individual shards in the - cluster. For example, a shard can be moved from one node to another explicitly, - an allocation can be canceled, and an unassigned shard can be explicitly allocated - to a specific node. It is important to note that after processing any reroute - commands Elasticsearch will perform rebalancing as normal (respecting the values - of settings such as `cluster.routing.rebalance.enable`) in order to remain in - a balanced state. For example, if the requested allocation includes moving a - shard from node1 to node2 then this may cause a shard to be moved from node2 - back to node1 to even things out. The cluster can be set to disable allocations - using the `cluster.routing.allocation.enable` setting. If allocations are disabled - then the only allocations that will be performed are explicit ones given using - the reroute command, and consequent allocations due to rebalancing. The cluster - will attempt to allocate a shard a maximum of `index.allocation.max_retries` - times in a row (defaults to `5`), before giving up and leaving the shard unallocated. - This scenario can be caused by structural problems such as having an analyzer - which refers to a stopwords file which doesn’t exist on all nodes. Once the problem - has been corrected, allocation can be manually retried by calling the reroute - API with the `?retry_failed` URI query parameter, which will attempt a single - retry round for these shards. + .. raw:: html + +

Reroute the cluster. + Manually change the allocation of individual shards in the cluster. + For example, a shard can be moved from one node to another explicitly, an allocation can be canceled, and an unassigned shard can be explicitly allocated to a specific node.

+

It is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as cluster.routing.rebalance.enable) in order to remain in a balanced state. + For example, if the requested allocation includes moving a shard from node1 to node2 then this may cause a shard to be moved from node2 back to node1 to even things out.

+

The cluster can be set to disable allocations using the cluster.routing.allocation.enable setting. + If allocations are disabled then the only allocations that will be performed are explicit ones given using the reroute command, and consequent allocations due to rebalancing.

+

The cluster will attempt to allocate a shard a maximum of index.allocation.max_retries times in a row (defaults to 5), before giving up and leaving the shard unallocated. + This scenario can be caused by structural problems such as having an analyzer which refers to a stopwords file which doesn’t exist on all nodes.

+

Once the problem has been corrected, allocation can be manually retried by calling the reroute API with the ?retry_failed URI query parameter, which will attempt a single retry round for these shards.
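Two hedged usage sketches for the behaviours described above; index and node names are placeholders:

    # Retry shards whose allocation previously failed the maximum number of times
    await es.cluster.reroute(retry_failed=True)

    # Explicitly move a shard between nodes
    await es.cluster.reroute(
        commands=[{"move": {"index": "my-index", "shard": 0,
                            "from_node": "node-1", "to_node": "node-2"}}]
    )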

+ ``_ @@ -1060,26 +1069,23 @@ async def state( wait_for_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the cluster state. Get comprehensive information about the state of the cluster. - The cluster state is an internal data structure which keeps track of a variety - of information needed by every node, including the identity and attributes of - the other nodes in the cluster; cluster-wide settings; index metadata, including - the mapping and settings for each index; the location and status of every shard - copy in the cluster. The elected master node ensures that every node in the cluster - has a copy of the same cluster state. This API lets you retrieve a representation - of this internal state for debugging or diagnostic purposes. You may need to - consult the Elasticsearch source code to determine the precise meaning of the - response. By default the API will route requests to the elected master node since - this node is the authoritative source of cluster states. You can also retrieve - the cluster state held on the node handling the API request by adding the `?local=true` - query parameter. Elasticsearch may need to expend significant effort to compute - a response to this API in larger clusters, and the response may comprise a very - large quantity of data. If you use this API repeatedly, your cluster may become - unstable. WARNING: The response is a representation of an internal data structure. - Its format is not subject to the same compatibility guarantees as other more - stable APIs and may change from version to version. Do not query this API using - external monitoring tools. Instead, obtain the information you require using - other more stable cluster APIs. + .. raw:: html + +

Get the cluster state. + Get comprehensive information about the state of the cluster.

+

The cluster state is an internal data structure which keeps track of a variety of information needed by every node, including the identity and attributes of the other nodes in the cluster; cluster-wide settings; index metadata, including the mapping and settings for each index; the location and status of every shard copy in the cluster.

+

The elected master node ensures that every node in the cluster has a copy of the same cluster state. + This API lets you retrieve a representation of this internal state for debugging or diagnostic purposes. + You may need to consult the Elasticsearch source code to determine the precise meaning of the response.

+

By default the API will route requests to the elected master node since this node is the authoritative source of cluster states. + You can also retrieve the cluster state held on the node handling the API request by adding the ?local=true query parameter.

+

Elasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data. + If you use this API repeatedly, your cluster may become unstable.

+

WARNING: The response is a representation of an internal data structure. + Its format is not subject to the same compatibility guarantees as other more stable APIs and may change from version to version. + Do not query this API using external monitoring tools. + Instead, obtain the information you require using other more stable cluster APIs.

+ ``_ @@ -1163,9 +1169,11 @@ async def stats( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get cluster statistics. Get basic index metrics (shard numbers, store size, memory - usage) and information about the current nodes that form the cluster (number, - roles, os, jvm versions, memory usage, cpu and installed plugins). + .. raw:: html + +

Get cluster statistics. + Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins).

+ ``_ diff --git a/elasticsearch/_async/client/connector.py b/elasticsearch/_async/client/connector.py index e83cbaa53..2f5080821 100644 --- a/elasticsearch/_async/client/connector.py +++ b/elasticsearch/_async/client/connector.py @@ -43,8 +43,11 @@ async def check_in( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Check in a connector. Update the `last_seen` field in the connector and set it - to the current timestamp. + .. raw:: html + +

Check in a connector.

+

Update the last_seen field in the connector and set it to the current timestamp.

+ ``_ @@ -86,10 +89,14 @@ async def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a connector. Removes a connector and associated sync jobs. This is a destructive - action that is not recoverable. NOTE: This action doesn’t delete any API keys, - ingest pipelines, or data indices associated with the connector. These need to - be removed manually. + .. raw:: html + +

Delete a connector.

+

Removes a connector and associated sync jobs. + This is a destructive action that is not recoverable. + NOTE: This action doesn’t delete any API keys, ingest pipelines, or data indices associated with the connector. + These need to be removed manually.

+ ``_ @@ -134,7 +141,11 @@ async def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a connector. Get the details about a connector. + .. raw:: html + +

Get a connector.

+

Get the details about a connector.

+ ``_ @@ -229,8 +240,12 @@ async def last_sync( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the connector last sync stats. Update the fields related to the last sync - of a connector. This action is used for analytics and monitoring. + .. raw:: html + +

Update the connector last sync stats.

+

Update the fields related to the last sync of a connector. + This action is used for analytics and monitoring.

+ ``_ @@ -325,7 +340,11 @@ async def list( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Get all connectors. Get information about all connectors. + .. raw:: html + +

Get all connectors.

+

Get information about all connectors.

+ ``_ @@ -400,11 +419,13 @@ async def post( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a connector. Connectors are Elasticsearch integrations that bring content - from third-party data sources, which can be deployed on Elastic Cloud or hosted - on your own infrastructure. Elastic managed connectors (Native connectors) are - a managed service on Elastic Cloud. Self-managed connectors (Connector clients) - are self-managed on your infrastructure. + .. raw:: html + +

Create a connector.

+

Connectors are Elasticsearch integrations that bring content from third-party data sources, which can be deployed on Elastic Cloud or hosted on your own infrastructure. + Elastic managed connectors (Native connectors) are a managed service on Elastic Cloud. + Self-managed connectors (Connector clients) are self-managed on your infrastructure.

+ ``_ @@ -483,7 +504,10 @@ async def put( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a connector. + .. raw:: html + +

Create or update a connector.

+ ``_ @@ -553,10 +577,12 @@ async def sync_job_cancel( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Cancel a connector sync job. Cancel a connector sync job, which sets the status - to cancelling and updates `cancellation_requested_at` to the current time. The - connector service is then responsible for setting the status of connector sync - jobs to cancelled. + .. raw:: html + +

Cancel a connector sync job.

+

Cancel a connector sync job, which sets the status to cancelling and updates cancellation_requested_at to the current time. + The connector service is then responsible for setting the status of connector sync jobs to cancelled.

+ ``_ @@ -601,11 +627,13 @@ async def sync_job_check_in( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Check in a connector sync job. Check in a connector sync job and set the `last_seen` - field to the current time before updating it in the internal index. To sync data - using self-managed connectors, you need to deploy the Elastic connector service - on your own infrastructure. This service runs automatically on Elastic Cloud - for Elastic managed connectors. + .. raw:: html + +

Check in a connector sync job. + Check in a connector sync job and set the last_seen field to the current time before updating it in the internal index.

+

To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. + This service runs automatically on Elastic Cloud for Elastic managed connectors.

+ ``_ @@ -656,14 +684,16 @@ async def sync_job_claim( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Claim a connector sync job. This action updates the job status to `in_progress` - and sets the `last_seen` and `started_at` timestamps to the current time. Additionally, - it can set the `sync_cursor` property for the sync job. This API is not intended - for direct connector management by users. It supports the implementation of services - that utilize the connector protocol to communicate with Elasticsearch. To sync - data using self-managed connectors, you need to deploy the Elastic connector - service on your own infrastructure. This service runs automatically on Elastic - Cloud for Elastic managed connectors. + .. raw:: html + +

Claim a connector sync job. + This action updates the job status to in_progress and sets the last_seen and started_at timestamps to the current time. + Additionally, it can set the sync_cursor property for the sync job.

+

This API is not intended for direct connector management by users. + It supports the implementation of services that utilize the connector protocol to communicate with Elasticsearch.

+

To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. + This service runs automatically on Elastic Cloud for Elastic managed connectors.

+ ``_ @@ -720,8 +750,12 @@ async def sync_job_delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a connector sync job. Remove a connector sync job and its associated data. - This is a destructive action that is not recoverable. + .. raw:: html + +

Delete a connector sync job.

+

Remove a connector sync job and its associated data. + This is a destructive action that is not recoverable.

+ ``_ @@ -769,10 +803,13 @@ async def sync_job_error( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Set a connector sync job error. Set the `error` field for a connector sync job - and set its `status` to `error`. To sync data using self-managed connectors, - you need to deploy the Elastic connector service on your own infrastructure. - This service runs automatically on Elastic Cloud for Elastic managed connectors. + .. raw:: html + +

Set a connector sync job error. + Set the error field for a connector sync job and set its status to error.

+

To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. + This service runs automatically on Elastic Cloud for Elastic managed connectors.

+ ``_ @@ -823,7 +860,10 @@ async def sync_job_get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a connector sync job. + .. raw:: html + +

Get a connector sync job.

+ ``_ @@ -892,8 +932,11 @@ async def sync_job_list( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Get all connector sync jobs. Get information about all stored connector sync - jobs listed by their creation date in ascending order. + .. raw:: html + +

Get all connector sync jobs.

+

Get information about all stored connector sync jobs listed by their creation date in ascending order.

+ ``_ @@ -955,8 +998,11 @@ async def sync_job_post( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a connector sync job. Create a connector sync job document in the internal - index and initialize its counters and timestamps with default values. + .. raw:: html + +

Create a connector sync job.

+

Create a connector sync job document in the internal index and initialize its counters and timestamps with default values.

+ ``_ @@ -1024,12 +1070,15 @@ async def sync_job_update_stats( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Set the connector sync job stats. Stats include: `deleted_document_count`, `indexed_document_count`, - `indexed_document_volume`, and `total_document_count`. You can also update `last_seen`. - This API is mainly used by the connector service for updating sync job information. - To sync data using self-managed connectors, you need to deploy the Elastic connector - service on your own infrastructure. This service runs automatically on Elastic - Cloud for Elastic managed connectors. + .. raw:: html + +

Set the connector sync job stats. + Stats include: deleted_document_count, indexed_document_count, indexed_document_volume, and total_document_count. + You can also update last_seen. + This API is mainly used by the connector service for updating sync job information.

+

To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. + This service runs automatically on Elastic Cloud for Elastic managed connectors.

+ ``_ @@ -1108,8 +1157,11 @@ async def update_active_filtering( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Activate the connector draft filter. Activates the valid draft filtering for - a connector. + .. raw:: html + +

Activate the connector draft filter.

+

Activates the valid draft filtering for a connector.

+ ``_ @@ -1155,11 +1207,14 @@ async def update_api_key_id( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the connector API key ID. Update the `api_key_id` and `api_key_secret_id` - fields of a connector. You can specify the ID of the API key used for authorization - and the ID of the connector secret where the API key is stored. The connector - secret ID is required only for Elastic managed (native) connectors. Self-managed - connectors (connector clients) do not use this field. + .. raw:: html + +

Update the connector API key ID.

+

Update the api_key_id and api_key_secret_id fields of a connector. + You can specify the ID of the API key used for authorization and the ID of the connector secret where the API key is stored. + The connector secret ID is required only for Elastic managed (native) connectors. + Self-managed connectors (connector clients) do not use this field.

+ ``_ @@ -1214,8 +1269,11 @@ async def update_configuration( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the connector configuration. Update the configuration field in the connector - document. + .. raw:: html + +

Update the connector configuration.

+

Update the configuration field in the connector document.

+ ``_ @@ -1269,10 +1327,13 @@ async def update_error( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the connector error field. Set the error field for the connector. If the - error provided in the request body is non-null, the connector’s status is updated - to error. Otherwise, if the error is reset to null, the connector status is updated - to connected. + .. raw:: html + +

Update the connector error field.

+

Set the error field for the connector. + If the error provided in the request body is non-null, the connector’s status is updated to error. + Otherwise, if the error is reset to null, the connector status is updated to connected.

+ ``_ @@ -1325,14 +1386,22 @@ async def update_features( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the connector features. Update the connector features in the connector - document. This API can be used to control the following aspects of a connector: - * document-level security * incremental syncs * advanced sync rules * basic sync - rules Normally, the running connector service automatically manages these features. - However, you can use this API to override the default behavior. To sync data - using self-managed connectors, you need to deploy the Elastic connector service - on your own infrastructure. This service runs automatically on Elastic Cloud - for Elastic managed connectors. + .. raw:: html + +

Update the connector features. + Update the connector features in the connector document. + This API can be used to control the following aspects of a connector:

+
+   • document-level security
+   • incremental syncs
+   • advanced sync rules
+   • basic sync rules
+

Normally, the running connector service automatically manages these features. + However, you can use this API to override the default behavior.

+

To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. + This service runs automatically on Elastic Cloud for Elastic managed connectors.

+ ``_ @@ -1387,10 +1456,13 @@ async def update_filtering( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the connector filtering. Update the draft filtering configuration of a - connector and marks the draft validation state as edited. The filtering draft - is activated once validated by the running Elastic connector service. The filtering - property is used to configure sync rules (both basic and advanced) for a connector. + .. raw:: html + +

Update the connector filtering.

+

Update the draft filtering configuration of a connector and marks the draft validation state as edited. + The filtering draft is activated once validated by the running Elastic connector service. + The filtering property is used to configure sync rules (both basic and advanced) for a connector.

+ ``_ @@ -1447,8 +1519,11 @@ async def update_filtering_validation( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the connector draft filtering validation. Update the draft filtering validation - info for a connector. + .. raw:: html + +

Update the connector draft filtering validation.

+

Update the draft filtering validation info for a connector.

+ ``_ @@ -1501,8 +1576,11 @@ async def update_index_name( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the connector index name. Update the `index_name` field of a connector, - specifying the index where the data ingested by the connector is stored. + .. raw:: html + +

Update the connector index name.

+

Update the index_name field of a connector, specifying the index where the data ingested by the connector is stored.

+ ``_ @@ -1556,7 +1634,10 @@ async def update_name( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the connector name and description. + .. raw:: html + +

Update the connector name and description.

+ ``_ @@ -1610,7 +1691,10 @@ async def update_native( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the connector is_native flag. + .. raw:: html + +

Update the connector is_native flag.

+ ``_ @@ -1663,8 +1747,11 @@ async def update_pipeline( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the connector pipeline. When you create a new connector, the configuration - of an ingest pipeline is populated with default settings. + .. raw:: html + +

Update the connector pipeline.

+

When you create a new connector, the configuration of an ingest pipeline is populated with default settings.

+ ``_ @@ -1717,7 +1804,10 @@ async def update_scheduling( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the connector scheduling. + .. raw:: html + +

Update the connector scheduling.

+ ``_ @@ -1770,7 +1860,10 @@ async def update_service_type( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the connector service type. + .. raw:: html + +

Update the connector service type.

+ ``_ @@ -1830,7 +1923,10 @@ async def update_status( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the connector status. + .. raw:: html + +

Update the connector status.

+ ``_ diff --git a/elasticsearch/_async/client/dangling_indices.py b/elasticsearch/_async/client/dangling_indices.py index 4f0fe7c82..d7b5d5597 100644 --- a/elasticsearch/_async/client/dangling_indices.py +++ b/elasticsearch/_async/client/dangling_indices.py @@ -39,10 +39,12 @@ async def delete_dangling_index( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a dangling index. If Elasticsearch encounters index data that is absent - from the current cluster state, those indices are considered to be dangling. - For example, this can happen if you delete more than `cluster.indices.tombstones.size` - indices while an Elasticsearch node is offline. + .. raw:: html + +

Delete a dangling index. + If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. + For example, this can happen if you delete more than cluster.indices.tombstones.size indices while an Elasticsearch node is offline.

+ ``_ @@ -98,10 +100,12 @@ async def import_dangling_index( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Import a dangling index. If Elasticsearch encounters index data that is absent - from the current cluster state, those indices are considered to be dangling. - For example, this can happen if you delete more than `cluster.indices.tombstones.size` - indices while an Elasticsearch node is offline. + .. raw:: html + +

Import a dangling index.

+

If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. + For example, this can happen if you delete more than cluster.indices.tombstones.size indices while an Elasticsearch node is offline.

+ ``_ @@ -156,11 +160,13 @@ async def list_dangling_indices( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the dangling indices. If Elasticsearch encounters index data that is absent - from the current cluster state, those indices are considered to be dangling. - For example, this can happen if you delete more than `cluster.indices.tombstones.size` - indices while an Elasticsearch node is offline. Use this API to list dangling - indices, which you can then import or delete. + .. raw:: html + +

Get the dangling indices.

+

If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. + For example, this can happen if you delete more than cluster.indices.tombstones.size indices while an Elasticsearch node is offline.

+

Use this API to list dangling indices, which you can then import or delete.
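A rough sketch of the list-then-import flow, assuming the response exposes a dangling_indices array with an index_uuid per entry (field names taken from the current API docs, not from this patch):

    dangling = await es.dangling_indices.list_dangling_indices()
    for idx in dangling["dangling_indices"]:
        # Importing (or deleting) a dangling index requires explicitly accepting data loss
        await es.dangling_indices.import_dangling_index(
            index_uuid=idx["index_uuid"], accept_data_loss=True
        )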

+ ``_ """ diff --git a/elasticsearch/_async/client/enrich.py b/elasticsearch/_async/client/enrich.py index 73ba3c227..83161c9dd 100644 --- a/elasticsearch/_async/client/enrich.py +++ b/elasticsearch/_async/client/enrich.py @@ -37,7 +37,11 @@ async def delete_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete an enrich policy. Deletes an existing enrich policy and its enrich index. + .. raw:: html + +

Delete an enrich policy. + Deletes an existing enrich policy and its enrich index.

+ ``_ @@ -82,7 +86,11 @@ async def execute_policy( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Run an enrich policy. Create the enrich index for an existing enrich policy. + .. raw:: html + +

Run an enrich policy. + Create the enrich index for an existing enrich policy.

+ ``_ @@ -130,7 +138,11 @@ async def get_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get an enrich policy. Returns information about an enrich policy. + .. raw:: html + +

Get an enrich policy. + Returns information about an enrich policy.

+ ``_ @@ -184,7 +196,11 @@ async def put_policy( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create an enrich policy. Creates an enrich policy. + .. raw:: html + +

Create an enrich policy. + Creates an enrich policy.
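A minimal sketch of the policy lifecycle through the client; the policy name, source index, and field names are invented for illustration:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder URL

# Define a match-type enrich policy backed by a hypothetical "users" index.
client.enrich.put_policy(
    name="users-policy",
    match={
        "indices": "users",
        "match_field": "email",
        "enrich_fields": ["first_name", "last_name"],
    },
)
# Build the enrich index for the new policy.
client.enrich.execute_policy(name="users-policy", wait_for_completion=True)
```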

+ ``_ @@ -241,8 +257,11 @@ async def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get enrich stats. Returns enrich coordinator statistics and information about - enrich policies that are currently executing. + .. raw:: html + +

Get enrich stats. + Returns enrich coordinator statistics and information about enrich policies that are currently executing.

+ ``_ diff --git a/elasticsearch/_async/client/eql.py b/elasticsearch/_async/client/eql.py index 17c896e12..871c37bcc 100644 --- a/elasticsearch/_async/client/eql.py +++ b/elasticsearch/_async/client/eql.py @@ -36,8 +36,12 @@ async def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete an async EQL search. Delete an async EQL search or a stored synchronous - EQL search. The API also deletes results for the search. + .. raw:: html + +

Delete an async EQL search. + Delete an async EQL search or a stored synchronous EQL search. + The API also deletes results for the search.

+ ``_ @@ -83,8 +87,11 @@ async def get( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Get async EQL search results. Get the current status and available results for - an async EQL search or a stored synchronous EQL search. + .. raw:: html + +

Get async EQL search results. + Get the current status and available results for an async EQL search or a stored synchronous EQL search.

+ ``_ @@ -134,8 +141,11 @@ async def get_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the async EQL status. Get the current status for an async EQL search or a - stored synchronous EQL search without returning results. + .. raw:: html + +

Get the async EQL status. + Get the current status for an async EQL search or a stored synchronous EQL search without returning results.

+ ``_ @@ -229,9 +239,12 @@ async def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get EQL search results. Returns search results for an Event Query Language (EQL) - query. EQL assumes each document in a data stream or index corresponds to an - event. + .. raw:: html + +

Get EQL search results. + Returns search results for an Event Query Language (EQL) query. + EQL assumes each document in a data stream or index corresponds to an event.
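For illustration, a hypothetical EQL search issued through the client (the index name and query text are placeholders):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder URL

# Each matching document is treated as an event ordered by its timestamp field.
resp = client.eql.search(
    index="my-data-stream",
    query='process where process.name == "regsvr32.exe"',
)
print(resp["hits"])
```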

+ ``_ diff --git a/elasticsearch/_async/client/esql.py b/elasticsearch/_async/client/esql.py index 7da230d9d..bfdda5bbf 100644 --- a/elasticsearch/_async/client/esql.py +++ b/elasticsearch/_async/client/esql.py @@ -73,10 +73,12 @@ async def async_query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Run an async ES|QL query. Asynchronously run an ES|QL (Elasticsearch query language) - query, monitor its progress, and retrieve results when they become available. - The API accepts the same parameters and request body as the synchronous query - API, along with additional async related properties. + .. raw:: html + +

Run an async ES|QL query. + Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its progress, and retrieve results when they become available.

+

The API accepts the same parameters and request body as the synchronous query API, along with additional async related properties.

+ ``_ @@ -183,11 +185,17 @@ async def async_query_delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete an async ES|QL query. If the query is still running, it is cancelled. - Otherwise, the stored results are deleted. If the Elasticsearch security features - are enabled, only the following users can use this API to delete a query: * The - authenticated user that submitted the original query request * Users with the - `cancel_task` cluster privilege + .. raw:: html + +

Delete an async ES|QL query. + If the query is still running, it is cancelled. + Otherwise, the stored results are deleted.

+

If the Elasticsearch security features are enabled, only the following users can use this API to delete a query:

+
  • The authenticated user that submitted the original query request
  • Users with the cancel_task cluster privilege
+ ``_ @@ -235,10 +243,12 @@ async def async_query_get( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Get async ES|QL query results. Get the current status and available results or - stored results for an ES|QL asynchronous query. If the Elasticsearch security - features are enabled, only the user who first submitted the ES|QL query can retrieve - the results using this API. + .. raw:: html + +

Get async ES|QL query results. + Get the current status and available results or stored results for an ES|QL asynchronous query. + If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can retrieve the results using this API.

+ ``_ @@ -331,8 +341,11 @@ async def query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Run an ES|QL query. Get search results for an ES|QL (Elasticsearch query language) - query. + .. raw:: html + +

Run an ES|QL query. + Get search results for an ES|QL (Elasticsearch query language) query.
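A small assumed example of running a synchronous ES|QL query from the client (the index name and query text are placeholders):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder URL

# Results come back in columnar form: a list of columns plus rows of values.
resp = client.esql.query(
    query="FROM my-index | STATS count = COUNT(*) BY user.id | LIMIT 10"
)
print(resp["columns"])
print(resp["values"])
```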

+ ``_ diff --git a/elasticsearch/_async/client/features.py b/elasticsearch/_async/client/features.py index d3391f777..7615ddddf 100644 --- a/elasticsearch/_async/client/features.py +++ b/elasticsearch/_async/client/features.py @@ -36,17 +36,17 @@ async def get_features( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the features. Get a list of features that can be included in snapshots using - the `feature_states` field when creating a snapshot. You can use this API to - determine which feature states to include when taking a snapshot. By default, - all feature states are included in a snapshot if that snapshot includes the global - state, or none if it does not. A feature state includes one or more system indices - necessary for a given feature to function. In order to ensure data integrity, - all system indices that comprise a feature state are snapshotted and restored - together. The features listed by this API are a combination of built-in features - and features defined by plugins. In order for a feature state to be listed in - this API and recognized as a valid feature state by the create snapshot API, - the plugin that defines that feature must be installed on the master node. + .. raw:: html + +

Get the features. + Get a list of features that can be included in snapshots using the feature_states field when creating a snapshot. + You can use this API to determine which feature states to include when taking a snapshot. + By default, all feature states are included in a snapshot if that snapshot includes the global state, or none if it does not.

+

A feature state includes one or more system indices necessary for a given feature to function. + In order to ensure data integrity, all system indices that comprise a feature state are snapshotted and restored together.

+

The features listed by this API are a combination of built-in features and features defined by plugins. + In order for a feature state to be listed in this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node.

+ ``_ @@ -87,20 +87,20 @@ async def reset_features( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Reset the features. Clear all of the state information stored in system indices - by Elasticsearch features, including the security and machine learning indices. - WARNING: Intended for development and testing use only. Do not reset features - on a production cluster. Return a cluster to the same state as a new installation - by resetting the feature state for all Elasticsearch features. This deletes all - state information stored in system indices. The response code is HTTP 200 if - the state is successfully reset for all features. It is HTTP 500 if the reset - operation failed for any feature. Note that select features might provide a way - to reset particular system indices. Using this API resets all features, both - those that are built-in and implemented as plugins. To list the features that - will be affected, use the get features API. IMPORTANT: The features installed - on the node you submit this request to are the features that will be reset. Run - on the master node if you have any doubts about which plugins are installed on - individual nodes. + .. raw:: html + +

Reset the features. + Clear all of the state information stored in system indices by Elasticsearch features, including the security and machine learning indices.

+

WARNING: Intended for development and testing use only. Do not reset features on a production cluster.

+

Return a cluster to the same state as a new installation by resetting the feature state for all Elasticsearch features. + This deletes all state information stored in system indices.

+

The response code is HTTP 200 if the state is successfully reset for all features. + It is HTTP 500 if the reset operation failed for any feature.

+

Note that select features might provide a way to reset particular system indices. + Using this API resets all features, both those that are built-in and implemented as plugins.

+

To list the features that will be affected, use the get features API.

+

IMPORTANT: The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes.

+ ``_ diff --git a/elasticsearch/_async/client/fleet.py b/elasticsearch/_async/client/fleet.py index ba6637b1d..acdb046de 100644 --- a/elasticsearch/_async/client/fleet.py +++ b/elasticsearch/_async/client/fleet.py @@ -46,8 +46,10 @@ async def global_checkpoints( wait_for_index: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the current global checkpoints for an index. This API is design for internal - use by the fleet server project. + .. raw:: html + +

Returns the current global checkpoints for an index. This API is designed for internal use by the Fleet server project.

+ ``_ @@ -132,10 +134,12 @@ async def msearch( wait_for_checkpoints: t.Optional[t.Sequence[int]] = None, ) -> ObjectApiResponse[t.Any]: """ - Executes several [fleet searches](https://www.elastic.co/guide/en/elasticsearch/reference/current/fleet-search.html) - with a single API request. The API follows the same structure as the [multi search](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html) - API. However, similar to the fleet search API, it supports the wait_for_checkpoints - parameter. + .. raw:: html + +

Executes several fleet searches with a single API request. + The API follows the same structure as the multi search API. However, similar to the fleet search API, it + supports the wait_for_checkpoints parameter.

+ :param searches: :param index: A single target to search. If the target is an index alias, it @@ -378,9 +382,11 @@ async def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - The purpose of the fleet search api is to provide a search api where the search - will only be executed after provided checkpoint has been processed and is visible - for searches inside of Elasticsearch. + .. raw:: html + +

The purpose of the fleet search API is to provide a search API where the search will only be executed + after the provided checkpoint has been processed and is visible for searches inside of Elasticsearch.

+ :param index: A single target to search. If the target is an index alias, it must resolve to a single index. diff --git a/elasticsearch/_async/client/graph.py b/elasticsearch/_async/client/graph.py index e713aa26b..5b86970b1 100644 --- a/elasticsearch/_async/client/graph.py +++ b/elasticsearch/_async/client/graph.py @@ -45,14 +45,15 @@ async def explore( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Explore graph analytics. Extract and summarize information about the documents - and terms in an Elasticsearch data stream or index. The easiest way to understand - the behavior of this API is to use the Graph UI to explore connections. An initial - request to the `_explore` API contains a seed query that identifies the documents - of interest and specifies the fields that define the vertices and connections - you want to include in the graph. Subsequent requests enable you to spider out - from one more vertices of interest. You can exclude vertices that have already - been returned. + .. raw:: html + +

Explore graph analytics. + Extract and summarize information about the documents and terms in an Elasticsearch data stream or index. + The easiest way to understand the behavior of this API is to use the Graph UI to explore connections. + An initial request to the _explore API contains a seed query that identifies the documents of interest and specifies the fields that define the vertices and connections you want to include in the graph. + Subsequent requests enable you to spider out from one or more vertices of interest. + You can exclude vertices that have already been returned.
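As a hedged sketch, an initial explore request might look like this from the client; the index and field names are invented, and the body mirrors the seed-query/vertices/connections structure described above:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder URL

# Seed query plus vertex and connection definitions; all field names are invented.
resp = client.graph.explore(
    index="clicklogs",
    query={"match": {"query.raw": "midi"}},
    vertices=[{"field": "product"}],
    connections={"vertices": [{"field": "query.raw"}]},
)
print(resp["vertices"], resp["connections"])
```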

+ ``_ diff --git a/elasticsearch/_async/client/ilm.py b/elasticsearch/_async/client/ilm.py index f4adf9473..482bb3fdd 100644 --- a/elasticsearch/_async/client/ilm.py +++ b/elasticsearch/_async/client/ilm.py @@ -38,9 +38,11 @@ async def delete_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a lifecycle policy. You cannot delete policies that are currently in use. - If the policy is being used to manage any indices, the request fails and returns - an error. + .. raw:: html + +

Delete a lifecycle policy. + You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error.

+ ``_ @@ -92,11 +94,13 @@ async def explain_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Explain the lifecycle state. Get the current lifecycle status for one or more - indices. For data streams, the API retrieves the current lifecycle status for - the stream's backing indices. The response indicates when the index entered each - lifecycle state, provides the definition of the running phase, and information - about any failures. + .. raw:: html + +

Explain the lifecycle state. + Get the current lifecycle status for one or more indices. + For data streams, the API retrieves the current lifecycle status for the stream's backing indices.

+

The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and information about any failures.

+ ``_ @@ -154,7 +158,10 @@ async def get_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get lifecycle policies. + .. raw:: html + +

Get lifecycle policies.

+ ``_ @@ -205,7 +212,11 @@ async def get_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the ILM status. Get the current index lifecycle management status. + .. raw:: html + +

Get the ILM status. + Get the current index lifecycle management status.

+ ``_ """ @@ -246,18 +257,22 @@ async def migrate_to_data_tiers( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Migrate to data tiers routing. Switch the indices, ILM policies, and legacy, - composable, and component templates from using custom node attributes and attribute-based - allocation filters to using data tiers. Optionally, delete one legacy index template. - Using node roles enables ILM to automatically move the indices between data tiers. - Migrating away from custom node attributes routing can be manually performed. - This API provides an automated way of performing three out of the four manual - steps listed in the migration guide: 1. Stop setting the custom hot attribute - on new indices. 1. Remove custom allocation settings from existing ILM policies. - 1. Replace custom allocation settings from existing indices with the corresponding - tier preference. ILM must be stopped before performing the migration. Use the - stop ILM and get ILM status APIs to wait until the reported operation mode is - `STOPPED`. + .. raw:: html + +

Migrate to data tiers routing. + Switch the indices, ILM policies, and legacy, composable, and component templates from using custom node attributes and attribute-based allocation filters to using data tiers. + Optionally, delete one legacy index template. + Using node roles enables ILM to automatically move the indices between data tiers.

+

Migrating away from custom node attributes routing can be manually performed. + This API provides an automated way of performing three out of the four manual steps listed in the migration guide:

+
  1. Stop setting the custom hot attribute on new indices.
  2. Remove custom allocation settings from existing ILM policies.
  3. Replace custom allocation settings from existing indices with the corresponding tier preference.
+

ILM must be stopped before performing the migration. + Use the stop ILM and get ILM status APIs to wait until the reported operation mode is STOPPED.
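A sketch of that stop, wait, and migrate sequence from the client; the dry_run flag is assumed to be available on your client version and only reports what would change:

```python
import time

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder URL

client.ilm.stop()
while client.ilm.get_status()["operation_mode"] != "STOPPED":
    time.sleep(1)  # poll until ILM reports STOPPED

# Dry run first: reports which indices, policies, and templates would be migrated.
print(client.ilm.migrate_to_data_tiers(dry_run=True))
```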

+ ``_ @@ -317,21 +332,20 @@ async def move_to_step( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Move to a lifecycle step. Manually move an index into a specific step in the - lifecycle policy and run that step. WARNING: This operation can result in the - loss of data. Manually moving an index into a specific step runs that step even - if it has already been performed. This is a potentially destructive action and - this should be considered an expert level API. You must specify both the current - step and the step to be executed in the body of the request. The request will - fail if the current step does not match the step currently running for the index - This is to prevent the index from being moved from an unexpected step into the - next step. When specifying the target (`next_step`) to which the index will be - moved, either the name or both the action and name fields are optional. If only - the phase is specified, the index will move to the first step of the first action - in the target phase. If the phase and action are specified, the index will move - to the first step of the specified action in the specified phase. Only actions - specified in the ILM policy are considered valid. An index cannot move to a step - that is not part of its policy. + .. raw:: html + +

Move to a lifecycle step. + Manually move an index into a specific step in the lifecycle policy and run that step.

+

WARNING: This operation can result in the loss of data. Manually moving an index into a specific step runs that step even if it has already been performed. This is a potentially destructive action and this should be considered an expert level API.

+

You must specify both the current step and the step to be executed in the body of the request. + The request will fail if the current step does not match the step currently running for the index. + This is to prevent the index from being moved from an unexpected step into the next step.

+

When specifying the target (next_step) to which the index will be moved, either the name or both the action and name fields are optional. + If only the phase is specified, the index will move to the first step of the first action in the target phase. + If the phase and action are specified, the index will move to the first step of the specified action in the specified phase. + Only actions specified in the ILM policy are considered valid. + An index cannot move to a step that is not part of its policy.

+ ``_ @@ -394,9 +408,12 @@ async def put_lifecycle( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a lifecycle policy. If the specified policy exists, it is replaced - and the policy version is incremented. NOTE: Only the latest version of the policy - is stored, you cannot revert to previous versions. + .. raw:: html + +

Create or update a lifecycle policy. + If the specified policy exists, it is replaced and the policy version is incremented.

+

NOTE: Only the latest version of the policy is stored; you cannot revert to previous versions.
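For example, a minimal hot/delete policy created (or replaced) through the client; the policy name and phase settings are placeholders:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder URL

# Each call replaces the existing policy and increments its version.
client.ilm.put_lifecycle(
    name="my-policy",
    policy={
        "phases": {
            "hot": {"actions": {"rollover": {"max_primary_shard_size": "50gb"}}},
            "delete": {"min_age": "30d", "actions": {"delete": {}}},
        }
    },
)
```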

+ ``_ @@ -455,8 +472,12 @@ async def remove_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Remove policies from an index. Remove the assigned lifecycle policies from an - index or a data stream's backing indices. It also stops managing the indices. + .. raw:: html + +

Remove policies from an index. + Remove the assigned lifecycle policies from an index or a data stream's backing indices. + It also stops managing the indices.

+ ``_ @@ -496,10 +517,13 @@ async def retry( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retry a policy. Retry running the lifecycle policy for an index that is in the - ERROR step. The API sets the policy back to the step where the error occurred - and runs the step. Use the explain lifecycle state API to determine whether an - index is in the ERROR step. + .. raw:: html + +

Retry a policy. + Retry running the lifecycle policy for an index that is in the ERROR step. + The API sets the policy back to the step where the error occurred and runs the step. + Use the explain lifecycle state API to determine whether an index is in the ERROR step.

+ ``_ @@ -541,9 +565,13 @@ async def start( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Start the ILM plugin. Start the index lifecycle management plugin if it is currently - stopped. ILM is started automatically when the cluster is formed. Restarting - ILM is necessary only when it has been stopped using the stop ILM API. + .. raw:: html + +

Start the ILM plugin. + Start the index lifecycle management plugin if it is currently stopped. + ILM is started automatically when the cluster is formed. + Restarting ILM is necessary only when it has been stopped using the stop ILM API.

+ ``_ @@ -590,12 +618,14 @@ async def stop( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Stop the ILM plugin. Halt all lifecycle management operations and stop the index - lifecycle management plugin. This is useful when you are performing maintenance - on the cluster and need to prevent ILM from performing any actions on your indices. - The API returns as soon as the stop request has been acknowledged, but the plugin - might continue to run until in-progress operations complete and the plugin can - be safely stopped. Use the get ILM status API to check whether ILM is running. + .. raw:: html + +

Stop the ILM plugin. + Halt all lifecycle management operations and stop the index lifecycle management plugin. + This is useful when you are performing maintenance on the cluster and need to prevent ILM from performing any actions on your indices.

+

The API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped. + Use the get ILM status API to check whether ILM is running.

+ ``_ diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py index 0725a88e5..2edac7ded 100644 --- a/elasticsearch/_async/client/indices.py +++ b/elasticsearch/_async/client/indices.py @@ -55,8 +55,11 @@ async def add_block( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Add an index block. Limits the operations allowed on an index by blocking specific - operation types. + .. raw:: html + +

Add an index block. + Limits the operations allowed on an index by blocking specific operation types.
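For instance, putting a write block on an index through the client (the index name is a placeholder):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder URL

# Block write operations; other block types include "read", "read_only", and "metadata".
client.indices.add_block(index="my-index", block="write")
```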

+ ``_ @@ -143,12 +146,15 @@ async def analyze( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get tokens from text analysis. The analyze API performs analysis on a text string - and returns the resulting tokens. Generating excessive amount of tokens may cause - a node to run out of memory. The `index.analyze.max_token_count` setting enables - you to limit the number of tokens that can be produced. If more than this limit - of tokens gets generated, an error occurs. The `_analyze` endpoint without a - specified index will always use `10000` as its limit. + .. raw:: html + +

Get tokens from text analysis. + The analyze API performs analysis on a text string and returns the resulting tokens.

+

Generating an excessive amount of tokens may cause a node to run out of memory. + The index.analyze.max_token_count setting enables you to limit the number of tokens that can be produced. + If more tokens than this limit are generated, an error occurs. + The _analyze endpoint without a specified index will always use 10000 as its limit.
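A small sketch of calling the analyze API without an index, using the built-in standard analyzer (the sample text is arbitrary):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder URL

# Analyze a string with the standard analyzer; no index is required.
resp = client.indices.analyze(analyzer="standard", text="The 2 QUICK Brown-Foxes jumped.")
print([token["token"] for token in resp["tokens"]])
```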

+ ``_ @@ -249,11 +255,15 @@ async def clear_cache( request: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear the cache. Clear the cache of one or more indices. For data streams, the - API clears the caches of the stream's backing indices. By default, the clear - cache API clears all caches. To clear only specific caches, use the `fielddata`, - `query`, or `request` parameters. To clear the cache only of specific fields, - use the `fields` parameter. + .. raw:: html + +

Clear the cache. + Clear the cache of one or more indices. + For data streams, the API clears the caches of the stream's backing indices.

+

By default, the clear cache API clears all caches. + To clear only specific caches, use the fielddata, query, or request parameters. + To clear the cache only of specific fields, use the fields parameter.

+ ``_ @@ -338,44 +348,44 @@ async def clone( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clone an index. Clone an existing index into a new index. Each original primary - shard is cloned into a new primary shard in the new index. IMPORTANT: Elasticsearch - does not apply index templates to the resulting index. The API also does not - copy index metadata from the original index. Index metadata includes aliases, - index lifecycle management phase definitions, and cross-cluster replication (CCR) - follower information. For example, if you clone a CCR follower index, the resulting - clone will not be a follower index. The clone API copies most index settings - from the source index to the resulting index, with the exception of `index.number_of_replicas` - and `index.auto_expand_replicas`. To set the number of replicas in the resulting - index, configure these settings in the clone request. Cloning works as follows: - * First, it creates a new target index with the same definition as the source - index. * Then it hard-links segments from the source index into the target index. - If the file system does not support hard-linking, all segments are copied into - the new index, which is a much more time consuming process. * Finally, it recovers - the target index as though it were a closed index which had just been re-opened. - IMPORTANT: Indices can only be cloned if they meet the following requirements: - * The index must be marked as read-only and have a cluster health status of green. - * The target index must not exist. * The source index must have the same number - of primary shards as the target index. * The node handling the clone process - must have sufficient free disk space to accommodate a second copy of the existing - index. The current write index on a data stream cannot be cloned. In order to - clone the current write index, the data stream must first be rolled over so that - a new write index is created and then the previous write index can be cloned. - NOTE: Mappings cannot be specified in the `_clone` request. The mappings of the - source index will be used for the target index. **Monitor the cloning process** - The cloning process can be monitored with the cat recovery API or the cluster - health API can be used to wait until all primary shards have been allocated by - setting the `wait_for_status` parameter to `yellow`. The `_clone` API returns - as soon as the target index has been added to the cluster state, before any shards - have been allocated. At this point, all shards are in the state unassigned. If, - for any reason, the target index can't be allocated, its primary shard will remain - unassigned until it can be allocated on that node. Once the primary shard is - allocated, it moves to state initializing, and the clone process begins. When - the clone operation completes, the shard will become active. At that point, Elasticsearch - will try to allocate any replicas and may decide to relocate the primary shard - to another node. **Wait for active shards** Because the clone operation creates - a new index to clone the shards to, the wait for active shards setting on index - creation applies to the clone index action as well. + .. raw:: html + +

Clone an index. + Clone an existing index into a new index. + Each original primary shard is cloned into a new primary shard in the new index.

+

IMPORTANT: Elasticsearch does not apply index templates to the resulting index. + The API also does not copy index metadata from the original index. + Index metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information. + For example, if you clone a CCR follower index, the resulting clone will not be a follower index.

+

The clone API copies most index settings from the source index to the resulting index, with the exception of index.number_of_replicas and index.auto_expand_replicas. + To set the number of replicas in the resulting index, configure these settings in the clone request.

+

Cloning works as follows:

+
  • First, it creates a new target index with the same definition as the source index.
  • Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process.
  • Finally, it recovers the target index as though it were a closed index which had just been re-opened.
+

IMPORTANT: Indices can only be cloned if they meet the following requirements:

+
  • The index must be marked as read-only and have a cluster health status of green.
  • The target index must not exist.
  • The source index must have the same number of primary shards as the target index.
  • The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index.
+

The current write index on a data stream cannot be cloned. + In order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned.

+

NOTE: Mappings cannot be specified in the _clone request. The mappings of the source index will be used for the target index.

+

Monitor the cloning process

+

The cloning process can be monitored with the cat recovery API, or the cluster health API can be used to wait until all primary shards have been allocated by setting the wait_for_status parameter to yellow.

+

The _clone API returns as soon as the target index has been added to the cluster state, before any shards have been allocated. + At this point, all shards are in the state unassigned. + If, for any reason, the target index can't be allocated, its primary shard will remain unassigned until it can be allocated on that node.

+

Once the primary shard is allocated, it moves to state initializing, and the clone process begins. + When the clone operation completes, the shard will become active. + At that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node.

+

Wait for active shards

+

Because the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well.
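Putting the requirements above together, a minimal clone sequence through the client might look like this (the index names are placeholders):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder URL

# The source index must be read-only before it can be cloned.
client.indices.add_block(index="my-source-index", block="write")
client.indices.clone(index="my-source-index", target="my-cloned-index")
```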

+ ``_ @@ -463,24 +473,23 @@ async def close( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Close an index. A closed index is blocked for read or write operations and does - not allow all operations that opened indices allow. It is not possible to index - documents or to search for documents in a closed index. Closed indices do not - have to maintain internal data structures for indexing or searching documents, - which results in a smaller overhead on the cluster. When opening or closing an - index, the master node is responsible for restarting the index shards to reflect - the new state of the index. The shards will then go through the normal recovery - process. The data of opened and closed indices is automatically replicated by - the cluster to ensure that enough shard copies are safely kept around at all - times. You can open and close multiple indices. An error is thrown if the request - explicitly refers to a missing index. This behaviour can be turned off using - the `ignore_unavailable=true` parameter. By default, you must explicitly name - the indices you are opening or closing. To open or close indices with `_all`, - `*`, or other wildcard expressions, change the` action.destructive_requires_name` - setting to `false`. This setting can also be changed with the cluster update - settings API. Closed indices consume a significant amount of disk-space which - can cause problems in managed environments. Closing indices can be turned off - with the cluster settings API by setting `cluster.indices.close.enable` to `false`. + .. raw:: html + +

Close an index. + A closed index is blocked for read or write operations and does not allow all operations that opened indices allow. + It is not possible to index documents or to search for documents in a closed index. + Closed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster.

+

When opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index. + The shards will then go through the normal recovery process. + The data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times.

+

You can open and close multiple indices. + An error is thrown if the request explicitly refers to a missing index. + This behaviour can be turned off using the ignore_unavailable=true parameter.

+

By default, you must explicitly name the indices you are opening or closing. + To open or close indices with _all, *, or other wildcard expressions, change the action.destructive_requires_name setting to false. This setting can also be changed with the cluster update settings API.

+

Closed indices consume a significant amount of disk-space which can cause problems in managed environments. + Closing indices can be turned off with the cluster settings API by setting cluster.indices.close.enable to false.
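For illustration, closing and re-opening an index through the client (the index name is a placeholder):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder URL

client.indices.close(index="my-index")  # blocks reads and writes
client.indices.open(index="my-index")   # re-opens the index; shards recover normally
```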

+ ``_ @@ -561,26 +570,27 @@ async def create( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create an index. You can use the create index API to add a new index to an Elasticsearch - cluster. When creating an index, you can specify the following: * Settings for - the index. * Mappings for fields in the index. * Index aliases **Wait for active - shards** By default, index creation will only return a response to the client - when the primary copies of each shard have been started, or the request times - out. The index creation response will indicate what happened. For example, `acknowledged` - indicates whether the index was successfully created in the cluster, `while shards_acknowledged` - indicates whether the requisite number of shard copies were started for each - shard in the index before timing out. Note that it is still possible for either - `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation - to be successful. These values simply indicate whether the operation completed - before the timeout. If `acknowledged` is false, the request timed out before - the cluster state was updated with the newly created index, but it probably will - be created sometime soon. If `shards_acknowledged` is false, then the request - timed out before the requisite number of shards were started (by default just - the primaries), even if the cluster state was successfully updated to reflect - the newly created index (that is to say, `acknowledged` is `true`). You can change - the default of only waiting for the primary shards to start through the index - setting `index.write.wait_for_active_shards`. Note that changing this setting - will also affect the `wait_for_active_shards` value on all subsequent write operations. + .. raw:: html + +

Create an index. + You can use the create index API to add a new index to an Elasticsearch cluster. + When creating an index, you can specify the following:

+
  • Settings for the index.
  • Mappings for fields in the index.
  • Index aliases.
+

Wait for active shards

+

By default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out. + The index creation response will indicate what happened. + For example, acknowledged indicates whether the index was successfully created in the cluster, while shards_acknowledged indicates whether the requisite number of shard copies were started for each shard in the index before timing out. + Note that it is still possible for either acknowledged or shards_acknowledged to be false, but for the index creation to be successful. + These values simply indicate whether the operation completed before the timeout. + If acknowledged is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon. + If shards_acknowledged is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, acknowledged is true).

+

You can change the default of only waiting for the primary shards to start through the index setting index.write.wait_for_active_shards. + Note that changing this setting will also affect the wait_for_active_shards value on all subsequent write operations.
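A hedged sketch of an index creation request that sets settings, mappings, an alias, and the wait-for-active-shards behavior described above; all names and values are placeholders:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder URL

resp = client.indices.create(
    index="my-index",
    settings={"number_of_shards": 1, "number_of_replicas": 1},
    mappings={"properties": {"@timestamp": {"type": "date"}}},
    aliases={"my-alias": {}},
    wait_for_active_shards="all",
)
# Either flag can be False while the index is still created successfully.
print(resp["acknowledged"], resp["shards_acknowledged"])
```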

+ ``_ @@ -653,8 +663,12 @@ async def create_data_stream( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a data stream. Creates a data stream. You must have a matching index template - with data stream enabled. + .. raw:: html + +

Create a data stream. + Creates a data stream. + You must have a matching index template with data stream enabled.

+ ``_ @@ -715,7 +729,11 @@ async def data_streams_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get data stream stats. Retrieves statistics for one or more data streams. + .. raw:: html + +

Get data stream stats. + Retrieves statistics for one or more data streams.

+ ``_ @@ -776,11 +794,15 @@ async def delete( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete indices. Deleting an index deletes its documents, shards, and metadata. - It does not delete related Kibana components, such as data views, visualizations, - or dashboards. You cannot delete the current write index of a data stream. To - delete the index, you must roll over the data stream so a new write index is - created. You can then use the delete index API to delete the previous write index. + .. raw:: html + +

Delete indices. + Deleting an index deletes its documents, shards, and metadata. + It does not delete related Kibana components, such as data views, visualizations, or dashboards.

+

You cannot delete the current write index of a data stream. + To delete the index, you must roll over the data stream so a new write index is created. + You can then use the delete index API to delete the previous write index.

+ ``_ @@ -850,7 +872,11 @@ async def delete_alias( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete an alias. Removes a data stream or index from an alias. + .. raw:: html + +

Delete an alias. + Removes a data stream or index from an alias.

+ ``_ @@ -914,8 +940,11 @@ async def delete_data_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete data stream lifecycles. Removes the data stream lifecycle from a data - stream, rendering it not managed by the data stream lifecycle. + .. raw:: html + +

Delete data stream lifecycles. + Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle.

+ ``_ @@ -975,7 +1004,11 @@ async def delete_data_stream( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete data streams. Deletes one or more data streams and their backing indices. + .. raw:: html + +

Delete data streams. + Deletes one or more data streams and their backing indices.

+ ``_ @@ -1027,10 +1060,13 @@ async def delete_index_template( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete an index template. The provided may contain multiple - template names separated by a comma. If multiple template names are specified - then there is no wildcard support and the provided names should match completely - with existing templates. + .. raw:: html + +

Delete an index template. + The provided index template name may contain multiple template names separated by a comma. If multiple template + names are specified then there is no wildcard support and the provided names should match completely with + existing templates.

+ ``_ @@ -1082,7 +1118,10 @@ async def delete_template( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a legacy index template. + .. raw:: html + +

Delete a legacy index template.

+ ``_ @@ -1145,16 +1184,16 @@ async def disk_usage( run_expensive_tasks: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Analyze the index disk usage. Analyze the disk usage of each field of an index - or data stream. This API might not support indices created in previous Elasticsearch - versions. The result of a small index can be inaccurate as some parts of an index - might not be analyzed by the API. NOTE: The total size of fields of the analyzed - shards of the index in the response is usually smaller than the index `store_size` - value because some small metadata files are ignored and some parts of data files - might not be scanned by the API. Since stored fields are stored together in a - compressed format, the sizes of stored fields are also estimates and can be inaccurate. - The stored size of the `_id` field is likely underestimated while the `_source` - field is overestimated. + .. raw:: html + +

Analyze the index disk usage. + Analyze the disk usage of each field of an index or data stream. + This API might not support indices created in previous Elasticsearch versions. + The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API.

+

NOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index store_size value because some small metadata files are ignored and some parts of data files might not be scanned by the API. + Since stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate. + The stored size of the _id field is likely underestimated while the _source field is overestimated.
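For example, requesting a disk-usage analysis through the client; because the analysis is expensive it must be opted into explicitly (the index name is a placeholder):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder URL

# run_expensive_tasks must be set to True or the API refuses to run the analysis.
resp = client.indices.disk_usage(index="my-index", run_expensive_tasks=True)
print(resp)
```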

+ ``_ @@ -1228,14 +1267,16 @@ async def downsample( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Downsample an index. Aggregate a time series (TSDS) index and store pre-computed - statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each - metric field grouped by a configured time interval. For example, a TSDS index - that contains metrics sampled every 10 seconds can be downsampled to an hourly - index. All documents within an hour interval are summarized and stored as a single - document in the downsample index. NOTE: Only indices in a time series data stream - are supported. Neither field nor document level security can be defined on the - source index. The source index must be read only (`index.blocks.write: true`). + .. raw:: html + +

Downsample an index. + Aggregate a time series (TSDS) index and store pre-computed statistical summaries (min, max, sum, value_count and avg) for each metric field grouped by a configured time interval. + For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index. + All documents within an hour interval are summarized and stored as a single document in the downsample index.

+

NOTE: Only indices in a time series data stream are supported. + Neither field nor document level security can be defined on the source index. + The source index must be read only (index.blocks.write: true).

+ ``_ @@ -1303,7 +1344,11 @@ async def exists( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Check indices. Check if one or more indices, index aliases, or data streams exist. + .. raw:: html + +

Check indices. + Check if one or more indices, index aliases, or data streams exist.

+ ``_ @@ -1381,7 +1426,11 @@ async def exists_alias( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Check aliases. Checks if one or more data stream or index aliases exist. + .. raw:: html + +

Check aliases. + Checks if one or more data stream or index aliases exist.

+ ``_ @@ -1451,7 +1500,11 @@ async def exists_index_template( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Check index templates. Check whether index templates exist. + .. raw:: html + +

Check index templates. + Check whether index templates exist.

+ ``_ @@ -1500,11 +1553,13 @@ async def exists_template( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Check existence of index templates. Get information about whether index templates - exist. Index templates define settings, mappings, and aliases that can be applied - automatically to new indices. IMPORTANT: This documentation is about legacy index - templates, which are deprecated and will be replaced by the composable templates - introduced in Elasticsearch 7.8. + .. raw:: html + +

Check existence of index templates. + Get information about whether index templates exist. + Index templates define settings, mappings, and aliases that can be applied automatically to new indices.

+

IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.

+ ``_ @@ -1558,10 +1613,11 @@ async def explain_data_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the status for a data stream lifecycle. Get information about an index or - data stream's current data stream lifecycle status, such as time since index - creation, time since rollover, the lifecycle configuration managing the index, - or any errors encountered during lifecycle execution. + .. raw:: html + +

Get the status for a data stream lifecycle. + Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution.

+ ``_ @@ -1623,13 +1679,15 @@ async def field_usage_stats( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Get field usage stats. Get field usage information for each shard and field of - an index. Field usage statistics are automatically captured when queries are - running on a cluster. A shard-level search request that accesses a given field, - even if multiple times during that request, is counted as a single use. The response - body reports the per-shard usage count of the data structures that back the fields - in the index. A given request will increment each count by a maximum value of - 1, even if the request accesses the same field multiple times. + .. raw:: html + +

Get field usage stats. + Get field usage information for each shard and field of an index. + Field usage statistics are automatically captured when queries are running on a cluster. + A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use.

+

The response body reports the per-shard usage count of the data structures that back the fields in the index. + A given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times.

+ ``_ @@ -1708,22 +1766,18 @@ async def flush( wait_if_ongoing: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Flush data streams or indices. Flushing a data stream or index is the process - of making sure that any data that is currently only stored in the transaction - log is also permanently stored in the Lucene index. When restarting, Elasticsearch - replays any unflushed operations from the transaction log into the Lucene index - to bring it back into the state that it was in before the restart. Elasticsearch - automatically triggers flushes as needed, using heuristics that trade off the - size of the unflushed transaction log against the cost of performing each flush. - After each operation has been flushed it is permanently stored in the Lucene - index. This may mean that there is no need to maintain an additional copy of - it in the transaction log. The transaction log is made up of multiple files, - called generations, and Elasticsearch will delete any generation files when they - are no longer needed, freeing up disk space. It is also possible to trigger a - flush on one or more indices using the flush API, although it is rare for users - to need to call this API directly. If you call the flush API after indexing some - documents then a successful response indicates that Elasticsearch has flushed - all the documents that were indexed before the flush API was called. + .. raw:: html + +

Flush data streams or indices. + Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index. + When restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart. + Elasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush.

+

After each operation has been flushed it is permanently stored in the Lucene index. + This may mean that there is no need to maintain an additional copy of it in the transaction log. + The transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space.

+

It is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly. + If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called.
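A minimal sketch of calling the flush API directly from the client (the index name is a placeholder):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder URL

# A successful response means everything indexed before this call has been flushed.
client.indices.flush(index="my-index")
```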

+ ``_ @@ -1806,49 +1860,49 @@ async def forcemerge( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Force a merge. Perform the force merge operation on the shards of one or more - indices. For data streams, the API forces a merge on the shards of the stream's - backing indices. Merging reduces the number of segments in each shard by merging - some of them together and also frees up the space used by deleted documents. - Merging normally happens automatically, but sometimes it is useful to trigger - a merge manually. WARNING: We recommend force merging only a read-only index - (meaning the index is no longer receiving writes). When documents are updated - or deleted, the old version is not immediately removed but instead soft-deleted - and marked with a "tombstone". These soft-deleted documents are automatically - cleaned up during regular segment merges. But force merge can cause very large - (greater than 5 GB) segments to be produced, which are not eligible for regular - merges. So the number of soft-deleted documents can then grow rapidly, resulting - in higher disk usage and worse search performance. If you regularly force merge - an index receiving writes, this can also make snapshots more expensive, since - the new documents can't be backed up incrementally. **Blocks during a force merge** - Calls to this API block until the merge is complete (unless request contains - `wait_for_completion=false`). If the client connection is lost before completion - then the force merge process will continue in the background. Any new requests - to force merge the same indices will also block until the ongoing force merge - is complete. **Running force merge asynchronously** If the request contains `wait_for_completion=false`, - Elasticsearch performs some preflight checks, launches the request, and returns - a task you can use to get the status of the task. However, you can not cancel - this task as the force merge task is not cancelable. Elasticsearch creates a - record of this task as a document at `_tasks/`. When you are done with - a task, you should delete the task document so Elasticsearch can reclaim the - space. **Force merging multiple indices** You can force merge multiple indices - with a single request by targeting: * One or more data streams that contain multiple - backing indices * Multiple indices * One or more aliases * All data streams and - indices in a cluster Each targeted shard is force-merged separately using the - force_merge threadpool. By default each node only has a single `force_merge` - thread which means that the shards on that node are force-merged one at a time. - If you expand the `force_merge` threadpool on a node then it will force merge - its shards in parallel Force merge makes the storage for the shard being merged - temporarily increase, as it may require free space up to triple its size in case - `max_num_segments parameter` is set to `1`, to rewrite all segments into a new - one. **Data streams and time-based indices** Force-merging is useful for managing - a data stream's older backing indices and other time-based indices, particularly - after a rollover. In these cases, each index only receives indexing traffic for - a certain period of time. Once an index receive no more writes, its shards can - be force-merged to a single segment. This can be a good idea because single-segment - shards can sometimes use simpler and more efficient data structures to perform - searches. 
For example: ``` POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1 - ``` + .. raw:: html + +

Force a merge. + Perform the force merge operation on the shards of one or more indices. + For data streams, the API forces a merge on the shards of the stream's backing indices.

+

Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents. + Merging normally happens automatically, but sometimes it is useful to trigger a merge manually.

+

WARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes). + When documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a "tombstone". + These soft-deleted documents are automatically cleaned up during regular segment merges. + But force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges. + So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. + If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally.

+

Blocks during a force merge

+

Calls to this API block until the merge is complete (unless request contains wait_for_completion=false). + If the client connection is lost before completion then the force merge process will continue in the background. + Any new requests to force merge the same indices will also block until the ongoing force merge is complete.

+

Running force merge asynchronously

+

If the request contains wait_for_completion=false, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task. However, you cannot cancel this task as the force merge task is not cancelable. Elasticsearch creates a record of this task as a document at _tasks/<task_id>. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space.

+

Force merging multiple indices

+

You can force merge multiple indices with a single request by targeting:

  • One or more data streams that contain multiple backing indices
  • Multiple indices
  • One or more aliases
  • All data streams and indices in a cluster

Each targeted shard is force-merged separately using the force_merge threadpool. By default each node only has a single force_merge thread, which means that the shards on that node are force-merged one at a time. If you expand the force_merge threadpool on a node, then it will force merge its shards in parallel.

+

Force merge makes the storage for the shard being merged temporarily increase, as it may require free space up to triple its size in case max_num_segments parameter is set to 1, to rewrite all segments into a new one.

+

Data streams and time-based indices

+

Force-merging is useful for managing a data stream's older backing indices and other time-based indices, particularly after a rollover. In these cases, each index only receives indexing traffic for a certain period of time. Once an index receives no more writes, its shards can be force-merged to a single segment. This can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches. For example:

POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1
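
As an illustrative aside (not part of the patch), the force merge documented above might be issued from the async client roughly like this; the cluster URL and index name are placeholders:

```
import asyncio
from elasticsearch import AsyncElasticsearch

async def force_merge_example():
    client = AsyncElasticsearch("http://localhost:9200")
    # Merge a read-only, rolled-over backing index down to a single segment.
    resp = await client.indices.forcemerge(
        index=".ds-my-data-stream-2099.03.07-000001",
        max_num_segments=1,
    )
    print(resp)
    await client.close()

asyncio.run(force_merge_example())
```
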
+ ``_ @@ -1941,8 +1995,12 @@ async def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index information. Get information about one or more indices. For data streams, - the API returns information about the stream’s backing indices. + .. raw:: html + +

Get index information. + Get information about one or more indices. For data streams, the API returns information about the + stream’s backing indices.
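
A minimal sketch of the corresponding client call, assuming a local cluster and a made-up index name:

```
import asyncio
from elasticsearch import AsyncElasticsearch

async def get_index_example():
    client = AsyncElasticsearch("http://localhost:9200")
    # Fetch mappings, settings, and aliases for one index.
    info = await client.indices.get(index="my-index-000001")
    print(info["my-index-000001"]["settings"])
    await client.close()

asyncio.run(get_index_example())
```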

+ ``_ @@ -2031,7 +2089,11 @@ async def get_alias( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get aliases. Retrieves information for one or more data stream or index aliases. + .. raw:: html + +

Get aliases. + Retrieves information for one or more data stream or index aliases.

+ ``_ @@ -2113,8 +2175,11 @@ async def get_data_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get data stream lifecycles. Retrieves the data stream lifecycle configuration - of one or more data streams. + .. raw:: html + +

Get data stream lifecycles. + Retrieves the data stream lifecycle configuration of one or more data streams.

+ ``_ @@ -2168,8 +2233,11 @@ async def get_data_lifecycle_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get data stream lifecycle stats. Get statistics about the data streams that are - managed by a data stream lifecycle. + .. raw:: html + +

Get data stream lifecycle stats. + Get statistics about the data streams that are managed by a data stream lifecycle.

+ ``_ """ @@ -2216,7 +2284,11 @@ async def get_data_stream( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get data streams. Retrieves information about one or more data streams. + .. raw:: html + +

Get data streams. + Retrieves information about one or more data streams.

+ ``_ @@ -2291,10 +2363,13 @@ async def get_field_mapping( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get mapping definitions. Retrieves mapping definitions for one or more fields. - For data streams, the API retrieves field mappings for the stream’s backing indices. - This API is useful if you don't need a complete mapping or if an index mapping - contains a large number of fields. + .. raw:: html + +

Get mapping definitions. + Retrieves mapping definitions for one or more fields. + For data streams, the API retrieves field mappings for the stream’s backing indices.

+

This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields.

+ ``_ @@ -2371,7 +2446,11 @@ async def get_index_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index templates. Get information about one or more index templates. + .. raw:: html + +

Get index templates. + Get information about one or more index templates.

+ ``_ @@ -2444,8 +2523,11 @@ async def get_mapping( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get mapping definitions. For data streams, the API retrieves mappings for the - stream’s backing indices. + .. raw:: html + +

Get mapping definitions. + For data streams, the API retrieves mappings for the stream’s backing indices.

+ ``_ @@ -2529,8 +2611,12 @@ async def get_settings( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index settings. Get setting information for one or more indices. For data - streams, it returns setting information for the stream's backing indices. + .. raw:: html + +

Get index settings. + Get setting information for one or more indices. + For data streams, it returns setting information for the stream's backing indices.
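
A hedged sketch of reading settings, again with a placeholder cluster URL and index name:

```
import asyncio
from elasticsearch import AsyncElasticsearch

async def get_settings_example():
    client = AsyncElasticsearch("http://localhost:9200")
    # Read the settings of a concrete index, including defaults.
    resp = await client.indices.get_settings(
        index="my-index-000001",
        include_defaults=True,
    )
    print(resp)
    await client.close()

asyncio.run(get_settings_example())
```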

+ ``_ @@ -2617,9 +2703,12 @@ async def get_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index templates. Get information about one or more index templates. IMPORTANT: - This documentation is about legacy index templates, which are deprecated and - will be replaced by the composable templates introduced in Elasticsearch 7.8. + .. raw:: html + +

Get index templates. + Get information about one or more index templates.

+

IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.

+ ``_ @@ -2678,14 +2767,20 @@ async def migrate_to_data_stream( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Convert an index alias to a data stream. Converts an index alias to a data stream. - You must have a matching index template that is data stream enabled. The alias - must meet the following criteria: The alias must have a write index; All indices - for the alias must have a `@timestamp` field mapping of a `date` or `date_nanos` - field type; The alias must not have any filters; The alias must not use custom - routing. If successful, the request removes the alias and creates a data stream - with the same name. The indices for the alias become hidden backing indices for - the stream. The write index for the alias becomes the write index for the stream. + .. raw:: html + +

Convert an index alias to a data stream. + Converts an index alias to a data stream. + You must have a matching index template that is data stream enabled. + The alias must meet the following criteria: + The alias must have a write index; + All indices for the alias must have a @timestamp field mapping of a date or date_nanos field type; + The alias must not have any filters; + The alias must not use custom routing. + If successful, the request removes the alias and creates a data stream with the same name. + The indices for the alias become hidden backing indices for the stream. + The write index for the alias becomes the write index for the stream.

+ ``_ @@ -2737,8 +2832,11 @@ async def modify_data_stream( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update data streams. Performs one or more data stream modification actions in - a single atomic operation. + .. raw:: html + +

Update data streams. + Performs one or more data stream modification actions in a single atomic operation.

+ ``_ @@ -2798,27 +2896,26 @@ async def open( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Open a closed index. For data streams, the API opens any closed backing indices. - A closed index is blocked for read/write operations and does not allow all operations - that opened indices allow. It is not possible to index documents or to search - for documents in a closed index. This allows closed indices to not have to maintain - internal data structures for indexing or searching documents, resulting in a - smaller overhead on the cluster. When opening or closing an index, the master - is responsible for restarting the index shards to reflect the new state of the - index. The shards will then go through the normal recovery process. The data - of opened or closed indices is automatically replicated by the cluster to ensure - that enough shard copies are safely kept around at all times. You can open and - close multiple indices. An error is thrown if the request explicitly refers to - a missing index. This behavior can be turned off by using the `ignore_unavailable=true` - parameter. By default, you must explicitly name the indices you are opening or - closing. To open or close indices with `_all`, `*`, or other wildcard expressions, - change the `action.destructive_requires_name` setting to `false`. This setting - can also be changed with the cluster update settings API. Closed indices consume - a significant amount of disk-space which can cause problems in managed environments. - Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` - to `false`. Because opening or closing an index allocates its shards, the `wait_for_active_shards` - setting on index creation applies to the `_open` and `_close` index actions as - well. + .. raw:: html + +

Open a closed index. + For data streams, the API opens any closed backing indices.

+

A closed index is blocked for read/write operations and does not allow all operations that opened indices allow. + It is not possible to index documents or to search for documents in a closed index. + This allows closed indices to not have to maintain internal data structures for indexing or searching documents, resulting in a smaller overhead on the cluster.

+

When opening or closing an index, the master is responsible for restarting the index shards to reflect the new state of the index. + The shards will then go through the normal recovery process. + The data of opened or closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times.

+

You can open and close multiple indices. + An error is thrown if the request explicitly refers to a missing index. + This behavior can be turned off by using the ignore_unavailable=true parameter.

+

By default, you must explicitly name the indices you are opening or closing. + To open or close indices with _all, *, or other wildcard expressions, change the action.destructive_requires_name setting to false. + This setting can also be changed with the cluster update settings API.

+

Closed indices consume a significant amount of disk-space which can cause problems in managed environments. + Closing indices can be turned off with the cluster settings API by setting cluster.indices.close.enable to false.

+

Because opening or closing an index allocates its shards, the wait_for_active_shards setting on index creation applies to the _open and _close index actions as well.
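
For illustration only, an open call that also waits for the primary shard, under the same placeholder assumptions:

```
import asyncio
from elasticsearch import AsyncElasticsearch

async def open_index_example():
    client = AsyncElasticsearch("http://localhost:9200")
    # Re-open a previously closed index and wait for at least one active shard copy.
    resp = await client.indices.open(
        index="my-index-000001",
        wait_for_active_shards=1,
    )
    print(resp["acknowledged"], resp["shards_acknowledged"])
    await client.close()

asyncio.run(open_index_example())
```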

+ ``_ @@ -2893,18 +2990,18 @@ async def promote_data_stream( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Promote a data stream. Promote a data stream from a replicated data stream managed - by cross-cluster replication (CCR) to a regular data stream. With CCR auto following, - a data stream from a remote cluster can be replicated to the local cluster. These - data streams can't be rolled over in the local cluster. These replicated data - streams roll over only if the upstream data stream rolls over. In the event that - the remote cluster is no longer available, the data stream in the local cluster - can be promoted to a regular data stream, which allows these data streams to - be rolled over in the local cluster. NOTE: When promoting a data stream, ensure - the local cluster has a data stream enabled index template that matches the data - stream. If this is missing, the data stream will not be able to roll over until - a matching index template is created. This will affect the lifecycle management - of the data stream and interfere with the data stream size and retention. + .. raw:: html + +

Promote a data stream. + Promote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream.

+

With CCR auto following, a data stream from a remote cluster can be replicated to the local cluster. + These data streams can't be rolled over in the local cluster. + These replicated data streams roll over only if the upstream data stream rolls over. + In the event that the remote cluster is no longer available, the data stream in the local cluster can be promoted to a regular data stream, which allows these data streams to be rolled over in the local cluster.

+

NOTE: When promoting a data stream, ensure the local cluster has a data stream enabled index template that matches the data stream. + If this is missing, the data stream will not be able to roll over until a matching index template is created. + This will affect the lifecycle management of the data stream and interfere with the data stream size and retention.

+ ``_ @@ -2966,7 +3063,11 @@ async def put_alias( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update an alias. Adds a data stream or index to an alias. + .. raw:: html + +

Create or update an alias. + Adds a data stream or index to an alias.

+ ``_ @@ -3067,8 +3168,11 @@ async def put_data_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update data stream lifecycles. Update the data stream lifecycle of the specified - data streams. + .. raw:: html + +

Update data stream lifecycles. + Update the data stream lifecycle of the specified data streams.
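
A sketch of how the lifecycle update might look from the async client; the stream name and the retention value are illustrative assumptions, not taken from this patch:

```
import asyncio
from elasticsearch import AsyncElasticsearch

async def put_lifecycle_example():
    client = AsyncElasticsearch("http://localhost:9200")
    # Keep data in the matching data streams for at least seven days.
    resp = await client.indices.put_data_lifecycle(
        name="my-data-stream",
        data_retention="7d",
    )
    print(resp)
    await client.close()

asyncio.run(put_lifecycle_example())
```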

+ ``_ @@ -3160,34 +3264,30 @@ async def put_index_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update an index template. Index templates define settings, mappings, - and aliases that can be applied automatically to new indices. Elasticsearch applies - templates to new indices based on an wildcard pattern that matches the index - name. Index templates are applied during data stream or index creation. For data - streams, these settings and mappings are applied when the stream's backing indices - are created. Settings and mappings specified in a create index API request override - any settings or mappings specified in an index template. Changes to index templates - do not affect existing indices, including the existing backing indices of a data - stream. You can use C-style `/* *\\/` block comments in index templates. You - can include comments anywhere in the request body, except before the opening - curly bracket. **Multiple matching templates** If multiple index templates match - the name of a new index or data stream, the template with the highest priority - is used. Multiple templates with overlapping index patterns at the same priority - are not allowed and an error will be thrown when attempting to create a template - matching an existing index template at identical priorities. **Composing aliases, - mappings, and settings** When multiple component templates are specified in the - `composed_of` field for an index template, they are merged in the order specified, - meaning that later component templates override earlier component templates. - Any mappings, settings, or aliases from the parent index template are merged - in next. Finally, any configuration on the index request itself is merged. Mapping - definitions are merged recursively, which means that later mapping components - can introduce new field mappings and update the mapping configuration. If a field - mapping is already contained in an earlier component, its definition will be - completely overwritten by the later one. This recursive merging strategy applies - not only to field mappings, but also root options like `dynamic_templates` and - `meta`. If an earlier component contains a `dynamic_templates` block, then by - default new `dynamic_templates` entries are appended onto the end. If an entry - already exists with the same key, then it is overwritten by the new definition. + .. raw:: html + +

Create or update an index template. + Index templates define settings, mappings, and aliases that can be applied automatically to new indices.

+

Elasticsearch applies templates to new indices based on a wildcard pattern that matches the index name. Index templates are applied during data stream or index creation. For data streams, these settings and mappings are applied when the stream's backing indices are created. Settings and mappings specified in a create index API request override any settings or mappings specified in an index template. Changes to index templates do not affect existing indices, including the existing backing indices of a data stream.

+

You can use C-style /* *\\/ block comments in index templates. + You can include comments anywhere in the request body, except before the opening curly bracket.

+

Multiple matching templates

+

If multiple index templates match the name of a new index or data stream, the template with the highest priority is used.

+

Multiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities.

+

Composing aliases, mappings, and settings

+

When multiple component templates are specified in the composed_of field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates. + Any mappings, settings, or aliases from the parent index template are merged in next. + Finally, any configuration on the index request itself is merged. + Mapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration. + If a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one. + This recursive merging strategy applies not only to field mappings, but also root options like dynamic_templates and meta. + If an earlier component contains a dynamic_templates block, then by default new dynamic_templates entries are appended onto the end. + If an entry already exists with the same key, then it is overwritten by the new definition.
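
One possible shape of the call, shown only as a sketch with invented template and pattern names:

```
import asyncio
from elasticsearch import AsyncElasticsearch

async def put_index_template_example():
    client = AsyncElasticsearch("http://localhost:9200")
    # A simple composable template applied to new indices matching the pattern.
    resp = await client.indices.put_index_template(
        name="my-template",
        index_patterns=["my-index-*"],
        priority=100,
        template={
            "settings": {"number_of_shards": 1},
            "mappings": {"properties": {"@timestamp": {"type": "date"}}},
        },
    )
    print(resp)
    await client.close()

asyncio.run(put_index_template_example())
```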

+ ``_ @@ -3351,27 +3451,29 @@ async def put_mapping( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update field mappings. Add new fields to an existing data stream or index. You - can also use this API to change the search settings of existing fields and add - new properties to existing object fields. For data streams, these changes are - applied to all backing indices by default. **Add multi-fields to an existing - field** Multi-fields let you index the same field in different ways. You can - use this API to update the fields mapping parameter and enable multi-fields for - an existing field. WARNING: If an index (or data stream) contains documents when - you add a multi-field, those documents will not have values for the new multi-field. - You can populate the new multi-field with the update by query API. **Change supported - mapping parameters for an existing field** The documentation for each mapping - parameter indicates whether you can update it for an existing field using this - API. For example, you can use the update mapping API to update the `ignore_above` - parameter. **Change the mapping of an existing field** Except for supported mapping - parameters, you can't change the mapping or field type of an existing field. - Changing an existing field could invalidate data that's already indexed. If you - need to change the mapping of a field in a data stream's backing indices, refer - to documentation about modifying data streams. If you need to change the mapping - of a field in other indices, create a new index with the correct mapping and - reindex your data into that index. **Rename a field** Renaming a field would - invalidate data already indexed under the old field name. Instead, add an alias - field to create an alternate field name. + .. raw:: html + +

Update field mappings. + Add new fields to an existing data stream or index. + You can also use this API to change the search settings of existing fields and add new properties to existing object fields. + For data streams, these changes are applied to all backing indices by default.

+

Add multi-fields to an existing field

+

Multi-fields let you index the same field in different ways. + You can use this API to update the fields mapping parameter and enable multi-fields for an existing field. + WARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field. + You can populate the new multi-field with the update by query API.

+

Change supported mapping parameters for an existing field

+

The documentation for each mapping parameter indicates whether you can update it for an existing field using this API. + For example, you can use the update mapping API to update the ignore_above parameter.

+

Change the mapping of an existing field

+

Except for supported mapping parameters, you can't change the mapping or field type of an existing field. + Changing an existing field could invalidate data that's already indexed.

+

If you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams. + If you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index.

+

Rename a field

+

Renaming a field would invalidate data already indexed under the old field name. + Instead, add an alias field to create an alternate field name.
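
A small illustrative example of the multi-field case described above, with placeholder index and field names:

```
import asyncio
from elasticsearch import AsyncElasticsearch

async def put_mapping_example():
    client = AsyncElasticsearch("http://localhost:9200")
    # Add a keyword multi-field to an existing text field.
    resp = await client.indices.put_mapping(
        index="my-index-000001",
        properties={
            "title": {
                "type": "text",
                "fields": {"raw": {"type": "keyword"}},
            }
        },
    )
    print(resp)
    await client.close()

asyncio.run(put_mapping_example())
```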

+ ``_ @@ -3500,21 +3602,23 @@ async def put_settings( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update index settings. Changes dynamic index settings in real time. For data - streams, index setting changes are applied to all backing indices by default. - To revert a setting to the default value, use a null value. The list of per-index - settings that can be updated dynamically on live indices can be found in index - module documentation. To preserve existing settings from being updated, set the - `preserve_existing` parameter to `true`. NOTE: You can only define new analyzers - on closed indices. To add an analyzer, you must close the index, define the analyzer, - and reopen the index. You cannot close the write index of a data stream. To update - the analyzer for a data stream's write index and future backing indices, update - the analyzer in the index template used by the stream. Then roll over the data - stream to apply the new analyzer to the stream's write index and future backing - indices. This affects searches and any new data added to the stream after the - rollover. However, it does not affect the data stream's backing indices or their - existing data. To change the analyzer for existing backing indices, you must - create a new data stream and reindex your data into it. + .. raw:: html + +

Update index settings. + Changes dynamic index settings in real time. + For data streams, index setting changes are applied to all backing indices by default.

+

To revert a setting to the default value, use a null value. + The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation. + To preserve existing settings from being updated, set the preserve_existing parameter to true.

+

NOTE: You can only define new analyzers on closed indices. + To add an analyzer, you must close the index, define the analyzer, and reopen the index. + You cannot close the write index of a data stream. + To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream. + Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices. + This affects searches and any new data added to the stream after the rollover. + However, it does not affect the data stream's backing indices or their existing data. + To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.
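
A rough sketch of updating a dynamic setting, with placeholder values:

```
import asyncio
from elasticsearch import AsyncElasticsearch

async def put_settings_example():
    client = AsyncElasticsearch("http://localhost:9200")
    # Change a dynamic setting; preserve_existing=True would keep values already set.
    resp = await client.indices.put_settings(
        index="my-index-000001",
        settings={"index": {"refresh_interval": "30s"}},
        preserve_existing=False,
    )
    print(resp)
    await client.close()

asyncio.run(put_settings_example())
```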

+ ``_ @@ -3618,24 +3722,24 @@ async def put_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update an index template. Index templates define settings, mappings, - and aliases that can be applied automatically to new indices. Elasticsearch applies - templates to new indices based on an index pattern that matches the index name. - IMPORTANT: This documentation is about legacy index templates, which are deprecated - and will be replaced by the composable templates introduced in Elasticsearch - 7.8. Composable templates always take precedence over legacy templates. If no - composable template matches a new index, matching legacy templates are applied - according to their order. Index templates are only applied during index creation. - Changes to index templates do not affect existing indices. Settings and mappings - specified in create index API requests override any settings or mappings specified - in an index template. You can use C-style `/* *\\/` block comments in index templates. - You can include comments anywhere in the request body, except before the opening - curly bracket. **Indices matching multiple templates** Multiple index templates - can potentially match an index, in this case, both the settings and mappings - are merged into the final configuration of the index. The order of the merging - can be controlled using the order parameter, with lower order being applied first, - and higher orders overriding them. NOTE: Multiple matching templates with the - same order value will result in a non-deterministic merging order. + .. raw:: html + +

Create or update an index template. + Index templates define settings, mappings, and aliases that can be applied automatically to new indices. + Elasticsearch applies templates to new indices based on an index pattern that matches the index name.

+

IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.

+

Composable templates always take precedence over legacy templates. + If no composable template matches a new index, matching legacy templates are applied according to their order.

+

Index templates are only applied during index creation. + Changes to index templates do not affect existing indices. + Settings and mappings specified in create index API requests override any settings or mappings specified in an index template.

+

You can use C-style /* *\\/ block comments in index templates. + You can include comments anywhere in the request body, except before the opening curly bracket.

+

Indices matching multiple templates

+

Multiple index templates can potentially match an index, in this case, both the settings and mappings are merged into the final configuration of the index. + The order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them. + NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order.

+ ``_ @@ -3716,27 +3820,28 @@ async def recovery( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index recovery information. Get information about ongoing and completed shard - recoveries for one or more indices. For data streams, the API returns information - for the stream's backing indices. All recoveries, whether ongoing or complete, - are kept in the cluster state and may be reported on at any time. Shard recovery - is the process of initializing a shard copy, such as restoring a primary shard - from a snapshot or creating a replica shard from a primary shard. When a shard - recovery completes, the recovered shard is available for search and indexing. - Recovery automatically occurs during the following processes: * When creating - an index for the first time. * When a node rejoins the cluster and starts up - any missing primary shard copies using the data that it holds in its data path. - * Creation of new replica shard copies from the primary. * Relocation of a shard - copy to a different node in the same cluster. * A snapshot restore operation. - * A clone, shrink, or split operation. You can determine the cause of a shard - recovery using the recovery or cat recovery APIs. The index recovery API reports - information about completed recoveries only for shard copies that currently exist - in the cluster. It only reports the last recovery for each shard copy and does - not report historical information about earlier recoveries, nor does it report - information about the recoveries of shard copies that no longer exist. This means - that if a shard copy completes a recovery and then Elasticsearch relocates it - onto a different node then the information about the original recovery will not - be shown in the recovery API. + .. raw:: html + +

Get index recovery information. + Get information about ongoing and completed shard recoveries for one or more indices. + For data streams, the API returns information for the stream's backing indices.

+

All recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time.

+

Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. + When a shard recovery completes, the recovered shard is available for search and indexing.

+

Recovery automatically occurs during the following processes:

  • When creating an index for the first time.
  • When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path.
  • Creation of new replica shard copies from the primary.
  • Relocation of a shard copy to a different node in the same cluster.
  • A snapshot restore operation.
  • A clone, shrink, or split operation.

You can determine the cause of a shard recovery using the recovery or cat recovery APIs.

+

The index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster. + It only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist. + This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API.

+ ``_ @@ -3798,19 +3903,19 @@ async def refresh( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Refresh an index. A refresh makes recent operations performed on one or more - indices available for search. For data streams, the API runs the refresh operation - on the stream’s backing indices. By default, Elasticsearch periodically refreshes - indices every second, but only on indices that have received one search request - or more in the last 30 seconds. You can change this default interval with the - `index.refresh_interval` setting. Refresh requests are synchronous and do not - return a response until the refresh operation completes. Refreshes are resource-intensive. - To ensure good cluster performance, it's recommended to wait for Elasticsearch's - periodic refresh rather than performing an explicit refresh when possible. If - your application workflow indexes documents and then runs a search to retrieve - the indexed document, it's recommended to use the index API's `refresh=wait_for` - query parameter option. This option ensures the indexing operation waits for - a periodic refresh before running the search. + .. raw:: html + +

Refresh an index. + A refresh makes recent operations performed on one or more indices available for search. + For data streams, the API runs the refresh operation on the stream’s backing indices.

+

By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds. + You can change this default interval with the index.refresh_interval setting.

+

Refresh requests are synchronous and do not return a response until the refresh operation completes.

+

Refreshes are resource-intensive. + To ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.

+

If your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's refresh=wait_for query parameter option. + This option ensures the indexing operation waits for a periodic refresh before running the search.
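
As a sketch of the recommendation above, preferring refresh="wait_for" on the write over an explicit refresh; names and URL are placeholders:

```
import asyncio
from elasticsearch import AsyncElasticsearch

async def refresh_example():
    client = AsyncElasticsearch("http://localhost:9200")
    # Wait for the next periodic refresh as part of the indexing call.
    await client.index(
        index="my-index-000001",
        document={"message": "hello"},
        refresh="wait_for",
    )
    # An explicit refresh is still available when you really need it.
    await client.indices.refresh(index="my-index-000001")
    await client.close()

asyncio.run(refresh_example())
```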

+ ``_ @@ -3880,21 +3985,20 @@ async def reload_search_analyzers( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Reload search analyzers. Reload an index's search analyzers and their resources. - For data streams, the API reloads search analyzers and resources for the stream's - backing indices. IMPORTANT: After reloading the search analyzers you should clear - the request cache to make sure it doesn't contain responses derived from the - previous versions of the analyzer. You can use the reload search analyzers API - to pick up changes to synonym files used in the `synonym_graph` or `synonym` - token filter of a search analyzer. To be eligible, the token filter must have - an `updateable` flag of `true` and only be used in search analyzers. NOTE: This - API does not perform a reload for each shard of an index. Instead, it performs - a reload for each node containing index shards. As a result, the total shard - count returned by the API can differ from the number of index shards. Because - reloading affects every node with an index shard, it is important to update the - synonym file on every data node in the cluster--including nodes that don't contain - a shard replica--before using this API. This ensures the synonym file is updated - everywhere in the cluster in case shards are relocated in the future. + .. raw:: html + +

Reload search analyzers. + Reload an index's search analyzers and their resources. + For data streams, the API reloads search analyzers and resources for the stream's backing indices.

+

IMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer.

+

You can use the reload search analyzers API to pick up changes to synonym files used in the synonym_graph or synonym token filter of a search analyzer. + To be eligible, the token filter must have an updateable flag of true and only be used in search analyzers.

+

NOTE: This API does not perform a reload for each shard of an index. + Instead, it performs a reload for each node containing index shards. + As a result, the total shard count returned by the API can differ from the number of index shards. + Because reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster--including nodes that don't contain a shard replica--before using this API. + This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future.

+ ``_ @@ -3958,38 +4062,33 @@ async def resolve_cluster( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resolve the cluster. Resolve the specified index expressions to return information - about each cluster, including the local cluster, if included. Multiple patterns - and remote clusters are supported. This endpoint is useful before doing a cross-cluster - search in order to determine which remote clusters should be included in a search. - You use the same index expression with this endpoint as you would for cross-cluster - search. Index and cluster exclusions are also supported with this endpoint. For - each cluster in the index expression, information is returned about: * Whether - the querying ("local") cluster is currently connected to each remote cluster - in the index expression scope. * Whether each remote cluster is configured with - `skip_unavailable` as `true` or `false`. * Whether there are any indices, aliases, - or data streams on that cluster that match the index expression. * Whether the - search is likely to have errors returned when you do the cross-cluster search - (including any authorization errors if you do not have permission to query the - index). * Cluster version information, including the Elasticsearch server version. - For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information - about the local cluster and all remotely configured clusters that start with - the alias `cluster*`. Each cluster returns information about whether it has any - indices, aliases or data streams that match `my-index-*`. **Advantages of using - this endpoint before a cross-cluster search** You may want to exclude a cluster - or index from a search when: * A remote cluster is not currently connected and - is configured with `skip_unavailable=false`. Running a cross-cluster search under - those conditions will cause the entire search to fail. * A cluster has no matching - indices, aliases or data streams for the index expression (or your user does - not have permissions to search them). For example, suppose your index expression - is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data - streams that match `logs*`. In that case, that cluster will return no results - from that cluster if you include it in a cross-cluster search. * The index expression - (combined with any query parameters you specify) will likely cause an exception - to be thrown when you do the search. In these cases, the "error" field in the - `_resolve/cluster` response will be present. (This is also where security/permission - errors will be shown.) * A remote cluster is an older version that does not support - the feature you want to use in your search. + .. raw:: html + +

Resolve the cluster. + Resolve the specified index expressions to return information about each cluster, including the local cluster, if included. + Multiple patterns and remote clusters are supported.

+

This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search.

+

You use the same index expression with this endpoint as you would for cross-cluster search. + Index and cluster exclusions are also supported with this endpoint.

+

For each cluster in the index expression, information is returned about:

  • Whether the querying ("local") cluster is currently connected to each remote cluster in the index expression scope.
  • Whether each remote cluster is configured with skip_unavailable as true or false.
  • Whether there are any indices, aliases, or data streams on that cluster that match the index expression.
  • Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).
  • Cluster version information, including the Elasticsearch server version.

For example, GET /_resolve/cluster/my-index-*,cluster*:my-index-* returns information about the local cluster and all remotely configured clusters that start with the alias cluster*. + Each cluster returns information about whether it has any indices, aliases or data streams that match my-index-*.

+

Advantages of using this endpoint before a cross-cluster search

+

You may want to exclude a cluster or index from a search when:

  • A remote cluster is not currently connected and is configured with skip_unavailable=false. Running a cross-cluster search under those conditions will cause the entire search to fail.
  • A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is logs*,remote1:logs* and the remote1 cluster has no indices, aliases or data streams that match logs*. In that case, that cluster will return no results if you include it in a cross-cluster search.
  • The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the _resolve/cluster response will be present. (This is also where security/permission errors will be shown.)
  • A remote cluster is an older version that does not support the feature you want to use in your search.
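
An illustrative check before a cross-cluster search; the index expression mirrors the example above, and the response fields printed here are assumptions about the response shape rather than something this patch defines:

```
import asyncio
from elasticsearch import AsyncElasticsearch

async def resolve_cluster_example():
    client = AsyncElasticsearch("http://localhost:9200")
    # Inspect the local and remote clusters covered by the index expression.
    resp = await client.indices.resolve_cluster(
        name="my-index-*,cluster*:my-index-*",
    )
    for cluster, details in resp.body.items():
        print(cluster, details.get("connected"), details.get("matching_indices"))
    await client.close()

asyncio.run(resolve_cluster_example())
```
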
+ ``_ @@ -4062,8 +4161,12 @@ async def resolve_index( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resolve indices. Resolve the names and/or index patterns for indices, aliases, - and data streams. Multiple patterns and remote clusters are supported. + .. raw:: html + +

Resolve indices. + Resolve the names and/or index patterns for indices, aliases, and data streams. + Multiple patterns and remote clusters are supported.

+ ``_ @@ -4136,33 +4239,35 @@ async def rollover( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Roll over to a new index. TIP: It is recommended to use the index lifecycle rollover - action to automate rollovers. The rollover API creates a new index for a data - stream or index alias. The API behavior depends on the rollover target. **Roll - over a data stream** If you roll over a data stream, the API creates a new write - index for the stream. The stream's previous write index becomes a regular backing - index. A rollover also increments the data stream's generation. **Roll over an - index alias with a write index** TIP: Prior to Elasticsearch 7.9, you'd typically - use an index alias with a write index to manage time series data. Data streams - replace this functionality, require less maintenance, and automatically integrate - with data tiers. If an index alias points to multiple indices, one of the indices - must be a write index. The rollover API creates a new write index for the alias - with `is_write_index` set to `true`. The API also `sets is_write_index` to `false` - for the previous write index. **Roll over an index alias with one index** If - you roll over an index alias that points to only one index, the API creates a - new index for the alias and removes the original index from the alias. NOTE: - A rollover creates a new index and is subject to the `wait_for_active_shards` - setting. **Increment index names for an alias** When you roll over an index alias, - you can specify a name for the new index. If you don't specify a name and the - current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, - the new index name increments that number. For example, if you roll over an alias - with a current index of `my-index-000001`, the rollover creates a new index named - `my-index-000002`. This number is always six characters and zero-padded, regardless - of the previous index's name. If you use an index alias for time series data, - you can use date math in the index name to track the rollover date. For example, - you can create an alias that points to an index named ``. - If you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`. - If you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`. + .. raw:: html + +

Roll over to a new index. + TIP: It is recommended to use the index lifecycle rollover action to automate rollovers.

+

The rollover API creates a new index for a data stream or index alias. + The API behavior depends on the rollover target.

+

Roll over a data stream

+

If you roll over a data stream, the API creates a new write index for the stream. + The stream's previous write index becomes a regular backing index. + A rollover also increments the data stream's generation.

+

Roll over an index alias with a write index

+

TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data. + Data streams replace this functionality, require less maintenance, and automatically integrate with data tiers.

+

If an index alias points to multiple indices, one of the indices must be a write index. + The rollover API creates a new write index for the alias with is_write_index set to true. + The API also sets is_write_index to false for the previous write index.

+

Roll over an index alias with one index

+

If you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias.

+

NOTE: A rollover creates a new index and is subject to the wait_for_active_shards setting.

+

Increment index names for an alias

+

When you roll over an index alias, you can specify a name for the new index. + If you don't specify a name and the current index ends with - and a number, such as my-index-000001 or my-index-3, the new index name increments that number. + For example, if you roll over an alias with a current index of my-index-000001, the rollover creates a new index named my-index-000002. + This number is always six characters and zero-padded, regardless of the previous index's name.

+

If you use an index alias for time series data, you can use date math in the index name to track the rollover date. + For example, you can create an alias that points to an index named <my-index-{now/d}-000001>. + If you create the index on May 6, 2099, the index's name is my-index-2099.05.06-000001. + If you roll over the alias on May 7, 2099, the new index's name is my-index-2099.05.07-000002.
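
A conditional rollover sketch with an invented alias and example conditions:

```
import asyncio
from elasticsearch import AsyncElasticsearch

async def rollover_example():
    client = AsyncElasticsearch("http://localhost:9200")
    # Roll the alias over only if the current write index is old or large enough.
    resp = await client.indices.rollover(
        alias="my-alias",
        conditions={"max_age": "7d", "max_primary_shard_size": "50gb"},
    )
    print(resp["old_index"], "->", resp["new_index"], "rolled:", resp["rolled_over"])
    await client.close()

asyncio.run(rollover_example())
```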

+ ``_ @@ -4267,9 +4372,12 @@ async def segments( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index segments. Get low-level information about the Lucene segments in index - shards. For data streams, the API returns information about the stream's backing - indices. + .. raw:: html + +

Get index segments. + Get low-level information about the Lucene segments in index shards. + For data streams, the API returns information about the stream's backing indices.

+ ``_ @@ -4348,14 +4456,20 @@ async def shard_stores( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index shard stores. Get store information about replica shards in one or - more indices. For data streams, the API retrieves store information for the stream's - backing indices. The index shard stores API returns the following information: - * The node on which each replica shard exists. * The allocation ID for each replica - shard. * A unique ID for each replica shard. * Any errors encountered while opening - the shard index or from an earlier failure. By default, the API returns store - information only for primary shards that are unassigned or have one or more unassigned - replica shards. + .. raw:: html + +

Get index shard stores. + Get store information about replica shards in one or more indices. + For data streams, the API retrieves store information for the stream's backing indices.

+

The index shard stores API returns the following information:

  • The node on which each replica shard exists.
  • The allocation ID for each replica shard.
  • A unique ID for each replica shard.
  • Any errors encountered while opening the shard index or from an earlier failure.

By default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards.

+ ``_ @@ -4426,39 +4540,38 @@ async def shrink( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Shrink an index. Shrink an index into a new index with fewer primary shards. - Before you can shrink an index: * The index must be read-only. * A copy of every - shard in the index must reside on the same node. * The index must have a green - health status. To make shard allocation easier, we recommend you also remove - the index's replica shards. You can later re-add replica shards as part of the - shrink operation. The requested number of primary shards in the target index - must be a factor of the number of shards in the source index. For example an - index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an - index with 15 primary shards can be shrunk into 5, 3 or 1. If the number of shards - in the index is a prime number it can only be shrunk into a single primary shard - Before shrinking, a (primary or replica) copy of every shard in the index must - be present on the same node. The current write index on a data stream cannot - be shrunk. In order to shrink the current write index, the data stream must first - be rolled over so that a new write index is created and then the previous write - index can be shrunk. A shrink operation: * Creates a new target index with the - same definition as the source index, but with a smaller number of primary shards. - * Hard-links segments from the source index into the target index. If the file - system does not support hard-linking, then all segments are copied into the new - index, which is a much more time consuming process. Also if using multiple data - paths, shards on different data paths require a full copy of segment files if - they are not on the same disk since hardlinks do not work across disks. * Recovers - the target index as though it were a closed index which had just been re-opened. - Recovers shards to the `.routing.allocation.initial_recovery._id` index setting. - IMPORTANT: Indices can only be shrunk if they satisfy the following requirements: - * The target index must not exist. * The source index must have more primary - shards than the target index. * The number of primary shards in the target index - must be a factor of the number of primary shards in the source index. The source - index must have more primary shards than the target index. * The index must not - contain more than 2,147,483,519 documents in total across all shards that will - be shrunk into a single shard on the target index as this is the maximum number - of docs that can fit into a single shard. * The node handling the shrink process - must have sufficient free disk space to accommodate a second copy of the existing - index. + .. raw:: html + +

Shrink an index. + Shrink an index into a new index with fewer primary shards.

+

Before you can shrink an index:

  • The index must be read-only.
  • A copy of every shard in the index must reside on the same node.
  • The index must have a green health status.

To make shard allocation easier, we recommend you also remove the index's replica shards. + You can later re-add replica shards as part of the shrink operation.

+

The requested number of primary shards in the target index must be a factor of the number of shards in the source index. For example, an index with 8 primary shards can be shrunk into 4, 2, or 1 primary shards, or an index with 15 primary shards can be shrunk into 5, 3, or 1. If the number of shards in the index is a prime number, it can only be shrunk into a single primary shard. Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node.

+

The current write index on a data stream cannot be shrunk. In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk.

+

A shrink operation:

  • Creates a new target index with the same definition as the source index, but with a smaller number of primary shards.
  • Hard-links segments from the source index into the target index. If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time-consuming process. Also, if using multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk, since hard links do not work across disks.
  • Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the .routing.allocation.initial_recovery._id index setting.

IMPORTANT: Indices can only be shrunk if they satisfy the following requirements:

  • The target index must not exist.
  • The source index must have more primary shards than the target index.
  • The number of primary shards in the target index must be a factor of the number of primary shards in the source index.
  • The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index, as this is the maximum number of docs that can fit into a single shard.
  • The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index.
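
A sketch of the shrink workflow described above (block writes, then shrink), with placeholder index names; relocating all shard copies onto one node is omitted here:

```
import asyncio
from elasticsearch import AsyncElasticsearch

async def shrink_example():
    client = AsyncElasticsearch("http://localhost:9200")
    # Block writes first; shard copies must also be colocated on one node.
    await client.indices.add_block(index="my-index-000001", block="write")
    resp = await client.indices.shrink(
        index="my-index-000001",
        target="my-shrunken-index",
        settings={"index.number_of_shards": 1},
    )
    print(resp)
    await client.close()

asyncio.run(shrink_example())
```
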
+ ``_ @@ -4533,8 +4646,11 @@ async def simulate_index_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Simulate an index. Get the index configuration that would be applied to the specified - index from an existing index template. + .. raw:: html + +

Simulate an index. + Get the index configuration that would be applied to the specified index from an existing index template.

+ ``_ @@ -4611,8 +4727,11 @@ async def simulate_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Simulate an index template. Get the index configuration that would be applied - by a particular index template. + .. raw:: html + +

Simulate an index template. + Get the index configuration that would be applied by a particular index template.

+ ``_ @@ -4743,31 +4862,44 @@ async def split( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Split an index. Split an index into a new index with more primary shards. * Before - you can split an index: * The index must be read-only. * The cluster health status - must be green. You can do make an index read-only with the following request - using the add index block API: ``` PUT /my_source_index/_block/write ``` The - current write index on a data stream cannot be split. In order to split the current - write index, the data stream must first be rolled over so that a new write index - is created and then the previous write index can be split. The number of times - the index can be split (and the number of shards that each original shard can - be split into) is determined by the `index.number_of_routing_shards` setting. - The number of routing shards specifies the hashing space that is used internally - to distribute documents across shards with consistent hashing. For instance, - a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be - split by a factor of 2 or 3. A split operation: * Creates a new target index - with the same definition as the source index, but with a larger number of primary - shards. * Hard-links segments from the source index into the target index. If - the file system doesn't support hard-linking, all segments are copied into the - new index, which is a much more time consuming process. * Hashes all documents - again, after low level files are created, to delete documents that belong to - a different shard. * Recovers the target index as though it were a closed index - which had just been re-opened. IMPORTANT: Indices can only be split if they satisfy - the following requirements: * The target index must not exist. * The source index - must have fewer primary shards than the target index. * The number of primary - shards in the target index must be a multiple of the number of primary shards - in the source index. * The node handling the split process must have sufficient - free disk space to accommodate a second copy of the existing index. + .. raw:: html + +

Split an index.
Split an index into a new index with more primary shards.

Before you can split an index:

  • The index must be read-only.
  • The cluster health status must be green.

You can make an index read-only with the following request using the add index block API:

    PUT /my_source_index/_block/write

The current write index on a data stream cannot be split.
In order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split.

The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the index.number_of_routing_shards setting.
The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing.
For instance, a 5 shard index with number_of_routing_shards set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.

A split operation:

  • Creates a new target index with the same definition as the source index, but with a larger number of primary shards.
  • Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time-consuming process.
  • Hashes all documents again, after low-level files are created, to delete documents that belong to a different shard.
  • Recovers the target index as though it were a closed index which had just been re-opened.

IMPORTANT: Indices can only be split if they satisfy the following requirements:

  • The target index must not exist.
  • The source index must have fewer primary shards than the target index.
  • The number of primary shards in the target index must be a multiple of the number of primary shards in the source index.
  • The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.
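For illustration only, a rough sketch of this flow with the async Python client; the index names and shard counts are made up for the example:

```python
from elasticsearch import AsyncElasticsearch

async def split_index(client: AsyncElasticsearch) -> None:
    # Make the source index read-only, as required before splitting.
    await client.indices.add_block(index="my_source_index", block="write")
    # Split into a target whose primary shard count is a multiple of the source's.
    await client.indices.split(
        index="my_source_index",
        target="my_target_index",
        settings={"index.number_of_shards": 4},
    )
```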
+ ``_ @@ -4859,14 +4991,17 @@ async def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index statistics. For data streams, the API retrieves statistics for the - stream's backing indices. By default, the returned statistics are index-level - with `primaries` and `total` aggregations. `primaries` are the values for only - the primary shards. `total` are the accumulated values for both primary and replica - shards. To get shard-level statistics, set the `level` parameter to `shards`. - NOTE: When moving to another node, the shard-level statistics for a shard are - cleared. Although the shard is no longer part of the node, that node retains - any node-level statistics to which the shard contributed. + .. raw:: html + +

Get index statistics. + For data streams, the API retrieves statistics for the stream's backing indices.

+

By default, the returned statistics are index-level with primaries and total aggregations. + primaries are the values for only the primary shards. + total are the accumulated values for both primary and replica shards.

+

To get shard-level statistics, set the level parameter to shards.

+

NOTE: When moving to another node, the shard-level statistics for a shard are cleared. + Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.
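As a brief, hedged example of requesting shard-level statistics with the async Python client (the index name is illustrative):

```python
from elasticsearch import AsyncElasticsearch

async def shard_level_stats(client: AsyncElasticsearch) -> None:
    # level="shards" adds per-shard statistics to the default
    # index-level primaries/total aggregations.
    resp = await client.indices.stats(index="my-index", level="shards")
    print(resp["_all"]["total"]["docs"]["count"])
```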

+ ``_ @@ -4969,8 +5104,11 @@ async def unfreeze( wait_for_active_shards: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Unfreeze an index. When a frozen index is unfrozen, the index goes through the - normal recovery process and becomes writeable again. + .. raw:: html + +

Unfreeze an index. + When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again.

+ ``_ @@ -5044,7 +5182,11 @@ async def update_aliases( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update an alias. Adds a data stream or index to an alias. + .. raw:: html + +

Create or update an alias. + Adds a data stream or index to an alias.
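A minimal sketch of adding an index to an alias through this API with the async Python client; the index and alias names are illustrative:

```python
from elasticsearch import AsyncElasticsearch

async def point_alias(client: AsyncElasticsearch) -> None:
    await client.indices.update_aliases(
        actions=[
            {"add": {"index": "logs-2024.11", "alias": "logs-current"}},
        ]
    )
```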

+ ``_ @@ -5119,7 +5261,11 @@ async def validate_query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Validate a query. Validates a query without running it. + .. raw:: html + +

Validate a query. + Validates a query without running it.

+ ``_ diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py index 0b124e281..f85857b8b 100644 --- a/elasticsearch/_async/client/inference.py +++ b/elasticsearch/_async/client/inference.py @@ -44,7 +44,10 @@ async def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete an inference endpoint + .. raw:: html + +

Delete an inference endpoint

+ ``_ @@ -109,7 +112,10 @@ async def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get an inference endpoint + .. raw:: html + +

Get an inference endpoint

+ ``_ @@ -172,7 +178,10 @@ async def inference( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Perform inference on the service + .. raw:: html + +

Perform inference on the service

+ ``_ @@ -255,21 +264,18 @@ async def put( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Create an inference endpoint. When you create an inference endpoint, the associated - machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before - using it. To verify the deployment status, use the get trained model statistics - API. Look for `"state": "fully_allocated"` in the response and ensure that the - `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating - multiple endpoints for the same model unless required, as each endpoint consumes - significant resources. IMPORTANT: The inference APIs enable you to use certain - services, such as built-in machine learning models (ELSER, E5), models uploaded - through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google - Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models - uploaded through Eland, the inference APIs offer an alternative way to use and - manage trained models. However, if you do not plan to use the inference APIs - to use these models or if you want to use non-NLP models, use the machine learning - trained model APIs. + .. raw:: html + +

Create an inference endpoint. + When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. + After creating the endpoint, wait for the model deployment to complete before using it. + To verify the deployment status, use the get trained model statistics API. + Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". + Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

+

IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. + For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. + However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
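As a sketch under stated assumptions — the endpoint id and service settings below are invented, and the 8.x client is assumed to accept the endpoint body through an inference_config parameter — creating an ELSER-style endpoint might look like:

```python
from elasticsearch import AsyncElasticsearch

async def create_inference_endpoint(client: AsyncElasticsearch) -> None:
    # Assumption: the endpoint body is passed via `inference_config`.
    await client.inference.put(
        inference_id="my-elser-endpoint",
        task_type="sparse_embedding",
        inference_config={
            "service": "elser",
            "service_settings": {"num_allocations": 1, "num_threads": 1},
        },
    )
```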

+ ``_ @@ -339,16 +345,14 @@ async def update( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Update an inference endpoint. Modify `task_settings`, secrets (within `service_settings`), - or `num_allocations` for an inference endpoint, depending on the specific endpoint - service and `task_type`. IMPORTANT: The inference APIs enable you to use certain - services, such as built-in machine learning models (ELSER, E5), models uploaded - through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, - Watsonx.ai, or Hugging Face. For built-in models and models uploaded through - Eland, the inference APIs offer an alternative way to use and manage trained - models. However, if you do not plan to use the inference APIs to use these models - or if you want to use non-NLP models, use the machine learning trained model - APIs. + .. raw:: html + +

Update an inference endpoint.

+

Modify task_settings, secrets (within service_settings), or num_allocations for an inference endpoint, depending on the specific endpoint service and task_type.

+

IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. + For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. + However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.

+ ``_ diff --git a/elasticsearch/_async/client/ingest.py b/elasticsearch/_async/client/ingest.py index c8dc21c50..469be28e6 100644 --- a/elasticsearch/_async/client/ingest.py +++ b/elasticsearch/_async/client/ingest.py @@ -38,8 +38,11 @@ async def delete_geoip_database( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete GeoIP database configurations. Delete one or more IP geolocation database - configurations. + .. raw:: html + +

Delete GeoIP database configurations. + Delete one or more IP geolocation database configurations.

+ ``_ @@ -90,7 +93,10 @@ async def delete_ip_location_database( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete IP geolocation database configurations. + .. raw:: html + +

Delete IP geolocation database configurations.

+ ``_ @@ -143,7 +149,11 @@ async def delete_pipeline( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete pipelines. Delete one or more ingest pipelines. + .. raw:: html + +

Delete pipelines. + Delete one or more ingest pipelines.

+ ``_ @@ -192,8 +202,11 @@ async def geo_ip_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get GeoIP statistics. Get download statistics for GeoIP2 databases that are used - with the GeoIP processor. + .. raw:: html + +

Get GeoIP statistics. + Get download statistics for GeoIP2 databases that are used with the GeoIP processor.

+ ``_ """ @@ -229,8 +242,11 @@ async def get_geoip_database( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get GeoIP database configurations. Get information about one or more IP geolocation - database configurations. + .. raw:: html + +

Get GeoIP database configurations. + Get information about one or more IP geolocation database configurations.

+ ``_ @@ -276,7 +292,10 @@ async def get_ip_location_database( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get IP geolocation database configurations. + .. raw:: html + +

Get IP geolocation database configurations.

+ ``_ @@ -329,8 +348,12 @@ async def get_pipeline( summary: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get pipelines. Get information about one or more ingest pipelines. This API returns - a local reference of the pipeline. + .. raw:: html + +

Get pipelines. + Get information about one or more ingest pipelines. + This API returns a local reference of the pipeline.

+ ``_ @@ -381,10 +404,13 @@ async def processor_grok( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Run a grok processor. Extract structured fields out of a single text field within - a document. You must choose which field to extract matched fields from, as well - as the grok pattern you expect will match. A grok pattern is like a regular expression - that supports aliased expressions that can be reused. + .. raw:: html + +

Run a grok processor. + Extract structured fields out of a single text field within a document. + You must choose which field to extract matched fields from, as well as the grok pattern you expect will match. + A grok pattern is like a regular expression that supports aliased expressions that can be reused.

+ ``_ """ @@ -427,8 +453,11 @@ async def put_geoip_database( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a GeoIP database configuration. Refer to the create or update - IP geolocation database configuration API. + .. raw:: html + +

Create or update a GeoIP database configuration. + Refer to the create or update IP geolocation database configuration API.

+ ``_ @@ -500,7 +529,10 @@ async def put_ip_location_database( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update an IP geolocation database configuration. + .. raw:: html + +

Create or update an IP geolocation database configuration.

+ ``_ @@ -582,7 +614,11 @@ async def put_pipeline( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a pipeline. Changes made using this API take effect immediately. + .. raw:: html + +

Create or update a pipeline. + Changes made using this API take effect immediately.

+ ``_ @@ -674,9 +710,12 @@ async def simulate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Simulate a pipeline. Run an ingest pipeline against a set of provided documents. - You can either specify an existing pipeline to use with the provided documents - or supply a pipeline definition in the body of the request. + .. raw:: html + +

Simulate a pipeline. + Run an ingest pipeline against a set of provided documents. + You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.
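A small, hedged sketch of simulating an inline pipeline definition against a test document with the async Python client; the processor and document are illustrative:

```python
from elasticsearch import AsyncElasticsearch

async def try_pipeline(client: AsyncElasticsearch) -> None:
    resp = await client.ingest.simulate(
        pipeline={"processors": [{"lowercase": {"field": "message"}}]},
        docs=[{"_source": {"message": "HELLO WORLD"}}],
    )
    # Each simulated document is returned under docs[n].doc.
    print(resp["docs"][0]["doc"]["_source"]["message"])
```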

+ ``_ diff --git a/elasticsearch/_async/client/license.py b/elasticsearch/_async/client/license.py index e5ffac7a5..cc3827cce 100644 --- a/elasticsearch/_async/client/license.py +++ b/elasticsearch/_async/client/license.py @@ -37,9 +37,12 @@ async def delete( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete the license. When the license expires, your subscription level reverts - to Basic. If the operator privileges feature is enabled, only operator users - can use this API. + .. raw:: html + +

Delete the license. + When the license expires, your subscription level reverts to Basic.

+

If the operator privileges feature is enabled, only operator users can use this API.

+ ``_ @@ -84,11 +87,13 @@ async def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get license information. Get information about your Elastic license including - its type, its status, when it was issued, and when it expires. NOTE: If the master - node is generating a new cluster state, the get license API may return a `404 - Not Found` response. If you receive an unexpected 404 response after cluster - startup, wait a short period and retry the request. + .. raw:: html + +

Get license information. + Get information about your Elastic license including its type, its status, when it was issued, and when it expires.

+

NOTE: If the master node is generating a new cluster state, the get license API may return a 404 Not Found response. + If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request.

+ ``_ @@ -134,7 +139,10 @@ async def get_basic_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the basic license status. + .. raw:: html + +

Get the basic license status.

+ ``_ """ @@ -169,7 +177,10 @@ async def get_trial_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the trial status. + .. raw:: html + +

Get the trial status.

+ ``_ """ @@ -212,14 +223,16 @@ async def post( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the license. You can update your license at runtime without shutting down - your nodes. License updates take effect immediately. If the license you are installing - does not support all of the features that were available with your previous license, - however, you are notified in the response. You must then re-submit the API request - with the acknowledge parameter set to true. NOTE: If Elasticsearch security features - are enabled and you are installing a gold or higher license, you must enable - TLS on the transport networking layer before you install the license. If the - operator privileges feature is enabled, only operator users can use this API. + .. raw:: html + +

Update the license. + You can update your license at runtime without shutting down your nodes. + License updates take effect immediately. + If the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response. + You must then re-submit the API request with the acknowledge parameter set to true.

+

NOTE: If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license. + If the operator privileges feature is enabled, only operator users can use this API.
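Purely as an illustrative sketch — the file path is an assumption, and the downloaded license JSON is assumed to wrap the licenses under a top-level "licenses" key:

```python
import json

from elasticsearch import AsyncElasticsearch

async def install_license(client: AsyncElasticsearch) -> None:
    with open("license.json") as f:
        body = json.load(f)
    # acknowledge=True accepts any feature downgrades in the same request.
    await client.license.post(licenses=body["licenses"], acknowledge=True)
```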

+ ``_ @@ -282,13 +295,15 @@ async def post_start_basic( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Start a basic license. Start an indefinite basic license, which gives access - to all the basic features. NOTE: In order to start a basic license, you must - not currently have a basic license. If the basic license does not support all - of the features that are available with your current license, however, you are - notified in the response. You must then re-submit the API request with the `acknowledge` - parameter set to `true`. To check the status of your basic license, use the get - basic license API. + .. raw:: html + +

Start a basic license. + Start an indefinite basic license, which gives access to all the basic features.

+

NOTE: In order to start a basic license, you must not currently have a basic license.

+

If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. + You must then re-submit the API request with the acknowledge parameter set to true.

+

To check the status of your basic license, use the get basic license API.

+ ``_ @@ -338,12 +353,14 @@ async def post_start_trial( type_query_string: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Start a trial. Start a 30-day trial, which gives access to all subscription features. - NOTE: You are allowed to start a trial only if your cluster has not already activated - a trial for the current major product version. For example, if you have already - activated a trial for v8.0, you cannot start a new trial until v9.0. You can, - however, request an extended trial at https://www.elastic.co/trialextension. - To check the status of your trial, use the get trial status API. + .. raw:: html + +

Start a trial. + Start a 30-day trial, which gives access to all subscription features.

+

NOTE: You are allowed to start a trial only if your cluster has not already activated a trial for the current major product version. + For example, if you have already activated a trial for v8.0, you cannot start a new trial until v9.0. You can, however, request an extended trial at https://www.elastic.co/trialextension.

+

To check the status of your trial, use the get trial status API.

+ ``_ diff --git a/elasticsearch/_async/client/logstash.py b/elasticsearch/_async/client/logstash.py index 308588e32..9e8e6c639 100644 --- a/elasticsearch/_async/client/logstash.py +++ b/elasticsearch/_async/client/logstash.py @@ -36,9 +36,12 @@ async def delete_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a Logstash pipeline. Delete a pipeline that is used for Logstash Central - Management. If the request succeeds, you receive an empty response with an appropriate - status code. + .. raw:: html + +

Delete a Logstash pipeline. + Delete a pipeline that is used for Logstash Central Management. + If the request succeeds, you receive an empty response with an appropriate status code.

+ ``_ @@ -78,7 +81,11 @@ async def get_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get Logstash pipelines. Get pipelines that are used for Logstash Central Management. + .. raw:: html + +

Get Logstash pipelines. + Get pipelines that are used for Logstash Central Management.

+ ``_ @@ -125,8 +132,12 @@ async def put_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a Logstash pipeline. Create a pipeline that is used for Logstash - Central Management. If the specified pipeline exists, it is replaced. + .. raw:: html + +

Create or update a Logstash pipeline.

+

Create a pipeline that is used for Logstash Central Management. + If the specified pipeline exists, it is replaced.

+ ``_ diff --git a/elasticsearch/_async/client/migration.py b/elasticsearch/_async/client/migration.py index 4bf4cead0..6cbc5283e 100644 --- a/elasticsearch/_async/client/migration.py +++ b/elasticsearch/_async/client/migration.py @@ -36,10 +36,13 @@ async def deprecations( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get deprecation information. Get information about different cluster, node, and - index level settings that use deprecated features that will be removed or changed - in the next major version. TIP: This APIs is designed for indirect use by the - Upgrade Assistant. You are strongly recommended to use the Upgrade Assistant. + .. raw:: html + +

Get deprecation information. + Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.

+

TIP: This API is designed for indirect use by the Upgrade Assistant. + We strongly recommend that you use the Upgrade Assistant.

+ ``_ @@ -82,11 +85,14 @@ async def get_feature_upgrade_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get feature migration information. Version upgrades sometimes require changes - to how features store configuration information and data in system indices. Check - which features need to be migrated and the status of any migrations that are - in progress. TIP: This API is designed for indirect use by the Upgrade Assistant. - You are strongly recommended to use the Upgrade Assistant. + .. raw:: html + +

Get feature migration information. + Version upgrades sometimes require changes to how features store configuration information and data in system indices. + Check which features need to be migrated and the status of any migrations that are in progress.

+

TIP: This API is designed for indirect use by the Upgrade Assistant. + We strongly recommend that you use the Upgrade Assistant.

+ ``_ """ @@ -121,11 +127,14 @@ async def post_feature_upgrade( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Start the feature migration. Version upgrades sometimes require changes to how - features store configuration information and data in system indices. This API - starts the automatic migration process. Some functionality might be temporarily - unavailable during the migration process. TIP: The API is designed for indirect - use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. + .. raw:: html + +

Start the feature migration. + Version upgrades sometimes require changes to how features store configuration information and data in system indices. + This API starts the automatic migration process.

+

Some functionality might be temporarily unavailable during the migration process.

+

TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.

+ ``_ """ diff --git a/elasticsearch/_async/client/ml.py b/elasticsearch/_async/client/ml.py index 813fe128f..dfc4ddc32 100644 --- a/elasticsearch/_async/client/ml.py +++ b/elasticsearch/_async/client/ml.py @@ -36,11 +36,14 @@ async def clear_trained_model_deployment_cache( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear trained model deployment cache. Cache will be cleared on all nodes where - the trained model is assigned. A trained model deployment may have an inference - cache enabled. As requests are handled by each allocated node, their responses - may be cached on that individual node. Calling this API clears the caches without - restarting the deployment. + .. raw:: html + +

Clear trained model deployment cache. + Cache will be cleared on all nodes where the trained model is assigned. + A trained model deployment may have an inference cache enabled. + As requests are handled by each allocated node, their responses may be cached on that individual node. + Calling this API clears the caches without restarting the deployment.

+ ``_ @@ -88,19 +91,14 @@ async def close_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Close anomaly detection jobs. A job can be opened and closed multiple times throughout - its lifecycle. A closed job cannot receive data or perform analysis operations, - but you can still explore and navigate results. When you close a job, it runs - housekeeping tasks such as pruning the model history, flushing buffers, calculating - final results and persisting the model snapshots. Depending upon the size of - the job, it could take several minutes to close and the equivalent time to re-open. - After it is closed, the job has a minimal overhead on the cluster except for - maintaining its meta data. Therefore it is a best practice to close jobs that - are no longer required to process data. If you close an anomaly detection job - whose datafeed is running, the request first tries to stop the datafeed. This - behavior is equivalent to calling stop datafeed API with the same timeout and - force parameters as the close job request. When a datafeed that has a specified - end date stops, it automatically closes its associated job. + .. raw:: html + +

Close anomaly detection jobs. + A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. + When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its metadata. Therefore it is a best practice to close jobs that are no longer required to process data. + If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling the stop datafeed API with the same timeout and force parameters as the close job request. + When a datafeed that has a specified end date stops, it automatically closes its associated job.
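A hedged sketch of closing a job with the async Python client; the job id and timeout are illustrative:

```python
from elasticsearch import AsyncElasticsearch

async def close_job_when_done(client: AsyncElasticsearch) -> None:
    # Closing runs housekeeping (pruning model history, persisting snapshots),
    # so a generous timeout helps for large jobs.
    await client.ml.close_job(job_id="my-anomaly-job", timeout="30m")
```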

+ ``_ @@ -161,8 +159,11 @@ async def delete_calendar( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a calendar. Removes all scheduled events from a calendar, then deletes - it. + .. raw:: html + +

Delete a calendar. + Removes all scheduled events from a calendar, then deletes it.

+ ``_ @@ -203,7 +204,10 @@ async def delete_calendar_event( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete events from a calendar. + .. raw:: html + +

Delete events from a calendar.

+ ``_ @@ -251,7 +255,10 @@ async def delete_calendar_job( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete anomaly jobs from a calendar. + .. raw:: html + +

Delete anomaly jobs from a calendar.

+ ``_ @@ -300,7 +307,10 @@ async def delete_data_frame_analytics( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a data frame analytics job. + .. raw:: html + +

Delete a data frame analytics job.

+ ``_ @@ -348,7 +358,10 @@ async def delete_datafeed( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a datafeed. + .. raw:: html + +

Delete a datafeed.

+ ``_ @@ -400,13 +413,18 @@ async def delete_expired_data( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete expired ML data. Deletes all job results, model snapshots and forecast - data that have exceeded their retention days period. Machine learning state documents - that are not associated with any job are also deleted. You can limit the request - to a single or set of anomaly detection jobs by using a job identifier, a group - name, a comma-separated list of jobs, or a wildcard expression. You can delete - expired data for all anomaly detection jobs by using _all, by specifying * as - the , or by omitting the . + .. raw:: html + +

Delete expired ML data. + Deletes all job results, model snapshots and forecast data that have exceeded + their retention days period. Machine learning state documents that are not + associated with any job are also deleted. + You can limit the request to a single or set of anomaly detection jobs by + using a job identifier, a group name, a comma-separated list of jobs, or a + wildcard expression. You can delete expired data for all anomaly detection + jobs by using _all, by specifying * as the <job_id>, or by omitting the + <job_id>.

+ ``_ @@ -465,9 +483,12 @@ async def delete_filter( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a filter. If an anomaly detection job references the filter, you cannot - delete the filter. You must update or delete the job before you can delete the - filter. + .. raw:: html + +

Delete a filter. + If an anomaly detection job references the filter, you cannot delete the + filter. You must update or delete the job before you can delete the filter.

+ ``_ @@ -510,10 +531,14 @@ async def delete_forecast( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete forecasts from a job. By default, forecasts are retained for 14 days. - You can specify a different retention period with the `expires_in` parameter - in the forecast jobs API. The delete forecast API enables you to delete one or - more forecasts before they expire. + .. raw:: html + +

Delete forecasts from a job. + By default, forecasts are retained for 14 days. You can specify a + different retention period with the expires_in parameter in the forecast + jobs API. The delete forecast API enables you to delete one or more + forecasts before they expire.
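For illustration, a minimal sketch of removing one forecast before it expires; the job and forecast ids are made up:

```python
from elasticsearch import AsyncElasticsearch

async def drop_forecast(client: AsyncElasticsearch) -> None:
    await client.ml.delete_forecast(
        job_id="my-anomaly-job",
        forecast_id="forecast-1",
    )
```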

+ ``_ @@ -580,12 +605,16 @@ async def delete_job( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete an anomaly detection job. All job configuration, model state and results - are deleted. It is not currently possible to delete multiple jobs using wildcards - or a comma separated list. If you delete a job that has a datafeed, the request - first tries to delete the datafeed. This behavior is equivalent to calling the - delete datafeed API with the same timeout and force parameters as the delete - job request. + .. raw:: html + +

Delete an anomaly detection job. + All job configuration, model state and results are deleted. + It is not currently possible to delete multiple jobs using wildcards or a + comma separated list. If you delete a job that has a datafeed, the request + first tries to delete the datafeed. This behavior is equivalent to calling + the delete datafeed API with the same timeout and force parameters as the + delete job request.

+ ``_ @@ -639,9 +668,13 @@ async def delete_model_snapshot( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a model snapshot. You cannot delete the active model snapshot. To delete - that snapshot, first revert to a different one. To identify the active model - snapshot, refer to the `model_snapshot_id` in the results from the get jobs API. + .. raw:: html + +

Delete a model snapshot. + You cannot delete the active model snapshot. To delete that snapshot, first + revert to a different one. To identify the active model snapshot, refer to + the model_snapshot_id in the results from the get jobs API.

+ ``_ @@ -689,8 +722,11 @@ async def delete_trained_model( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete an unreferenced trained model. The request deletes a trained inference - model that is not referenced by an ingest pipeline. + .. raw:: html + +

Delete an unreferenced trained model. + The request deletes a trained inference model that is not referenced by an ingest pipeline.

+ ``_ @@ -739,9 +775,13 @@ async def delete_trained_model_alias( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a trained model alias. This API deletes an existing model alias that refers - to a trained model. If the model alias is missing or refers to a model other - than the one identified by the `model_id`, this API returns an error. + .. raw:: html + +

Delete a trained model alias. + This API deletes an existing model alias that refers to a trained model. If + the model alias is missing or refers to a model other than the one identified + by the model_id, this API returns an error.

+ ``_ @@ -796,9 +836,13 @@ async def estimate_model_memory( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Estimate job model memory usage. Makes an estimation of the memory usage for - an anomaly detection job model. It is based on analysis configuration details - for the job and cardinality estimates for the fields it references. + .. raw:: html + +

Estimate job model memory usage. + Makes an estimation of the memory usage for an anomaly detection job model. + It is based on analysis configuration details for the job and cardinality + estimates for the fields it references.

+ ``_ @@ -863,10 +907,14 @@ async def evaluate_data_frame( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Evaluate data frame analytics. The API packages together commonly used evaluation - metrics for various types of machine learning features. This has been designed - for use on indexes created by data frame analytics. Evaluation requires both - a ground truth field and an analytics result field to be present. + .. raw:: html + +

Evaluate data frame analytics. + The API packages together commonly used evaluation metrics for various types + of machine learning features. This has been designed for use on indexes + created by data frame analytics. Evaluation requires both a ground truth + field and an analytics result field to be present.

+ ``_ @@ -940,13 +988,18 @@ async def explain_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Explain data frame analytics config. This API provides explanations for a data - frame analytics config that either exists already or one that has not been created - yet. The following explanations are provided: * which fields are included or - not in the analysis and why, * how much memory is estimated to be required. The - estimate can be used when deciding the appropriate value for model_memory_limit - setting later on. If you have object fields or fields that are excluded via source - filtering, they are not included in the explanation. + .. raw:: html + +

Explain data frame analytics config. + This API provides explanations for a data frame analytics config that either + exists already or one that has not been created yet. The following + explanations are provided:

  • which fields are included or not in the analysis and why,
  • how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for the model_memory_limit setting later on.

If you have object fields or fields that are excluded via source filtering, they are not included in the explanation.
+ ``_ @@ -1046,14 +1099,18 @@ async def flush_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Force buffered data to be processed. The flush jobs API is only applicable when - sending data for analysis using the post data API. Depending on the content of - the buffer, then it might additionally calculate new results. Both flush and - close operations are similar, however the flush is more efficient if you are - expecting to send more data for analysis. When flushing, the job remains open - and is available to continue analyzing data. A close operation additionally prunes - and persists the model state to disk and the job must be opened again before - analyzing further data. + .. raw:: html + +

Force buffered data to be processed. + The flush jobs API is only applicable when sending data for analysis using + the post data API. Depending on the content of the buffer, it might + additionally calculate new results. Both flush and close operations are + similar; however, the flush is more efficient if you are expecting to send + more data for analysis. When flushing, the job remains open and is available + to continue analyzing data. A close operation additionally prunes and + persists the model state to disk and the job must be opened again before + analyzing further data.

+ ``_ @@ -1121,10 +1178,14 @@ async def forecast( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Predict future behavior of a time series. Forecasts are not supported for jobs - that perform population analysis; an error occurs if you try to create a forecast - for a job that has an `over_field_name` in its configuration. Forcasts predict - future behavior based on historical data. + .. raw:: html + +

Predict future behavior of a time series.

+

Forecasts are not supported for jobs that perform population analysis; an + error occurs if you try to create a forecast for a job that has an + over_field_name in its configuration. Forecasts predict future behavior + based on historical data.
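A hedged sketch of creating a forecast with the async Python client; the job id, duration, and retention values are illustrative:

```python
from elasticsearch import AsyncElasticsearch

async def run_forecast(client: AsyncElasticsearch) -> None:
    # Forecast the next day of behavior and keep the result for a week.
    await client.ml.forecast(
        job_id="my-anomaly-job",
        duration="1d",
        expires_in="7d",
    )
```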

+ ``_ @@ -1206,8 +1267,11 @@ async def get_buckets( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get anomaly detection job results for buckets. The API presents a chronological - view of the records, grouped by bucket. + .. raw:: html + +

Get anomaly detection job results for buckets. + The API presents a chronological view of the records, grouped by bucket.

+ ``_ @@ -1302,7 +1366,10 @@ async def get_calendar_events( start: t.Optional[t.Union[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get info about events in calendars. + .. raw:: html + +

Get info about events in calendars.

+ ``_ @@ -1368,7 +1435,10 @@ async def get_calendars( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get calendar configuration info. + .. raw:: html + +

Get calendar configuration info.

+ ``_ @@ -1441,7 +1511,10 @@ async def get_categories( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get anomaly detection job results for categories. + .. raw:: html + +

Get anomaly detection job results for categories.

+ ``_ @@ -1523,9 +1596,13 @@ async def get_data_frame_analytics( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Get data frame analytics job configuration info. You can get information for - multiple data frame analytics jobs in a single API request by using a comma-separated - list of data frame analytics jobs or a wildcard expression. + .. raw:: html + +

Get data frame analytics job configuration info. + You can get information for multiple data frame analytics jobs in a single + API request by using a comma-separated list of data frame analytics jobs or a + wildcard expression.

+ ``_ @@ -1597,7 +1674,10 @@ async def get_data_frame_analytics_stats( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get data frame analytics jobs usage info. + .. raw:: html + +

Get data frame analytics jobs usage info.

+ ``_ @@ -1662,12 +1742,16 @@ async def get_datafeed_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get datafeeds usage info. You can get statistics for multiple datafeeds in a - single API request by using a comma-separated list of datafeeds or a wildcard - expression. You can get statistics for all datafeeds by using `_all`, by specifying - `*` as the ``, or by omitting the ``. If the datafeed is stopped, - the only information you receive is the `datafeed_id` and the `state`. This API - returns a maximum of 10,000 datafeeds. + .. raw:: html + +

Get datafeeds usage info. + You can get statistics for multiple datafeeds in a single API request by + using a comma-separated list of datafeeds or a wildcard expression. You can + get statistics for all datafeeds by using _all, by specifying * as the + <feed_id>, or by omitting the <feed_id>. If the datafeed is stopped, the + only information you receive is the datafeed_id and the state. + This API returns a maximum of 10,000 datafeeds.

+ ``_ @@ -1723,11 +1807,15 @@ async def get_datafeeds( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get datafeeds configuration info. You can get information for multiple datafeeds - in a single API request by using a comma-separated list of datafeeds or a wildcard - expression. You can get information for all datafeeds by using `_all`, by specifying - `*` as the ``, or by omitting the ``. This API returns a maximum - of 10,000 datafeeds. + .. raw:: html + +

Get datafeeds configuration info. + You can get information for multiple datafeeds in a single API request by + using a comma-separated list of datafeeds or a wildcard expression. You can + get information for all datafeeds by using _all, by specifying * as the + <feed_id>, or by omitting the <feed_id>. + This API returns a maximum of 10,000 datafeeds.

+ ``_ @@ -1790,7 +1878,11 @@ async def get_filters( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Get filters. You can get a single filter or all filters. + .. raw:: html + +

Get filters. + You can get a single filter or all filters.

+ ``_ @@ -1852,9 +1944,13 @@ async def get_influencers( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get anomaly detection job results for influencers. Influencers are the entities - that have contributed to, or are to blame for, the anomalies. Influencer results - are available only if an `influencer_field_name` is specified in the job configuration. + .. raw:: html + +

Get anomaly detection job results for influencers. + Influencers are the entities that have contributed to, or are to blame for, + the anomalies. Influencer results are available only if an + influencer_field_name is specified in the job configuration.

+ ``_ @@ -1935,7 +2031,10 @@ async def get_job_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get anomaly detection jobs usage info. + .. raw:: html + +

Get anomaly detection jobs usage info.

+ ``_ @@ -1992,11 +2091,14 @@ async def get_jobs( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get anomaly detection jobs configuration info. You can get information for multiple - anomaly detection jobs in a single API request by using a group name, a comma-separated - list of jobs, or a wildcard expression. You can get information for all anomaly - detection jobs by using `_all`, by specifying `*` as the ``, or by omitting - the ``. + .. raw:: html + +

Get anomaly detection jobs configuration info. + You can get information for multiple anomaly detection jobs in a single API + request by using a group name, a comma-separated list of jobs, or a wildcard + expression. You can get information for all anomaly detection jobs by using + _all, by specifying * as the <job_id>, or by omitting the <job_id>.

+ ``_ @@ -2057,9 +2159,12 @@ async def get_memory_stats( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get machine learning memory usage info. Get information about how machine learning - jobs and trained models are using memory, on each node, both within the JVM heap, - and natively, outside of the JVM. + .. raw:: html + +

Get machine learning memory usage info. + Get information about how machine learning jobs and trained models are using memory, + on each node, both within the JVM heap, and natively, outside of the JVM.

+ ``_ @@ -2114,7 +2219,10 @@ async def get_model_snapshot_upgrade_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get anomaly detection job model snapshot upgrade usage info. + .. raw:: html + +

Get anomaly detection job model snapshot upgrade usage info.

+ ``_ @@ -2185,7 +2293,10 @@ async def get_model_snapshots( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get model snapshots info. + .. raw:: html + +

Get model snapshots info.

+ ``_ @@ -2286,19 +2397,26 @@ async def get_overall_buckets( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get overall bucket results. Retrievs overall bucket results that summarize the - bucket results of multiple anomaly detection jobs. The `overall_score` is calculated - by combining the scores of all the buckets within the overall bucket span. First, - the maximum `anomaly_score` per anomaly detection job in the overall bucket is - calculated. Then the `top_n` of those scores are averaged to result in the `overall_score`. - This means that you can fine-tune the `overall_score` so that it is more or less - sensitive to the number of jobs that detect an anomaly at the same time. For - example, if you set `top_n` to `1`, the `overall_score` is the maximum bucket - score in the overall bucket. Alternatively, if you set `top_n` to the number - of jobs, the `overall_score` is high only when all jobs detect anomalies in that - overall bucket. If you set the `bucket_span` parameter (to a value greater than - its default), the `overall_score` is the maximum `overall_score` of the overall - buckets that have a span equal to the jobs' largest bucket span. + .. raw:: html + +

Get overall bucket results.

+

Retrieves overall bucket results that summarize the bucket results of + multiple anomaly detection jobs.

+

The overall_score is calculated by combining the scores of all the + buckets within the overall bucket span. First, the maximum + anomaly_score per anomaly detection job in the overall bucket is + calculated. Then the top_n of those scores are averaged to result in + the overall_score. This means that you can fine-tune the + overall_score so that it is more or less sensitive to the number of + jobs that detect an anomaly at the same time. For example, if you set + top_n to 1, the overall_score is the maximum bucket score in the + overall bucket. Alternatively, if you set top_n to the number of jobs, + the overall_score is high only when all jobs detect anomalies in that + overall bucket. If you set the bucket_span parameter (to a value + greater than its default), the overall_score is the maximum + overall_score of the overall buckets that have a span equal to the + jobs' largest bucket span.
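To illustrate the effect of top_n, a minimal hedged sketch with the async Python client; the job ids and threshold are invented:

```python
from elasticsearch import AsyncElasticsearch

async def overall_buckets(client: AsyncElasticsearch) -> None:
    # With top_n=2, each overall_score averages the two highest
    # per-job anomaly scores within the overall bucket span.
    resp = await client.ml.get_overall_buckets(
        job_id="job-1,job-2,job-3",
        top_n=2,
        overall_score=50.0,
    )
    print(resp["count"])
```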

+ ``_ @@ -2395,15 +2513,20 @@ async def get_records( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get anomaly records for an anomaly detection job. Records contain the detailed - analytical results. They describe the anomalous activity that has been identified - in the input data based on the detector configuration. There can be many anomaly - records depending on the characteristics and size of the input data. In practice, - there are often too many to be able to manually process them. The machine learning - features therefore perform a sophisticated aggregation of the anomaly records - into buckets. The number of record results depends on the number of anomalies - found in each bucket, which relates to the number of time series being modeled - and the number of detectors. + .. raw:: html + +

Get anomaly records for an anomaly detection job. + Records contain the detailed analytical results. They describe the anomalous + activity that has been identified in the input data based on the detector + configuration. + There can be many anomaly records depending on the characteristics and size + of the input data. In practice, there are often too many to be able to + manually process them. The machine learning features therefore perform a + sophisticated aggregation of the anomaly records into buckets. + The number of record results depends on the number of anomalies found in each + bucket, which relates to the number of time series being modeled and the + number of detectors.

+ ``_ @@ -2499,7 +2622,10 @@ async def get_trained_models( tags: t.Optional[t.Union[str, t.Sequence[str]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get trained model configuration info. + .. raw:: html + +

Get trained model configuration info.

+ ``_ @@ -2585,9 +2711,12 @@ async def get_trained_models_stats( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Get trained models usage info. You can get usage information for multiple trained - models in a single API request by using a comma-separated list of model IDs or - a wildcard expression. + .. raw:: html + +

Get trained models usage info. + You can get usage information for multiple trained + models in a single API request by using a comma-separated list of model IDs or a wildcard expression.

+ ``_ @@ -2650,7 +2779,10 @@ async def infer_trained_model( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Evaluate a trained model. + .. raw:: html + +

Evaluate a trained model.

+ ``_ @@ -2707,12 +2839,17 @@ async def info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get machine learning information. Get defaults and limits used by machine learning. - This endpoint is designed to be used by a user interface that needs to fully - understand machine learning configurations where some options are not specified, - meaning that the defaults should be used. This endpoint may be used to find out - what those defaults are. It also provides information about the maximum size - of machine learning jobs that could run in the current cluster configuration. + .. raw:: html + +

Get machine learning information. + Get defaults and limits used by machine learning. + This endpoint is designed to be used by a user interface that needs to fully + understand machine learning configurations where some options are not + specified, meaning that the defaults should be used. This endpoint may be + used to find out what those defaults are. It also provides information about + the maximum size of machine learning jobs that could run in the current + cluster configuration.

+ ``_ """ @@ -2752,12 +2889,16 @@ async def open_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Open anomaly detection jobs. An anomaly detection job must be opened to be ready - to receive and analyze data. It can be opened and closed multiple times throughout - its lifecycle. When you open a new job, it starts with an empty model. When you - open an existing job, the most recent model state is automatically loaded. The - job is ready to resume its analysis from where it left off, once new data is - received. + .. raw:: html + +

Open anomaly detection jobs. + An anomaly detection job must be opened to be ready to receive and analyze + data. It can be opened and closed multiple times throughout its lifecycle. + When you open a new job, it starts with an empty model. + When you open an existing job, the most recent model state is automatically + loaded. The job is ready to resume its analysis from where it left off, once + new data is received.

+ ``_ @@ -2811,7 +2952,10 @@ async def post_calendar_events( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Add scheduled events to the calendar. + .. raw:: html + +

Add scheduled events to the calendar.

+ ``_ @@ -2867,9 +3011,12 @@ async def post_data( reset_start: t.Optional[t.Union[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Send data to an anomaly detection job for analysis. IMPORTANT: For each job, - data can be accepted from only a single connection at a time. It is not currently - possible to post data to multiple jobs using wildcards or a comma-separated list. + .. raw:: html + +

Send data to an anomaly detection job for analysis.

+

IMPORTANT: For each job, data can be accepted from only a single connection at a time. + It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list.

+ ``_ @@ -2932,8 +3079,11 @@ async def preview_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Preview features used by data frame analytics. Previews the extracted features - used by a data frame analytics config. + .. raw:: html + +

Preview features used by data frame analytics. + Previews the extracted features used by a data frame analytics config.

+ ``_ @@ -2995,15 +3145,18 @@ async def preview_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Preview a datafeed. This API returns the first "page" of search results from - a datafeed. You can preview an existing datafeed or provide configuration details - for a datafeed and anomaly detection job in the API. The preview shows the structure - of the data that will be passed to the anomaly detection engine. IMPORTANT: When - Elasticsearch security features are enabled, the preview uses the credentials - of the user that called the API. However, when the datafeed starts it uses the - roles of the last user that created or updated the datafeed. To get a preview - that accurately reflects the behavior of the datafeed, use the appropriate credentials. - You can also use secondary authorization headers to supply the credentials. + .. raw:: html + +

Preview a datafeed. + This API returns the first "page" of search results from a datafeed. + You can preview an existing datafeed or provide configuration details for a datafeed + and anomaly detection job in the API. The preview shows the structure of the data + that will be passed to the anomaly detection engine. + IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that + called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the + datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials. + You can also use secondary authorization headers to supply the credentials.
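A minimal, hedged sketch of previewing an existing datafeed with the async Python client; the datafeed id is illustrative:

```python
from elasticsearch import AsyncElasticsearch

async def preview(client: AsyncElasticsearch) -> None:
    # Returns the first "page" of documents the datafeed would pass
    # to the anomaly detection job.
    resp = await client.ml.preview_datafeed(datafeed_id="datafeed-my-job")
    print(resp.body)
```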

+ ``_ @@ -3079,7 +3232,10 @@ async def put_calendar( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a calendar. + .. raw:: html + +

Create a calendar.

+ ``_ @@ -3133,7 +3289,10 @@ async def put_calendar_job( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Add anomaly detection job to calendar. + .. raw:: html + +

Add anomaly detection job to calendar.

+ ``_ @@ -3208,13 +3367,15 @@ async def put_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a data frame analytics job. This API creates a data frame analytics job - that performs an analysis on the source indices and stores the outcome in a destination - index. By default, the query used in the source configuration is `{"match_all": - {}}`. If the destination index does not exist, it is created automatically when - you start the job. If you supply only a subset of the regression or classification - parameters, hyperparameter optimization occurs. It determines a value for each - of the undefined parameters. + .. raw:: html + +

Create a data frame analytics job. + This API creates a data frame analytics job that performs an analysis on the + source indices and stores the outcome in a destination index. + By default, the query used in the source configuration is {"match_all": {}}.

+

If the destination index does not exist, it is created automatically when you start the job.

+

If you supply only a subset of the regression or classification parameters, hyperparameter optimization occurs. It determines a value for each of the undefined parameters.

+ ``_ @@ -3387,18 +3548,19 @@ async def put_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by - an anomaly detection job. You can associate only one datafeed with each anomaly - detection job. The datafeed contains a query that runs at a defined interval - (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay') - at each interval. By default, the datafeed uses the following query: `{"match_all": - {"boost": 1}}`. When Elasticsearch security features are enabled, your datafeed - remembers which roles the user who created it had at the time of creation and - runs the query using those same roles. If you provide secondary authorization - headers, those credentials are used instead. You must use Kibana, this API, or - the create anomaly detection jobs API to create a datafeed. Do not add a datafeed - directly to the `.ml-config` index. Do not give users `write` privileges on the - `.ml-config` index. + .. raw:: html + +

Create a datafeed. + Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. + You can associate only one datafeed with each anomaly detection job. + The datafeed contains a query that runs at a defined interval (frequency). + If you are concerned about delayed data, you can add a delay (query_delay) at each interval. By default, the datafeed uses the following query: {"match_all": {"boost": 1}}.

+

When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had + at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, + those credentials are used instead. + You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed + directly to the .ml-config index. Do not give users write privileges on the .ml-config index.
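A minimal sketch of creating a datafeed through this API with the async client; the ids, index pattern, and the frequency/query_delay values are placeholders, and the keyword names are assumed to mirror the datafeed body fields.

```
from elasticsearch import AsyncElasticsearch

async def create_datafeed(client: AsyncElasticsearch) -> None:
    await client.ml.put_datafeed(
        datafeed_id="datafeed-my-job",
        job_id="my-job",                    # the anomaly detection job to feed
        indices=["server-metrics-*"],
        query={"match_all": {"boost": 1}},  # the documented default query
        frequency="150s",                   # how often the query runs
        query_delay="75s",                  # tolerate late-arriving data
    )
```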

+ ``_ @@ -3555,9 +3717,12 @@ async def put_filter( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a filter. A filter contains a list of strings. It can be used by one or - more anomaly detection jobs. Specifically, filters are referenced in the `custom_rules` - property of detector configuration objects. + .. raw:: html + +

Create a filter. + A filter contains a list of strings. It can be used by one or more anomaly detection jobs. + Specifically, filters are referenced in the custom_rules property of detector configuration objects.
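A short sketch of creating a filter; the filter id, description, and items are placeholders, and the keyword names are assumed from the filter body fields.

```
from elasticsearch import AsyncElasticsearch

async def create_filter(client: AsyncElasticsearch) -> None:
    # The resulting filter id can then be referenced from a detector's custom_rules.
    await client.ml.put_filter(
        filter_id="safe_domains",
        description="Domains that should never be flagged",
        items=["example.com", "internal.example.org"],
    )
```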

+ ``_ @@ -3654,9 +3819,12 @@ async def put_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create an anomaly detection job. If you include a `datafeed_config`, you must - have read index privileges on the source index. If you include a `datafeed_config` - but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`. + .. raw:: html + +

Create an anomaly detection job. + If you include a datafeed_config, you must have read index privileges on the source index. + If you include a datafeed_config but do not provide a query, the datafeed uses {"match_all": {"boost": 1}}.
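A hedged sketch of creating a job; the job id, detector, and field names are placeholders, and analysis_config/data_description are assumed to be exposed as keyword arguments as elsewhere in this client.

```
from elasticsearch import AsyncElasticsearch

async def create_job(client: AsyncElasticsearch) -> None:
    await client.ml.put_job(
        job_id="my-job",
        analysis_config={
            "bucket_span": "15m",
            "detectors": [{"function": "mean", "field_name": "bytes"}],
        },
        data_description={"time_field": "timestamp"},
    )
```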

+ ``_ @@ -3860,8 +4028,11 @@ async def put_trained_model( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a trained model. Enable you to supply a trained model that is not created - by data frame analytics. + .. raw:: html + +

Create a trained model. + Enables you to supply a trained model that is not created by data frame analytics.

+ ``_ @@ -3963,19 +4134,26 @@ async def put_trained_model_alias( reassign: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a trained model alias. A trained model alias is a logical name - used to reference a single trained model. You can use aliases instead of trained - model identifiers to make it easier to reference your models. For example, you - can use aliases in inference aggregations and processors. An alias must be unique - and refer to only a single trained model. However, you can have multiple aliases - for each trained model. If you use this API to update an alias such that it references - a different trained model ID and the model uses a different type of data frame - analytics, an error occurs. For example, this situation occurs if you have a - trained model for regression analysis and a trained model for classification - analysis; you cannot reassign an alias from one type of trained model to another. - If you use this API to update an alias and there are very few input fields in - common between the old and new trained models for the model alias, the API returns - a warning. + .. raw:: html + +

Create or update a trained model alias. + A trained model alias is a logical name used to reference a single trained + model. + You can use aliases instead of trained model identifiers to make it easier to + reference your models. For example, you can use aliases in inference + aggregations and processors. + An alias must be unique and refer to only a single trained model. However, + you can have multiple aliases for each trained model. + If you use this API to update an alias such that it references a different + trained model ID and the model uses a different type of data frame analytics, + an error occurs. For example, this situation occurs if you have a trained + model for regression analysis and a trained model for classification + analysis; you cannot reassign an alias from one type of trained model to + another. + If you use this API to update an alias and there are very few input fields in + common between the old and new trained models for the model alias, the API + returns a warning.
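A small sketch of repointing an alias at a newer model; the alias and model ids are placeholders, and reassign is the flag from the signature above that applies when the alias already refers to a different model.

```
from elasticsearch import AsyncElasticsearch

async def promote_model(client: AsyncElasticsearch) -> None:
    await client.ml.put_trained_model_alias(
        model_alias="sentiment_latest",
        model_id="sentiment_model_v2",
        reassign=True,  # the alias already points at an older model
    )
```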

+ ``_ @@ -4033,7 +4211,10 @@ async def put_trained_model_definition_part( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create part of a trained model definition. + .. raw:: html + +

Create part of a trained model definition.

+ ``_ @@ -4110,9 +4291,12 @@ async def put_trained_model_vocabulary( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a trained model vocabulary. This API is supported only for natural language - processing (NLP) models. The vocabulary is stored in the index as described in - `inference_config.*.vocabulary` of the trained model definition. + .. raw:: html + +

Create a trained model vocabulary. + This API is supported only for natural language processing (NLP) models. + The vocabulary is stored in the index as described in inference_config.*.vocabulary of the trained model definition.

+ ``_ @@ -4168,9 +4352,14 @@ async def reset_job( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Reset an anomaly detection job. All model state and results are deleted. The - job is ready to start over as if it had just been created. It is not currently - possible to reset multiple jobs using wildcards or a comma separated list. + .. raw:: html + +

Reset an anomaly detection job. + All model state and results are deleted. The job is ready to start over as if + it had just been created. + It is not currently possible to reset multiple jobs using wildcards or a + comma-separated list.

+ ``_ @@ -4224,13 +4413,17 @@ async def revert_model_snapshot( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Revert to a snapshot. The machine learning features react quickly to anomalous - input, learning new behaviors in data. Highly anomalous input increases the variance - in the models whilst the system learns whether this is a new step-change in behavior - or a one-off event. In the case where this anomalous input is known to be a one-off, - then it might be appropriate to reset the model state to a time before this event. - For example, you might consider reverting to a saved snapshot after Black Friday - or a critical system failure. + .. raw:: html + +

Revert to a snapshot. + The machine learning features react quickly to anomalous input, learning new + behaviors in data. Highly anomalous input increases the variance in the + models whilst the system learns whether this is a new step-change in behavior + or a one-off event. In the case where this anomalous input is known to be a + one-off, then it might be appropriate to reset the model state to a time + before this event. For example, you might consider reverting to a saved + snapshot after Black Friday or a critical system failure.

+ ``_ @@ -4290,17 +4483,22 @@ async def set_upgrade_mode( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Set upgrade_mode for ML indices. Sets a cluster wide upgrade_mode setting that - prepares machine learning indices for an upgrade. When upgrading your cluster, - in some circumstances you must restart your nodes and reindex your machine learning - indices. In those circumstances, there must be no machine learning jobs running. - You can close the machine learning jobs, do the upgrade, then open all the jobs - again. Alternatively, you can use this API to temporarily halt tasks associated - with the jobs and datafeeds and prevent new jobs from opening. You can also use - this API during upgrades that do not require you to reindex your machine learning - indices, though stopping jobs is not a requirement in that case. You can see - the current value for the upgrade_mode setting by using the get machine learning - info API. + .. raw:: html + +

Set upgrade_mode for ML indices. + Sets a cluster wide upgrade_mode setting that prepares machine learning + indices for an upgrade. + When upgrading your cluster, in some circumstances you must restart your + nodes and reindex your machine learning indices. In those circumstances, + there must be no machine learning jobs running. You can close the machine + learning jobs, do the upgrade, then open all the jobs again. Alternatively, + you can use this API to temporarily halt tasks associated with the jobs and + datafeeds and prevent new jobs from opening. You can also use this API + during upgrades that do not require you to reindex your machine learning + indices, though stopping jobs is not a requirement in that case. + You can see the current value for the upgrade_mode setting by using the get + machine learning info API.

+ ``_ @@ -4346,16 +4544,21 @@ async def start_data_frame_analytics( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Start a data frame analytics job. A data frame analytics job can be started and - stopped multiple times throughout its lifecycle. If the destination index does - not exist, it is created automatically the first time you start the data frame - analytics job. The `index.number_of_shards` and `index.number_of_replicas` settings - for the destination index are copied from the source index. If there are multiple - source indices, the destination index copies the highest setting values. The - mappings for the destination index are also copied from the source indices. If - there are any mapping conflicts, the job fails to start. If the destination index - exists, it is used as is. You can therefore set up the destination index in advance - with custom settings and mappings. + .. raw:: html + +

Start a data frame analytics job. + A data frame analytics job can be started and stopped multiple times + throughout its lifecycle. + If the destination index does not exist, it is created automatically the + first time you start the data frame analytics job. The + index.number_of_shards and index.number_of_replicas settings for the + destination index are copied from the source index. If there are multiple + source indices, the destination index copies the highest setting values. The + mappings for the destination index are also copied from the source indices. + If there are any mapping conflicts, the job fails to start. + If the destination index exists, it is used as is. You can therefore set up + the destination index in advance with custom settings and mappings.

+ ``_ @@ -4407,17 +4610,18 @@ async def start_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Start datafeeds. A datafeed must be started in order to retrieve data from Elasticsearch. - A datafeed can be started and stopped multiple times throughout its lifecycle. - Before you can start a datafeed, the anomaly detection job must be open. Otherwise, - an error occurs. If you restart a stopped datafeed, it continues processing input - data from the next millisecond after it was stopped. If new data was indexed - for that exact millisecond between stopping and starting, it will be ignored. - When Elasticsearch security features are enabled, your datafeed remembers which - roles the last user to create or update it had at the time of creation or update - and runs the query using those same roles. If you provided secondary authorization - headers when you created or updated the datafeed, those credentials are used - instead. + .. raw:: html + +

Start datafeeds.

+

A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped + multiple times throughout its lifecycle.

+

Before you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs.

+

If you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped. + If new data was indexed for that exact millisecond between stopping and starting, it will be ignored.

+

When Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or + update it had at the time of creation or update and runs the query using those same roles. If you provided secondary + authorization headers when you created or updated the datafeed, those credentials are used instead.
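As a usage sketch, opening the job and then starting its datafeed; the ids and the optional start bound are placeholders assumed from the datafeed API.

```
from elasticsearch import AsyncElasticsearch

async def start_feed(client: AsyncElasticsearch) -> None:
    # The anomaly detection job must be open before its datafeed can start.
    await client.ml.open_job(job_id="my-job")
    await client.ml.start_datafeed(
        datafeed_id="datafeed-my-job",
        start="2024-11-01T00:00:00Z",  # optional lower bound for the data to analyze
    )
```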

+ ``_ @@ -4486,8 +4690,11 @@ async def start_trained_model_deployment( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Start a trained model deployment. It allocates the model to every machine learning - node. + .. raw:: html + +

Start a trained model deployment. + It allocates the model to every machine learning node.

+ ``_ @@ -4570,8 +4777,12 @@ async def stop_data_frame_analytics( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Stop data frame analytics jobs. A data frame analytics job can be started and - stopped multiple times throughout its lifecycle. + .. raw:: html + +

Stop data frame analytics jobs. + A data frame analytics job can be started and stopped multiple times + throughout its lifecycle.

+ ``_ @@ -4636,8 +4847,12 @@ async def stop_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. - A datafeed can be started and stopped multiple times throughout its lifecycle. + .. raw:: html + +

Stop datafeeds. + A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped + multiple times throughout its lifecycle.

+ ``_ @@ -4699,7 +4914,10 @@ async def stop_trained_model_deployment( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stop a trained model deployment. + .. raw:: html + +

Stop a trained model deployment.

+ ``_ @@ -4764,7 +4982,10 @@ async def update_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update a data frame analytics job. + .. raw:: html + +

Update a data frame analytics job.

+ ``_ @@ -4872,11 +5093,14 @@ async def update_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update a datafeed. You must stop and start the datafeed for the changes to be - applied. When Elasticsearch security features are enabled, your datafeed remembers - which roles the user who updated it had at the time of the update and runs the - query using those same roles. If you provide secondary authorization headers, - those credentials are used instead. + .. raw:: html + +

Update a datafeed. + You must stop and start the datafeed for the changes to be applied. + When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at + the time of the update and runs the query using those same roles. If you provide secondary authorization headers, + those credentials are used instead.

+ ``_ @@ -5039,8 +5263,11 @@ async def update_filter( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update a filter. Updates the description of a filter, adds items, or removes - items from the list. + .. raw:: html + +

Update a filter. + Updates the description of a filter, adds items, or removes items from the list.

+ ``_ @@ -5130,8 +5357,11 @@ async def update_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update an anomaly detection job. Updates certain properties of an anomaly detection - job. + .. raw:: html + +

Update an anomaly detection job. + Updates certain properties of an anomaly detection job.

+ ``_ @@ -5259,7 +5489,11 @@ async def update_model_snapshot( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update a snapshot. Updates certain properties of a snapshot. + .. raw:: html + +

Update a snapshot. + Updates certain properties of a snapshot.

+ ``_ @@ -5320,7 +5554,10 @@ async def update_trained_model_deployment( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update a trained model deployment. + .. raw:: html + +

Update a trained model deployment.

+ ``_ @@ -5379,14 +5616,19 @@ async def upgrade_job_snapshot( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Upgrade a snapshot. Upgrades an anomaly detection model snapshot to the latest - major version. Over time, older snapshot formats are deprecated and removed. - Anomaly detection jobs support only snapshots that are from the current or previous - major version. This API provides a means to upgrade a snapshot to the current - major version. This aids in preparing the cluster for an upgrade to the next - major version. Only one snapshot per anomaly detection job can be upgraded at - a time and the upgraded snapshot cannot be the current snapshot of the anomaly - detection job. + .. raw:: html + +

Upgrade a snapshot. + Upgrades an anomaly detection model snapshot to the latest major version. + Over time, older snapshot formats are deprecated and removed. Anomaly + detection jobs support only snapshots that are from the current or previous + major version. + This API provides a means to upgrade a snapshot to the current major version. + This aids in preparing the cluster for an upgrade to the next major version. + Only one snapshot per anomaly detection job can be upgraded at a time and the + upgraded snapshot cannot be the current snapshot of the anomaly detection + job.

+ ``_ @@ -5462,7 +5704,10 @@ async def validate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Validate an anomaly detection job. + .. raw:: html + +

Validate an anomaly detection job.

+ ``_ @@ -5532,7 +5777,10 @@ async def validate_detector( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Validate an anomaly detection job. + .. raw:: html + +

Validate an anomaly detection job.

+ ``_ diff --git a/elasticsearch/_async/client/monitoring.py b/elasticsearch/_async/client/monitoring.py index 2439d73d7..d5e5b5252 100644 --- a/elasticsearch/_async/client/monitoring.py +++ b/elasticsearch/_async/client/monitoring.py @@ -42,8 +42,11 @@ async def bulk( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Send monitoring data. This API is used by the monitoring features to send monitoring - data. + .. raw:: html + +

Send monitoring data. + This API is used by the monitoring features to send monitoring data.

+ ``_ diff --git a/elasticsearch/_async/client/nodes.py b/elasticsearch/_async/client/nodes.py index 9dfdb9b67..99d8fb209 100644 --- a/elasticsearch/_async/client/nodes.py +++ b/elasticsearch/_async/client/nodes.py @@ -44,8 +44,11 @@ async def clear_repositories_metering_archive( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear the archived repositories metering. Clear the archived repositories metering - information in the cluster. + .. raw:: html + +

Clear the archived repositories metering. + Clear the archived repositories metering information in the cluster.

+ ``_ @@ -94,11 +97,13 @@ async def get_repositories_metering_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get cluster repositories metering. Get repositories metering information for - a cluster. This API exposes monotonically non-decreasing counters and it is expected - that clients would durably store the information needed to compute aggregations - over a period of time. Additionally, the information exposed by this API is volatile, - meaning that it will not be present after node restarts. + .. raw:: html + +

Get cluster repositories metering. + Get repositories metering information for a cluster. + This API exposes monotonically non-decreasing counters and it is expected that clients would durably store the information needed to compute aggregations over a period of time. + Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts.

+ ``_ @@ -150,9 +155,12 @@ async def hot_threads( ] = None, ) -> TextApiResponse: """ - Get the hot threads for nodes. Get a breakdown of the hot threads on each selected - node in the cluster. The output is plain text with a breakdown of the top hot - threads for each node. + .. raw:: html + +

Get the hot threads for nodes. + Get a breakdown of the hot threads on each selected node in the cluster. + The output is plain text with a breakdown of the top hot threads for each node.

+ ``_ @@ -221,8 +229,11 @@ async def info( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get node information. By default, the API returns all attributes and core settings - for cluster nodes. + .. raw:: html + +

Get node information. + By default, the API returns all attributes and core settings for cluster nodes.

+ ``_ @@ -286,18 +297,16 @@ async def reload_secure_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Reload the keystore on nodes in the cluster. Secure settings are stored in an - on-disk keystore. Certain of these settings are reloadable. That is, you can - change them on disk and reload them without restarting any nodes in the cluster. - When you have updated reloadable secure settings in your keystore, you can use - this API to reload those settings on each node. When the Elasticsearch keystore - is password protected and not simply obfuscated, you must provide the password - for the keystore when you reload the secure settings. Reloading the settings - for the whole cluster assumes that the keystores for all nodes are protected - with the same password; this method is allowed only when inter-node communications - are encrypted. Alternatively, you can reload the secure settings on each node - by locally accessing the API and passing the node-specific Elasticsearch keystore - password. + .. raw:: html + +

Reload the keystore on nodes in the cluster.

+

Secure settings are stored in an on-disk keystore. Certain of these settings are reloadable. + That is, you can change them on disk and reload them without restarting any nodes in the cluster. + When you have updated reloadable secure settings in your keystore, you can use this API to reload those settings on each node.

+

When the Elasticsearch keystore is password protected and not simply obfuscated, you must provide the password for the keystore when you reload the secure settings. + Reloading the settings for the whole cluster assumes that the keystores for all nodes are protected with the same password; this method is allowed only when inter-node communications are encrypted. + Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password.

+ ``_ @@ -367,8 +376,12 @@ async def stats( types: t.Optional[t.Sequence[str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get node statistics. Get statistics for nodes in a cluster. By default, all stats - are returned. You can limit the returned information by using metrics. + .. raw:: html + +

Get node statistics. + Get statistics for nodes in a cluster. + By default, all stats are returned. You can limit the returned information by using metrics.

+ ``_ @@ -480,7 +493,10 @@ async def usage( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get feature usage information. + .. raw:: html + +

Get feature usage information.

+ ``_ diff --git a/elasticsearch/_async/client/query_rules.py b/elasticsearch/_async/client/query_rules.py index 3380840c5..9e7f38b4b 100644 --- a/elasticsearch/_async/client/query_rules.py +++ b/elasticsearch/_async/client/query_rules.py @@ -37,9 +37,12 @@ async def delete_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a query rule. Delete a query rule within a query ruleset. This is a destructive - action that is only recoverable by re-adding the same rule with the create or - update query rule API. + .. raw:: html + +

Delete a query rule. + Delete a query rule within a query ruleset. + This is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API.

+ ``_ @@ -87,8 +90,12 @@ async def delete_ruleset( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a query ruleset. Remove a query ruleset and its associated data. This - is a destructive action that is not recoverable. + .. raw:: html + +

Delete a query ruleset. + Remove a query ruleset and its associated data. + This is a destructive action that is not recoverable.

+ ``_ @@ -129,7 +136,11 @@ async def get_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a query rule. Get details about a query rule within a query ruleset. + .. raw:: html + +

Get a query rule. + Get details about a query rule within a query ruleset.

+ ``_ @@ -177,7 +188,11 @@ async def get_ruleset( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a query ruleset. Get details about a query ruleset. + .. raw:: html + +

Get a query ruleset. + Get details about a query ruleset.

+ ``_ @@ -220,7 +235,11 @@ async def list_rulesets( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Get all query rulesets. Get summarized information about the query rulesets. + .. raw:: html + +

Get all query rulesets. + Get summarized information about the query rulesets.

+ ``_ @@ -273,13 +292,15 @@ async def put_rule( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a query rule. Create or update a query rule within a query ruleset. - IMPORTANT: Due to limitations within pinned queries, you can only pin documents - using ids or docs, but cannot use both in single rule. It is advised to use one - or the other in query rulesets, to avoid errors. Additionally, pinned queries - have a maximum limit of 100 pinned hits. If multiple matching rules pin more - than 100 documents, only the first 100 documents are pinned in the order they - are specified in the ruleset. + .. raw:: html + +

Create or update a query rule. + Create or update a query rule within a query ruleset.

+

IMPORTANT: Due to limitations within pinned queries, you can only pin documents using ids or docs, but cannot use both in a single rule. + It is advised to use one or the other in query rulesets, to avoid errors. + Additionally, pinned queries have a maximum limit of 100 pinned hits. + If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.

+ ``_ @@ -357,14 +378,16 @@ async def put_ruleset( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a query ruleset. There is a limit of 100 rules per ruleset. - This limit can be increased by using the `xpack.applications.rules.max_rules_per_ruleset` - cluster setting. IMPORTANT: Due to limitations within pinned queries, you can - only select documents using `ids` or `docs`, but cannot use both in single rule. - It is advised to use one or the other in query rulesets, to avoid errors. Additionally, - pinned queries have a maximum limit of 100 pinned hits. If multiple matching - rules pin more than 100 documents, only the first 100 documents are pinned in - the order they are specified in the ruleset. + .. raw:: html + +

Create or update a query ruleset. + There is a limit of 100 rules per ruleset. + This limit can be increased by using the xpack.applications.rules.max_rules_per_ruleset cluster setting.

+

IMPORTANT: Due to limitations within pinned queries, you can only select documents using ids or docs, but cannot use both in a single rule. + It is advised to use one or the other in query rulesets, to avoid errors. + Additionally, pinned queries have a maximum limit of 100 pinned hits. + If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.

+ ``_ @@ -417,8 +440,11 @@ async def test( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Test a query ruleset. Evaluate match criteria against a query ruleset to identify - the rules that would match that criteria. + .. raw:: html + +

Test a query ruleset. + Evaluate match criteria against a query ruleset to identify the rules that would match that criteria.

+ ``_ diff --git a/elasticsearch/_async/client/rollup.py b/elasticsearch/_async/client/rollup.py index fcf4dda78..94dc52ae3 100644 --- a/elasticsearch/_async/client/rollup.py +++ b/elasticsearch/_async/client/rollup.py @@ -43,20 +43,29 @@ async def delete_job( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a rollup job. A job must be stopped before it can be deleted. If you attempt - to delete a started job, an error occurs. Similarly, if you attempt to delete - a nonexistent job, an exception occurs. IMPORTANT: When you delete a job, you - remove only the process that is actively monitoring and rolling up data. The - API does not delete any previously rolled up data. This is by design; a user - may wish to roll up a static data set. Because the data set is static, after - it has been fully rolled up there is no need to keep the indexing rollup job - around (as there will be no new data). Thus the job can be deleted, leaving behind - the rolled up data for analysis. If you wish to also remove the rollup data and - the rollup index contains the data for only a single job, you can delete the - whole rollup index. If the rollup index stores data from several jobs, you must - issue a delete-by-query that targets the rollup job's identifier in the rollup - index. For example: ``` POST my_rollup_index/_delete_by_query { "query": { "term": - { "_rollup.id": "the_rollup_job_id" } } } ``` + .. raw:: html + +

Delete a rollup job.

+

A job must be stopped before it can be deleted. + If you attempt to delete a started job, an error occurs. + Similarly, if you attempt to delete a nonexistent job, an exception occurs.

+

IMPORTANT: When you delete a job, you remove only the process that is actively monitoring and rolling up data. + The API does not delete any previously rolled up data. + This is by design; a user may wish to roll up a static data set. + Because the data set is static, after it has been fully rolled up there is no need to keep the indexing rollup job around (as there will be no new data). + Thus the job can be deleted, leaving behind the rolled up data for analysis. + If you wish to also remove the rollup data and the rollup index contains the data for only a single job, you can delete the whole rollup index. + If the rollup index stores data from several jobs, you must issue a delete-by-query that targets the rollup job's identifier in the rollup index. For example:

+
POST my_rollup_index/_delete_by_query
+          {
+            "query": {
+              "term": {
+                "_rollup.id": "the_rollup_job_id"
+              }
+            }
+          }
+          
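The same clean-up, expressed with the Python client for comparison; the index name and job id are the placeholders from the example request above.

```
from elasticsearch import AsyncElasticsearch

async def purge_rollup_data(client: AsyncElasticsearch) -> None:
    # Delete only the documents produced by one rollup job in a shared rollup index.
    await client.delete_by_query(
        index="my_rollup_index",
        query={"term": {"_rollup.id": "the_rollup_job_id"}},
    )
```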
+ ``_ @@ -97,11 +106,14 @@ async def get_jobs( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get rollup job information. Get the configuration, stats, and status of rollup - jobs. NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs. - If a job was created, ran for a while, then was deleted, the API does not return - any details about it. For details about a historical rollup job, the rollup capabilities - API may be more useful. + .. raw:: html + +

Get rollup job information. + Get the configuration, stats, and status of rollup jobs.

+

NOTE: This API returns only active (both STARTED and STOPPED) jobs. + If a job was created, ran for a while, then was deleted, the API does not return any details about it. + For details about a historical rollup job, the rollup capabilities API may be more useful.

+ ``_ @@ -146,15 +158,18 @@ async def get_rollup_caps( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the rollup job capabilities. Get the capabilities of any rollup jobs that - have been configured for a specific index or index pattern. This API is useful - because a rollup job is often configured to rollup only a subset of fields from - the source index. Furthermore, only certain aggregations can be configured for - various fields, leading to a limited subset of functionality depending on that - configuration. This API enables you to inspect an index and determine: 1. Does - this index have associated rollup data somewhere in the cluster? 2. If yes to - the first question, what fields were rolled up, what aggregations can be performed, - and where does the data live? + .. raw:: html + +

Get the rollup job capabilities. + Get the capabilities of any rollup jobs that have been configured for a specific index or index pattern.

+

This API is useful because a rollup job is often configured to rollup only a subset of fields from the source index. + Furthermore, only certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on that configuration. + This API enables you to inspect an index and determine:

+
  1. Does this index have associated rollup data somewhere in the cluster?
  2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live?
+ ``_ @@ -199,12 +214,16 @@ async def get_rollup_index_caps( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the rollup index capabilities. Get the rollup capabilities of all jobs inside - of a rollup index. A single rollup index may store the data for multiple rollup - jobs and may have a variety of capabilities depending on those jobs. This API - enables you to determine: * What jobs are stored in an index (or indices specified - via a pattern)? * What target indices were rolled up, what fields were used in - those rollups, and what aggregations can be performed on each job? + .. raw:: html + +

Get the rollup index capabilities. + Get the rollup capabilities of all jobs inside of a rollup index. + A single rollup index may store the data for multiple rollup jobs and may have a variety of capabilities depending on those jobs. This API enables you to determine:

+
  • What jobs are stored in an index (or indices specified via a pattern)?
  • What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job?
+ ``_ @@ -267,16 +286,14 @@ async def put_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a rollup job. WARNING: From 8.15.0, calling this API in a cluster with - no rollup usage will fail with a message about the deprecation and planned removal - of rollup features. A cluster needs to contain either a rollup job or a rollup - index in order for this API to be allowed to run. The rollup job configuration - contains all the details about how the job should run, when it indexes documents, - and what future queries will be able to run against the rollup index. There are - three main sections to the job configuration: the logistical details about the - job (for example, the cron schedule), the fields that are used for grouping, - and what metrics to collect for each group. Jobs are created in a `STOPPED` state. - You can start them with the start rollup jobs API. + .. raw:: html + +

Create a rollup job.

+

WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will fail with a message about the deprecation and planned removal of rollup features. A cluster needs to contain either a rollup job or a rollup index in order for this API to be allowed to run.

+

The rollup job configuration contains all the details about how the job should run, when it indexes documents, and what future queries will be able to run against the rollup index.

+

There are three main sections to the job configuration: the logistical details about the job (for example, the cron schedule), the fields that are used for grouping, and what metrics to collect for each group.

+

Jobs are created in a STOPPED state. You can start them with the start rollup jobs API.

+ ``_ @@ -393,25 +410,38 @@ async def rollup_search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Search rolled-up data. The rollup search endpoint is needed because, internally, - rolled-up documents utilize a different document structure than the original - data. It rewrites standard Query DSL into a format that matches the rollup documents - then takes the response and rewrites it back to what a client would expect given - the original query. The request body supports a subset of features from the regular - search API. The following functionality is not available: `size`: Because rollups - work on pre-aggregated data, no search hits can be returned and so size must - be set to zero or omitted entirely. `highlighter`, `suggestors`, `post_filter`, - `profile`, `explain`: These are similarly disallowed. **Searching both historical - rollup and non-rollup data** The rollup search API has the capability to search - across both "live" non-rollup data and the aggregated rollup data. This is done - by simply adding the live indices to the URI. For example: ``` GET sensor-1,sensor_rollup/_rollup_search - { "size": 0, "aggregations": { "max_temperature": { "max": { "field": "temperature" - } } } } ``` The rollup search endpoint does two things when the search runs: - * The original request is sent to the non-rollup index unaltered. * A rewritten - version of the original request is sent to the rollup index. When the two responses - are received, the endpoint rewrites the rollup response and merges the two together. - During the merging process, if there is any overlap in buckets between the two - responses, the buckets from the non-rollup index are used. + .. raw:: html + +

Search rolled-up data. + The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. + It rewrites standard Query DSL into a format that matches the rollup documents then takes the response and rewrites it back to what a client would expect given the original query.

+

The request body supports a subset of features from the regular search API. + The following functionality is not available:

+

size: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely. + highlighter, suggestors, post_filter, profile, explain: These are similarly disallowed.

+

Searching both historical rollup and non-rollup data

+

The rollup search API has the capability to search across both "live" non-rollup data and the aggregated rollup data. + This is done by simply adding the live indices to the URI. For example:

+
GET sensor-1,sensor_rollup/_rollup_search
+          {
+            "size": 0,
+            "aggregations": {
+               "max_temperature": {
+                "max": {
+                  "field": "temperature"
+                }
+              }
+            }
+          }
+          
+

The rollup search endpoint does two things when the search runs:

+
  • The original request is sent to the non-rollup index unaltered.
  • A rewritten version of the original request is sent to the rollup index.
+

When the two responses are received, the endpoint rewrites the rollup response and merges the two together. + During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used.
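A hedged Python rendering of the example request above; the index names and aggregation are the same placeholders, and the aggs/size keywords are assumed to mirror the request body.

```
from elasticsearch import AsyncElasticsearch

async def max_temperature(client: AsyncElasticsearch) -> float:
    # Search the live index and the rollup index together; hits are disabled (size=0).
    resp = await client.rollup.rollup_search(
        index="sensor-1,sensor_rollup",
        size=0,
        aggs={"max_temperature": {"max": {"field": "temperature"}}},
    )
    return resp["aggregations"]["max_temperature"]["value"]
```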

+ ``_ @@ -484,8 +514,12 @@ async def start_job( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Start rollup jobs. If you try to start a job that does not exist, an exception - occurs. If you try to start a job that is already started, nothing happens. + .. raw:: html + +

Start rollup jobs. + If you try to start a job that does not exist, an exception occurs. + If you try to start a job that is already started, nothing happens.

+ ``_ @@ -528,14 +562,18 @@ async def stop_job( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stop rollup jobs. If you try to stop a job that does not exist, an exception - occurs. If you try to stop a job that is already stopped, nothing happens. Since - only a stopped job can be deleted, it can be useful to block the API until the - indexer has fully stopped. This is accomplished with the `wait_for_completion` - query parameter, and optionally a timeout. For example: ``` POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s - ``` The parameter blocks the API call from returning until either the job has - moved to STOPPED or the specified time has elapsed. If the specified time elapses - without the job moving to STOPPED, a timeout exception occurs. + .. raw:: html + +

Stop rollup jobs. + If you try to stop a job that does not exist, an exception occurs. + If you try to stop a job that is already stopped, nothing happens.

+

Since only a stopped job can be deleted, it can be useful to block the API until the indexer has fully stopped. + This is accomplished with the wait_for_completion query parameter, and optionally a timeout. For example:

+
POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s
+          
+

The parameter blocks the API call from returning until either the job has moved to STOPPED or the specified time has elapsed. + If the specified time elapses without the job moving to STOPPED, a timeout exception occurs.
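A short sketch of the stop-then-delete sequence this enables; the job id and timeout are the placeholders used above.

```
from elasticsearch import AsyncElasticsearch

async def stop_and_delete(client: AsyncElasticsearch) -> None:
    # Block until the indexer has fully stopped (or raise on timeout), then delete.
    await client.rollup.stop_job(id="sensor", wait_for_completion=True, timeout="10s")
    await client.rollup.delete_job(id="sensor")
```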

+ ``_ diff --git a/elasticsearch/_async/client/search_application.py b/elasticsearch/_async/client/search_application.py index 72e1ca23e..724ccdcff 100644 --- a/elasticsearch/_async/client/search_application.py +++ b/elasticsearch/_async/client/search_application.py @@ -43,8 +43,11 @@ async def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a search application. Remove a search application and its associated alias. - Indices attached to the search application are not removed. + .. raw:: html + +

Delete a search application. + Remove a search application and its associated alias. Indices attached to the search application are not removed.

+ ``_ @@ -85,8 +88,11 @@ async def delete_behavioral_analytics( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a behavioral analytics collection. The associated data stream is also - deleted. + .. raw:: html + +

Delete a behavioral analytics collection. + The associated data stream is also deleted.

+ ``_ @@ -127,7 +133,10 @@ async def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get search application details. + .. raw:: html + +

Get search application details.

+ ``_ @@ -168,7 +177,10 @@ async def get_behavioral_analytics( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get behavioral analytics collections. + .. raw:: html + +

Get behavioral analytics collections.

+ ``_ @@ -216,7 +228,11 @@ async def list( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Get search applications. Get information about search applications. + .. raw:: html + +

Get search applications. + Get information about search applications.

+ ``_ @@ -269,7 +285,10 @@ async def post_behavioral_analytics_event( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a behavioral analytics collection event. + .. raw:: html + +

Create a behavioral analytics collection event.

+ ``_ @@ -333,7 +352,10 @@ async def put( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a search application. + .. raw:: html + +

Create or update a search application.

+ ``_ @@ -387,7 +409,10 @@ async def put_behavioral_analytics( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a behavioral analytics collection. + .. raw:: html + +

Create a behavioral analytics collection.

+ ``_ @@ -433,13 +458,14 @@ async def render_query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Render a search application query. Generate an Elasticsearch query using the - specified query parameters and the search template associated with the search - application or a default template if none is specified. If a parameter used in - the search template is not specified in `params`, the parameter's default value - will be used. The API returns the specific Elasticsearch query that would be - generated and run by calling the search application search API. You must have - `read` privileges on the backing alias of the search application. + .. raw:: html + +

Render a search application query. + Generate an Elasticsearch query using the specified query parameters and the search template associated with the search application or a default template if none is specified. + If a parameter used in the search template is not specified in params, the parameter's default value will be used. + The API returns the specific Elasticsearch query that would be generated and run by calling the search application search API.

+

You must have read privileges on the backing alias of the search application.

+ ``_ @@ -498,10 +524,12 @@ async def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Run a search application search. Generate and run an Elasticsearch query that - uses the specified query parameteter and the search template associated with - the search application or default template. Unspecified template parameters are - assigned their default values if applicable. + .. raw:: html + +

Run a search application search. + Generate and run an Elasticsearch query that uses the specified query parameters and the search template associated with the search application or default template. + Unspecified template parameters are assigned their default values if applicable.

+ ``_ diff --git a/elasticsearch/_async/client/searchable_snapshots.py b/elasticsearch/_async/client/searchable_snapshots.py index ac3975751..6d9ef50cc 100644 --- a/elasticsearch/_async/client/searchable_snapshots.py +++ b/elasticsearch/_async/client/searchable_snapshots.py @@ -44,8 +44,11 @@ async def cache_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get cache statistics. Get statistics about the shared cache for partially mounted - indices. + .. raw:: html + +

Get cache statistics. + Get statistics about the shared cache for partially mounted indices.

+ ``_ @@ -102,8 +105,11 @@ async def clear_cache( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear the cache. Clear indices and data streams from the shared cache for partially - mounted indices. + .. raw:: html + +

Clear the cache. + Clear indices and data streams from the shared cache for partially mounted indices.

+ ``_ @@ -176,9 +182,13 @@ async def mount( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use - this API for snapshots managed by index lifecycle management (ILM). Manually - mounting ILM-managed snapshots can interfere with ILM processes. + .. raw:: html + +

Mount a snapshot. + Mount a snapshot as a searchable snapshot index. + Do not use this API for snapshots managed by index lifecycle management (ILM). + Manually mounting ILM-managed snapshots can interfere with ILM processes.

+ ``_ @@ -263,7 +273,10 @@ async def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get searchable snapshot statistics. + .. raw:: html + +

Get searchable snapshot statistics.

+ ``_ diff --git a/elasticsearch/_async/client/security.py b/elasticsearch/_async/client/security.py index 5be7612a3..2fb66dddb 100644 --- a/elasticsearch/_async/client/security.py +++ b/elasticsearch/_async/client/security.py @@ -44,21 +44,19 @@ async def activate_user_profile( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Activate a user profile. Create or update a user profile on behalf of another - user. NOTE: The user profile feature is designed only for use by Kibana and Elastic's - Observability, Enterprise Search, and Elastic Security solutions. Individual - users and external applications should not call this API directly. The calling - application must have either an `access_token` or a combination of `username` - and `password` for the user that the profile document is intended for. Elastic - reserves the right to change or remove this feature in future releases without - prior notice. This API creates or updates a profile document for end users with - information that is extracted from the user's authentication object including - `username`, `full_name,` `roles`, and the authentication realm. For example, - in the JWT `access_token` case, the profile user's `username` is extracted from - the JWT token claim pointed to by the `claims.principal` setting of the JWT realm - that authenticated the token. When updating a profile document, the API enables - the document if it was disabled. Any updates do not change existing content for - either the `labels` or `data` fields. + .. raw:: html + +

Activate a user profile.

+

Create or update a user profile on behalf of another user.

+

NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. + Individual users and external applications should not call this API directly. + The calling application must have either an access_token or a combination of username and password for the user that the profile document is intended for. + Elastic reserves the right to change or remove this feature in future releases without prior notice.

+

This API creates or updates a profile document for end users with information that is extracted from the user's authentication object including username, full_name, roles, and the authentication realm. + For example, in the JWT access_token case, the profile user's username is extracted from the JWT token claim pointed to by the claims.principal setting of the JWT realm that authenticated the token.

+

When updating a profile document, the API enables the document if it was disabled. + Any updates do not change existing content for either the labels or data fields.

+ ``_ @@ -117,12 +115,14 @@ async def authenticate( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Authenticate a user. Authenticates a user and returns information about the authenticated - user. Include the user information in a [basic auth header](https://en.wikipedia.org/wiki/Basic_access_authentication). - A successful call returns a JSON structure that shows user information such as - their username, the roles that are assigned to the user, any assigned metadata, - and information about the realms that authenticated and authorized the user. - If the user cannot be authenticated, this API returns a 401 status code. + .. raw:: html + +

Authenticate a user.

+

Authenticates a user and returns information about the authenticated user. + Include the user information in a basic auth header. + A successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user. + If the user cannot be authenticated, this API returns a 401 status code.
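For illustration, authenticating with basic credentials on the async client; the endpoint and credentials are placeholders.

```
from elasticsearch import AsyncElasticsearch

async def whoami() -> None:
    client = AsyncElasticsearch(
        "https://localhost:9200",
        basic_auth=("elastic", "<password>"),  # placeholder credentials
    )
    info = await client.security.authenticate()
    print(info["username"], info["roles"])
    await client.close()
```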

+ ``_ """ @@ -164,9 +164,12 @@ async def bulk_delete_role( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Bulk delete roles. The role management APIs are generally the preferred way to - manage roles, rather than using file-based role management. The bulk delete roles - API cannot delete roles that are defined in roles files. + .. raw:: html + +

Bulk delete roles.

+

The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. + The bulk delete roles API cannot delete roles that are defined in roles files.

+ ``_ @@ -222,9 +225,12 @@ async def bulk_put_role( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Bulk create or update roles. The role management APIs are generally the preferred - way to manage roles, rather than using file-based role management. The bulk create - or update roles API cannot update roles that are defined in roles files. + .. raw:: html + +

Bulk create or update roles.

+

The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. + The bulk create or update roles API cannot update roles that are defined in roles files.

+ ``_ @@ -280,23 +286,19 @@ async def bulk_update_api_keys( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Bulk update API keys. Update the attributes for multiple API keys. IMPORTANT: - It is not possible to use an API key as the authentication credential for this - API. To update API keys, the owner user's credentials are required. This API - is similar to the update API key API but enables you to apply the same update - to multiple API keys in one API call. This operation can greatly improve performance - over making individual updates. It is not possible to update expired or invalidated - API keys. This API supports updates to API key access scope, metadata and expiration. - The access scope of each API key is derived from the `role_descriptors` you specify - in the request and a snapshot of the owner user's permissions at the time of - the request. The snapshot of the owner's permissions is updated automatically - on every call. IMPORTANT: If you don't specify `role_descriptors` in the request, - a call to this API might still change an API key's access scope. This change - can occur if the owner user's permissions have changed since the API key was - created or last modified. A successful request returns a JSON structure that - contains the IDs of all updated API keys, the IDs of API keys that already had - the requested changes and did not require an update, and error details for any - failed update. + .. raw:: html + +

Bulk update API keys. + Update the attributes for multiple API keys.

+

IMPORTANT: It is not possible to use an API key as the authentication credential for this API. To update API keys, the owner user's credentials are required.

+

This API is similar to the update API key API but enables you to apply the same update to multiple API keys in one API call. This operation can greatly improve performance over making individual updates.

+

It is not possible to update expired or invalidated API keys.

+

This API supports updates to API key access scope, metadata and expiration. + The access scope of each API key is derived from the role_descriptors you specify in the request and a snapshot of the owner user's permissions at the time of the request. + The snapshot of the owner's permissions is updated automatically on every call.

+

IMPORTANT: If you don't specify role_descriptors in the request, a call to this API might still change an API key's access scope. This change can occur if the owner user's permissions have changed since the API key was created or last modified.

+

A successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and error details for any failed update.

+ ``_ @@ -370,8 +372,11 @@ async def change_password( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Change passwords. Change the passwords of users in the native realm and built-in - users. + .. raw:: html + +

Change passwords.

+

Change the passwords of users in the native realm and built-in users.
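A minimal sketch, assuming an AsyncElasticsearch client named es; the user and password are placeholders:

    await es.security.change_password(
        username="jacknich",               # hypothetical native-realm user
        password="a-long-new-password",
    )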

+ ``_ @@ -433,8 +438,12 @@ async def clear_api_key_cache( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear the API key cache. Evict a subset of all entries from the API key cache. - The cache is also automatically cleared on state changes of the security index. + .. raw:: html + +

Clear the API key cache.

+

Evict a subset of all entries from the API key cache. + The cache is also automatically cleared on state changes of the security index.

+ ``_ @@ -475,9 +484,12 @@ async def clear_cached_privileges( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear the privileges cache. Evict privileges from the native application privilege - cache. The cache is also automatically cleared for applications that have their - privileges updated. + .. raw:: html + +

Clear the privileges cache.

+

Evict privileges from the native application privilege cache. + The cache is also automatically cleared for applications that have their privileges updated.

+ ``_ @@ -519,12 +531,15 @@ async def clear_cached_realms( usernames: t.Optional[t.Sequence[str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear the user cache. Evict users from the user cache. You can completely clear - the cache or evict specific users. User credentials are cached in memory on each - node to avoid connecting to a remote authentication service or hitting the disk - for every incoming request. There are realm settings that you can use to configure - the user cache. For more information, refer to the documentation about controlling - the user cache. + .. raw:: html + +

Clear the user cache.

+

Evict users from the user cache. + You can completely clear the cache or evict specific users.

+

User credentials are cached in memory on each node to avoid connecting to a remote authentication service or hitting the disk for every incoming request. + There are realm settings that you can use to configure the user cache. + For more information, refer to the documentation about controlling the user cache.

+ ``_ @@ -570,7 +585,11 @@ async def clear_cached_roles( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear the roles cache. Evict roles from the native role cache. + .. raw:: html + +

Clear the roles cache.

+

Evict roles from the native role cache.

+ ``_ @@ -614,13 +633,15 @@ async def clear_cached_service_tokens( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear service account token caches. Evict a subset of all entries from the service - account token caches. Two separate caches exist for service account tokens: one - cache for tokens backed by the `service_tokens` file, and another for tokens - backed by the `.security` index. This API clears matching entries from both caches. - The cache for service account tokens backed by the `.security` index is cleared - automatically on state changes of the security index. The cache for tokens backed - by the `service_tokens` file is cleared automatically on file changes. + .. raw:: html + +

Clear service account token caches.

+

Evict a subset of all entries from the service account token caches. + Two separate caches exist for service account tokens: one cache for tokens backed by the service_tokens file, and another for tokens backed by the .security index. + This API clears matching entries from both caches.

+

The cache for service account tokens backed by the .security index is cleared automatically on state changes of the security index. + The cache for tokens backed by the service_tokens file is cleared automatically on file changes.

+ ``_ @@ -681,16 +702,18 @@ async def create_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create an API key. Create an API key for access without requiring basic authentication. - IMPORTANT: If the credential that is used to authenticate this request is an - API key, the derived API key cannot have any privileges. If you specify privileges, - the API returns an error. A successful request returns a JSON structure that - contains the API key, its unique id, and its name. If applicable, it also returns - expiration information for the API key in milliseconds. NOTE: By default, API - keys never expire. You can specify expiration information when you create the - API keys. The API keys are created by the Elasticsearch API key service, which - is automatically enabled. To configure or turn off the API key service, refer - to API key service setting documentation. + .. raw:: html + +

Create an API key.

+

Create an API key for access without requiring basic authentication.

+

IMPORTANT: If the credential that is used to authenticate this request is an API key, the derived API key cannot have any privileges. + If you specify privileges, the API returns an error.

+

A successful request returns a JSON structure that contains the API key, its unique id, and its name. + If applicable, it also returns expiration information for the API key in milliseconds.

+

NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys.

+

The API keys are created by the Elasticsearch API key service, which is automatically enabled. + To configure or turn off the API key service, refer to API key service setting documentation.
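A minimal sketch, assuming an AsyncElasticsearch client named es; the key name and role descriptor are illustrative only:

    resp = await es.security.create_api_key(
        name="my-app-key",                 # hypothetical key name
        expiration="7d",                   # optional; API keys never expire by default
        role_descriptors={
            "reader": {"indices": [{"names": ["my-index-*"], "privileges": ["read"]}]}
        },
    )
    # the response contains the key id, its name, and the generated secret
    print(resp["id"], resp["api_key"])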

+ ``_ @@ -768,21 +791,19 @@ async def create_cross_cluster_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a cross-cluster API key. Create an API key of the `cross_cluster` type - for the API key based remote cluster access. A `cross_cluster` API key cannot - be used to authenticate through the REST interface. IMPORTANT: To authenticate - this request you must use a credential that is not an API key. Even if you use - an API key that has the required privilege, the API returns an error. Cross-cluster - API keys are created by the Elasticsearch API key service, which is automatically - enabled. NOTE: Unlike REST API keys, a cross-cluster API key does not capture - permissions of the authenticated user. The API key’s effective permission is - exactly as specified with the `access` property. A successful request returns - a JSON structure that contains the API key, its unique ID, and its name. If applicable, - it also returns expiration information for the API key in milliseconds. By default, - API keys never expire. You can specify expiration information when you create - the API keys. Cross-cluster API keys can only be updated with the update cross-cluster - API key API. Attempting to update them with the update REST API key API or the - bulk update REST API keys API will result in an error. + .. raw:: html + +

Create a cross-cluster API key.

+

Create an API key of the cross_cluster type for the API key based remote cluster access. + A cross_cluster API key cannot be used to authenticate through the REST interface.

+

IMPORTANT: To authenticate this request you must use a credential that is not an API key. Even if you use an API key that has the required privilege, the API returns an error.

+

Cross-cluster API keys are created by the Elasticsearch API key service, which is automatically enabled.

+

NOTE: Unlike REST API keys, a cross-cluster API key does not capture permissions of the authenticated user. The API key’s effective permission is exactly as specified with the access property.

+

A successful request returns a JSON structure that contains the API key, its unique ID, and its name. If applicable, it also returns expiration information for the API key in milliseconds.

+

By default, API keys never expire. You can specify expiration information when you create the API keys.

+

Cross-cluster API keys can only be updated with the update cross-cluster API key API. + Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error.

+ ``_ @@ -851,9 +872,13 @@ async def create_service_token( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a service account token. Create a service accounts token for access without - requiring basic authentication. NOTE: Service account tokens never expire. You - must actively delete them if they are no longer needed. + .. raw:: html + +

Create a service account token.

+

Create a service accounts token for access without requiring basic authentication.

+

NOTE: Service account tokens never expire. + You must actively delete them if they are no longer needed.
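A minimal sketch, assuming an AsyncElasticsearch client named es; the token name is hypothetical:

    resp = await es.security.create_service_token(
        namespace="elastic", service="fleet-server", name="my-token"
    )
    # the bearer token value is only returned here, so the caller must store it
    print(resp["token"]["value"])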

+ ``_ @@ -929,20 +954,17 @@ async def delegate_pki( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delegate PKI authentication. This API implements the exchange of an X509Certificate - chain for an Elasticsearch access token. The certificate chain is validated, - according to RFC 5280, by sequentially considering the trust configuration of - every installed PKI realm that has `delegation.enabled` set to `true`. A successfully - trusted client certificate is also subject to the validation of the subject distinguished - name according to thw `username_pattern` of the respective realm. This API is - called by smart and trusted proxies, such as Kibana, which terminate the user's - TLS session but still want to authenticate the user by using a PKI realm—-​as - if the user connected directly to Elasticsearch. IMPORTANT: The association between - the subject public key in the target certificate and the corresponding private - key is not validated. This is part of the TLS authentication process and it is - delegated to the proxy that calls this API. The proxy is trusted to have performed - the TLS authentication and this API translates that authentication into an Elasticsearch - access token. + .. raw:: html + +

Delegate PKI authentication.

+

This API implements the exchange of an X509Certificate chain for an Elasticsearch access token. + The certificate chain is validated, according to RFC 5280, by sequentially considering the trust configuration of every installed PKI realm that has delegation.enabled set to true. + A successfully trusted client certificate is also subject to the validation of the subject distinguished name according to the username_pattern of the respective realm.

+

This API is called by smart and trusted proxies, such as Kibana, which terminate the user's TLS session but still want to authenticate the user by using a PKI realm, as if the user connected directly to Elasticsearch.

+

IMPORTANT: The association between the subject public key in the target certificate and the corresponding private key is not validated. + This is part of the TLS authentication process and it is delegated to the proxy that calls this API. + The proxy is trusted to have performed the TLS authentication and this API translates that authentication into an Elasticsearch access token.

+ ``_ @@ -998,10 +1020,15 @@ async def delete_privileges( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete application privileges. To use this API, you must have one of the following - privileges: * The `manage_security` cluster privilege (or a greater privilege - such as `all`). * The "Manage Application Privileges" global privilege for the - application being referenced in the request. + .. raw:: html + +

Delete application privileges.

+

To use this API, you must have one of the following privileges:

+
  • The manage_security cluster privilege (or a greater privilege such as all).
  • The "Manage Application Privileges" global privilege for the application being referenced in the request.
+ ``_ @@ -1058,10 +1085,13 @@ async def delete_role( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete roles. Delete roles in the native realm. The role management APIs are - generally the preferred way to manage roles, rather than using file-based role - management. The delete roles API cannot remove roles that are defined in roles - files. + .. raw:: html + +

Delete roles.

+

Delete roles in the native realm. + The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. + The delete roles API cannot remove roles that are defined in roles files.

+ ``_ @@ -1109,10 +1139,13 @@ async def delete_role_mapping( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete role mappings. Role mappings define which roles are assigned to each user. - The role mapping APIs are generally the preferred way to manage role mappings - rather than using role mapping files. The delete role mappings API cannot remove - role mappings that are defined in role mapping files. + .. raw:: html + +

Delete role mappings.

+

Role mappings define which roles are assigned to each user. + The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. + The delete role mappings API cannot remove role mappings that are defined in role mapping files.

+ ``_ @@ -1164,8 +1197,11 @@ async def delete_service_token( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete service account tokens. Delete service account tokens for a service in - a specified namespace. + .. raw:: html + +

Delete service account tokens.

+

Delete service account tokens for a service in a specified namespace.

+ ``_ @@ -1223,7 +1259,11 @@ async def delete_user( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete users. Delete users from the native realm. + .. raw:: html + +

Delete users.

+

Delete users from the native realm.

+ ``_ @@ -1271,8 +1311,13 @@ async def disable_user( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Disable users. Disable users in the native realm. By default, when you create - users, they are enabled. You can use this API to revoke a user's access to Elasticsearch. + .. raw:: html + +

Disable users.

+

Disable users in the native realm. + By default, when you create users, they are enabled. + You can use this API to revoke a user's access to Elasticsearch.

+ ``_ @@ -1320,15 +1365,16 @@ async def disable_user_profile( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Disable a user profile. Disable user profiles so that they are not visible in - user profile searches. NOTE: The user profile feature is designed only for use - by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security - solutions. Individual users and external applications should not call this API - directly. Elastic reserves the right to change or remove this feature in future - releases without prior notice. When you activate a user profile, its automatically - enabled and visible in user profile searches. You can use the disable user profile - API to disable a user profile so it’s not visible in these searches. To re-enable - a disabled user profile, use the enable user profile API . + .. raw:: html + +

Disable a user profile.

+

Disable user profiles so that they are not visible in user profile searches.

+

NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. + Individual users and external applications should not call this API directly. + Elastic reserves the right to change or remove this feature in future releases without prior notice.

+

When you activate a user profile, it's automatically enabled and visible in user profile searches. You can use the disable user profile API to disable a user profile so it's not visible in these searches. + To re-enable a disabled user profile, use the enable user profile API.

+ ``_ @@ -1376,8 +1422,12 @@ async def enable_user( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Enable users. Enable users in the native realm. By default, when you create users, - they are enabled. + .. raw:: html + +

Enable users.

+

Enable users in the native realm. + By default, when you create users, they are enabled.

+ ``_ @@ -1425,14 +1475,16 @@ async def enable_user_profile( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Enable a user profile. Enable user profiles to make them visible in user profile - searches. NOTE: The user profile feature is designed only for use by Kibana and - Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual - users and external applications should not call this API directly. Elastic reserves - the right to change or remove this feature in future releases without prior notice. - When you activate a user profile, it's automatically enabled and visible in user - profile searches. If you later disable the user profile, you can use the enable - user profile API to make the profile visible in these searches again. + .. raw:: html + +

Enable a user profile.

+

Enable user profiles to make them visible in user profile searches.

+

NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. + Individual users and external applications should not call this API directly. + Elastic reserves the right to change or remove this feature in future releases without prior notice.

+

When you activate a user profile, it's automatically enabled and visible in user profile searches. + If you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again.

+ ``_ @@ -1476,11 +1528,13 @@ async def enroll_kibana( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Enroll Kibana. Enable a Kibana instance to configure itself for communication - with a secured Elasticsearch cluster. NOTE: This API is currently intended for - internal use only by Kibana. Kibana uses this API internally to configure itself - for communications with an Elasticsearch cluster that already has security features - enabled. + .. raw:: html + +

Enroll Kibana.

+

Enable a Kibana instance to configure itself for communication with a secured Elasticsearch cluster.

+

NOTE: This API is currently intended for internal use only by Kibana. + Kibana uses this API internally to configure itself for communications with an Elasticsearch cluster that already has security features enabled.

+ ``_ """ @@ -1515,12 +1569,13 @@ async def enroll_node( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Enroll a node. Enroll a new node to allow it to join an existing cluster with - security features enabled. The response contains all the necessary information - for the joining node to bootstrap discovery and security related settings so - that it can successfully join the cluster. The response contains key and certificate - material that allows the caller to generate valid signed certificates for the - HTTP layer of all nodes in the cluster. + .. raw:: html + +

Enroll a node.

+

Enroll a new node to allow it to join an existing cluster with security features enabled.

+

The response contains all the necessary information for the joining node to bootstrap discovery and security related settings so that it can successfully join the cluster. + The response contains key and certificate material that allows the caller to generate valid signed certificates for the HTTP layer of all nodes in the cluster.

+ ``_ """ @@ -1563,11 +1618,13 @@ async def get_api_key( with_profile_uid: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get API key information. Retrieves information for one or more API keys. NOTE: - If you have only the `manage_own_api_key` privilege, this API returns only the - API keys that you own. If you have `read_security`, `manage_api_key` or greater - privileges (including `manage_security`), this API returns all API keys regardless - of ownership. + .. raw:: html + +

Get API key information.

+

Retrieves information for one or more API keys. + NOTE: If you have only the manage_own_api_key privilege, this API returns only the API keys that you own. + If you have read_security, manage_api_key or greater privileges (including manage_security), this API returns all API keys regardless of ownership.
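A minimal sketch, assuming an AsyncElasticsearch client named es:

    # list only the API keys owned by the currently authenticated user
    resp = await es.security.get_api_key(owner=True)
    for key in resp["api_keys"]:
        print(key["id"], key["name"], key["invalidated"])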

+ ``_ @@ -1641,8 +1698,11 @@ async def get_builtin_privileges( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get builtin privileges. Get the list of cluster privileges and index privileges - that are available in this version of Elasticsearch. + .. raw:: html + +

Get builtin privileges.

+

Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch.

+ ``_ """ @@ -1679,10 +1739,15 @@ async def get_privileges( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get application privileges. To use this API, you must have one of the following - privileges: * The `read_security` cluster privilege (or a greater privilege such - as `manage_security` or `all`). * The "Manage Application Privileges" global - privilege for the application being referenced in the request. + .. raw:: html + +

Get application privileges.

+

To use this API, you must have one of the following privileges:

+
  • The read_security cluster privilege (or a greater privilege such as manage_security or all).
  • The "Manage Application Privileges" global privilege for the application being referenced in the request.
+ ``_ @@ -1732,9 +1797,13 @@ async def get_role( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get roles. Get roles in the native realm. The role management APIs are generally - the preferred way to manage roles, rather than using file-based role management. - The get roles API cannot retrieve roles that are defined in roles files. + .. raw:: html + +

Get roles.

+

Get roles in the native realm. + The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. + The get roles API cannot retrieve roles that are defined in roles files.

+ ``_ @@ -1779,10 +1848,13 @@ async def get_role_mapping( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get role mappings. Role mappings define which roles are assigned to each user. - The role mapping APIs are generally the preferred way to manage role mappings - rather than using role mapping files. The get role mappings API cannot retrieve - role mappings that are defined in role mapping files. + .. raw:: html + +

Get role mappings.

+

Role mappings define which roles are assigned to each user. + The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. + The get role mappings API cannot retrieve role mappings that are defined in role mapping files.

+ ``_ @@ -1830,9 +1902,12 @@ async def get_service_accounts( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get service accounts. Get a list of service accounts that match the provided - path parameters. NOTE: Currently, only the `elastic/fleet-server` service account - is available. + .. raw:: html + +

Get service accounts.

+

Get a list of service accounts that match the provided path parameters.

+

NOTE: Currently, only the elastic/fleet-server service account is available.

+ ``_ @@ -1883,14 +1958,14 @@ async def get_service_credentials( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get service account credentials. To use this API, you must have at least the - `read_security` cluster privilege (or a greater privilege such as `manage_service_account` - or `manage_security`). The response includes service account tokens that were - created with the create service account tokens API as well as file-backed tokens - from all nodes of the cluster. NOTE: For tokens backed by the `service_tokens` - file, the API collects them from all nodes of the cluster. Tokens with the same - name from different nodes are assumed to be the same token and are only counted - once towards the total number of service tokens. + .. raw:: html + +

Get service account credentials.

+

To use this API, you must have at least the read_security cluster privilege (or a greater privilege such as manage_service_account or manage_security).

+

The response includes service account tokens that were created with the create service account tokens API as well as file-backed tokens from all nodes of the cluster.

+

NOTE: For tokens backed by the service_tokens file, the API collects them from all nodes of the cluster. + Tokens with the same name from different nodes are assumed to be the same token and are only counted once towards the total number of service tokens.

+ ``_ @@ -1936,10 +2011,17 @@ async def get_settings( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get security index settings. Get the user-configurable settings for the security - internal index (`.security` and associated indices). Only a subset of the index - settings — those that are user-configurable—will be shown. This includes: * `index.auto_expand_replicas` - * `index.number_of_replicas` + .. raw:: html + +

Get security index settings.

+

Get the user-configurable settings for the security internal index (.security and associated indices). + Only a subset of the index settings (those that are user-configurable) will be shown. + This includes:

+
  • index.auto_expand_replicas
  • index.number_of_replicas
+ ``_ @@ -2003,20 +2085,19 @@ async def get_token( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a token. Create a bearer token for access without requiring basic authentication. - The tokens are created by the Elasticsearch Token Service, which is automatically - enabled when you configure TLS on the HTTP interface. Alternatively, you can - explicitly enable the `xpack.security.authc.token.enabled` setting. When you - are running in production mode, a bootstrap check prevents you from enabling - the token service unless you also enable TLS on the HTTP interface. The get token - API takes the same parameters as a typical OAuth 2.0 token API except for the - use of a JSON request body. A successful get token API call returns a JSON structure - that contains the access token, the amount of time (seconds) that the token expires - in, the type, and the scope if available. The tokens returned by the get token - API have a finite period of time for which they are valid and after that time - period, they can no longer be used. That time period is defined by the `xpack.security.authc.token.timeout` - setting. If you want to invalidate a token immediately, you can do so by using - the invalidate token API. + .. raw:: html + +

Get a token.

+

Create a bearer token for access without requiring basic authentication. + The tokens are created by the Elasticsearch Token Service, which is automatically enabled when you configure TLS on the HTTP interface. + Alternatively, you can explicitly enable the xpack.security.authc.token.enabled setting. + When you are running in production mode, a bootstrap check prevents you from enabling the token service unless you also enable TLS on the HTTP interface.

+

The get token API takes the same parameters as a typical OAuth 2.0 token API except for the use of a JSON request body.

+

A successful get token API call returns a JSON structure that contains the access token, the amount of time (seconds) that the token expires in, the type, and the scope if available.

+

The tokens returned by the get token API have a finite period of time for which they are valid and after that time period, they can no longer be used. + That time period is defined by the xpack.security.authc.token.timeout setting. + If you want to invalidate a token immediately, you can do so by using the invalidate token API.
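A minimal sketch, assuming an AsyncElasticsearch client named es; the credentials are placeholders:

    resp = await es.security.get_token(
        grant_type="password",
        username="jacknich",               # hypothetical user
        password="a-long-password",
    )
    # pass the token on later requests as an "Authorization: Bearer ..." header
    access_token = resp["access_token"]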

+ ``_ @@ -2086,7 +2167,11 @@ async def get_user( with_profile_uid: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get users. Get information about users in the native realm and built-in users. + .. raw:: html + +

Get users.

+

Get information about users in the native realm and built-in users.

+ ``_ @@ -2137,10 +2222,14 @@ async def get_user_privileges( username: t.Optional[t.Union[None, str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get user privileges. Get the security privileges for the logged in user. All - users can use this API, but only to determine their own privileges. To check - the privileges of other users, you must use the run as feature. To check whether - a user has a specific list of privileges, use the has privileges API. + .. raw:: html + +

Get user privileges.

+

Get the security privileges for the logged in user. + All users can use this API, but only to determine their own privileges. + To check the privileges of other users, you must use the run as feature. + To check whether a user has a specific list of privileges, use the has privileges API.

+ ``_ @@ -2190,11 +2279,14 @@ async def get_user_profile( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a user profile. Get a user's profile using the unique profile ID. NOTE: The - user profile feature is designed only for use by Kibana and Elastic's Observability, - Enterprise Search, and Elastic Security solutions. Individual users and external - applications should not call this API directly. Elastic reserves the right to - change or remove this feature in future releases without prior notice. + .. raw:: html + +

Get a user profile.

+

Get a user's profile using the unique profile ID.

+

NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. + Individual users and external applications should not call this API directly. + Elastic reserves the right to change or remove this feature in future releases without prior notice.

+ ``_ @@ -2258,23 +2350,27 @@ async def grant_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Grant an API key. Create an API key on behalf of another user. This API is similar - to the create API keys API, however it creates the API key for a user that is - different than the user that runs the API. The caller must have authentication - credentials for the user on whose behalf the API key will be created. It is not - possible to use this API to create an API key without that user's credentials. - The supported user authentication credential types are: * username and password - * Elasticsearch access tokens * JWTs The user, for whom the authentication credentials - is provided, can optionally "run as" (impersonate) another user. In this case, - the API key will be created on behalf of the impersonated user. This API is intended - be used by applications that need to create and manage API keys for end users, - but cannot guarantee that those users have permission to create API keys on their - own behalf. The API keys are created by the Elasticsearch API key service, which - is automatically enabled. A successful grant API key API call returns a JSON - structure that contains the API key, its unique id, and its name. If applicable, - it also returns expiration information for the API key in milliseconds. By default, - API keys never expire. You can specify expiration information when you create - the API keys. + .. raw:: html + +

Grant an API key.

+

Create an API key on behalf of another user. + This API is similar to the create API keys API, however it creates the API key for a user that is different than the user that runs the API. + The caller must have authentication credentials for the user on whose behalf the API key will be created. + It is not possible to use this API to create an API key without that user's credentials. + The supported user authentication credential types are:

+
  • username and password
  • Elasticsearch access tokens
  • JWTs
+

The user, for whom the authentication credentials are provided, can optionally "run as" (impersonate) another user. + In this case, the API key will be created on behalf of the impersonated user.

+

This API is intended to be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf. + The API keys are created by the Elasticsearch API key service, which is automatically enabled.

+

A successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name. + If applicable, it also returns expiration information for the API key in milliseconds.

+

By default, API keys never expire. You can specify expiration information when you create the API keys.
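A minimal sketch, assuming an AsyncElasticsearch client named es that is authenticated as a user allowed to grant keys; the end-user credentials and key name are placeholders:

    resp = await es.security.grant_api_key(
        grant_type="password",
        username="end-user",               # the user on whose behalf the key is created
        password="end-user-password",
        api_key={"name": "granted-key", "expiration": "1d"},
    )
    print(resp["id"], resp["api_key"])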

+ ``_ @@ -2415,9 +2511,13 @@ async def has_privileges( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Check user privileges. Determine whether the specified user has a specified list - of privileges. All users can use this API, but only to determine their own privileges. - To check the privileges of other users, you must use the run as feature. + .. raw:: html + +

Check user privileges.

+

Determine whether the specified user has a specified list of privileges. + All users can use this API, but only to determine their own privileges. + To check the privileges of other users, you must use the run as feature.
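A minimal sketch, assuming an AsyncElasticsearch client named es; the index name is hypothetical:

    resp = await es.security.has_privileges(
        cluster=["monitor"],
        index=[{"names": ["my-index"], "privileges": ["read", "write"]}],
    )
    # has_all_requested is True only if every requested privilege is held
    print(resp["has_all_requested"])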

+ ``_ @@ -2476,12 +2576,13 @@ async def has_privileges_user_profile( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Check user profile privileges. Determine whether the users associated with the - specified user profile IDs have all the requested privileges. NOTE: The user - profile feature is designed only for use by Kibana and Elastic's Observability, - Enterprise Search, and Elastic Security solutions. Individual users and external - applications should not call this API directly. Elastic reserves the right to - change or remove this feature in future releases without prior notice. + .. raw:: html + +

Check user profile privileges.

+

Determine whether the users associated with the specified user profile IDs have all the requested privileges.

+

NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. + Elastic reserves the right to change or remove this feature in future releases without prior notice.

+ ``_ @@ -2540,20 +2641,22 @@ async def invalidate_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Invalidate API keys. This API invalidates API keys created by the create API - key or grant API key APIs. Invalidated API keys fail authentication, but they - can still be viewed using the get API key information and query API key information - APIs, for at least the configured retention period, until they are automatically - deleted. To use this API, you must have at least the `manage_security`, `manage_api_key`, - or `manage_own_api_key` cluster privileges. The `manage_security` privilege allows - deleting any API key, including both REST and cross cluster API keys. The `manage_api_key` - privilege allows deleting any REST API key, but not cross cluster API keys. The - `manage_own_api_key` only allows deleting REST API keys that are owned by the - user. In addition, with the `manage_own_api_key` privilege, an invalidation request - must be issued in one of the three formats: - Set the parameter `owner=true`. - - Or, set both `username` and `realm_name` to match the user's identity. - Or, - if the request is issued by an API key, that is to say an API key invalidates - itself, specify its ID in the `ids` field. + .. raw:: html + +

Invalidate API keys.

+

This API invalidates API keys created by the create API key or grant API key APIs. + Invalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted.

+

To use this API, you must have at least the manage_security, manage_api_key, or manage_own_api_key cluster privileges. + The manage_security privilege allows deleting any API key, including both REST and cross cluster API keys. + The manage_api_key privilege allows deleting any REST API key, but not cross cluster API keys. + The manage_own_api_key privilege only allows deleting REST API keys that are owned by the user. + In addition, with the manage_own_api_key privilege, an invalidation request must be issued in one of the three formats:

+
  • Set the parameter owner=true.
  • Or, set both username and realm_name to match the user's identity.
  • Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the ids field.
+ ``_ @@ -2625,15 +2728,19 @@ async def invalidate_token( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Invalidate a token. The access tokens returned by the get token API have a finite - period of time for which they are valid. After that time period, they can no - longer be used. The time period is defined by the `xpack.security.authc.token.timeout` - setting. The refresh tokens returned by the get token API are only valid for - 24 hours. They can also be used exactly once. If you want to invalidate one or - more access or refresh tokens immediately, use this invalidate token API. NOTE: - While all parameters are optional, at least one of them is required. More specifically, - either one of `token` or `refresh_token` parameters is required. If none of these - two are specified, then `realm_name` and/or `username` need to be specified. + .. raw:: html + +
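For example, using the first format above (owner=true), a minimal sketch with an assumed AsyncElasticsearch client named es:

    # invalidate every REST API key owned by the currently authenticated user
    resp = await es.security.invalidate_api_key(owner=True)
    print(resp["invalidated_api_keys"], resp["error_count"])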

Invalidate a token.

+

The access tokens returned by the get token API have a finite period of time for which they are valid. + After that time period, they can no longer be used. + The time period is defined by the xpack.security.authc.token.timeout setting.

+

The refresh tokens returned by the get token API are only valid for 24 hours. + They can also be used exactly once. + If you want to invalidate one or more access or refresh tokens immediately, use this invalidate token API.

+

NOTE: While all parameters are optional, at least one of them is required. + More specifically, either one of token or refresh_token parameters is required. + If none of these two are specified, then realm_name and/or username need to be specified.

+ ``_ @@ -2695,12 +2802,13 @@ async def oidc_authenticate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Authenticate OpenID Connect. Exchange an OpenID Connect authentication response - message for an Elasticsearch internal access token and refresh token that can - be subsequently used for authentication. Elasticsearch exposes all the necessary - OpenID Connect related functionality with the OpenID Connect APIs. These APIs - are used internally by Kibana in order to provide OpenID Connect based authentication, - but can also be used by other, custom web applications or other clients. + .. raw:: html + +

Authenticate OpenID Connect.

+

Exchange an OpenID Connect authentication response message for an Elasticsearch internal access token and refresh token that can be subsequently used for authentication.

+

Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. + These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.

+ ``_ @@ -2773,15 +2881,14 @@ async def oidc_logout( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Logout of OpenID Connect. Invalidate an access token and a refresh token that - were generated as a response to the `/_security/oidc/authenticate` API. If the - OpenID Connect authentication realm in Elasticsearch is accordingly configured, - the response to this call will contain a URI pointing to the end session endpoint - of the OpenID Connect Provider in order to perform single logout. Elasticsearch - exposes all the necessary OpenID Connect related functionality with the OpenID - Connect APIs. These APIs are used internally by Kibana in order to provide OpenID - Connect based authentication, but can also be used by other, custom web applications - or other clients. + .. raw:: html + +

Logout of OpenID Connect.

+

Invalidate an access token and a refresh token that were generated as a response to the /_security/oidc/authenticate API.

+

If the OpenID Connect authentication realm in Elasticsearch is accordingly configured, the response to this call will contain a URI pointing to the end session endpoint of the OpenID Connect Provider in order to perform single logout.

+

Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. + These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.

+ ``_ @@ -2836,15 +2943,14 @@ async def oidc_prepare_authentication( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Prepare OpenID connect authentication. Create an oAuth 2.0 authentication request - as a URL string based on the configuration of the OpenID Connect authentication - realm in Elasticsearch. The response of this API is a URL pointing to the Authorization - Endpoint of the configured OpenID Connect Provider, which can be used to redirect - the browser of the user in order to continue the authentication process. Elasticsearch - exposes all the necessary OpenID Connect related functionality with the OpenID - Connect APIs. These APIs are used internally by Kibana in order to provide OpenID - Connect based authentication, but can also be used by other, custom web applications - or other clients. + .. raw:: html + +

Prepare OpenID Connect authentication.

+

Create an OAuth 2.0 authentication request as a URL string based on the configuration of the OpenID Connect authentication realm in Elasticsearch.

+

The response of this API is a URL pointing to the Authorization Endpoint of the configured OpenID Connect Provider, which can be used to redirect the browser of the user in order to continue the authentication process.

+

Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. + These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.

+ ``_ @@ -2921,20 +3027,26 @@ async def put_privileges( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update application privileges. To use this API, you must have one of - the following privileges: * The `manage_security` cluster privilege (or a greater - privilege such as `all`). * The "Manage Application Privileges" global privilege - for the application being referenced in the request. Application names are formed - from a prefix, with an optional suffix that conform to the following rules: * - The prefix must begin with a lowercase ASCII letter. * The prefix must contain - only ASCII letters or digits. * The prefix must be at least 3 characters long. - * If the suffix exists, it must begin with either a dash `-` or `_`. * The suffix - cannot contain any of the following characters: `\\`, `/`, `*`, `?`, `"`, `<`, - `>`, `|`, `,`, `*`. * No part of the name can contain whitespace. Privilege names - must begin with a lowercase ASCII letter and must contain only ASCII letters - and digits along with the characters `_`, `-`, and `.`. Action names can contain - any number of printable ASCII characters and must contain at least one of the - following characters: `/`, `*`, `:`. + .. raw:: html + +

Create or update application privileges.

+

To use this API, you must have one of the following privileges:

+
  • The manage_security cluster privilege (or a greater privilege such as all).
  • The "Manage Application Privileges" global privilege for the application being referenced in the request.
+

Application names are formed from a prefix and an optional suffix that conform to the following rules:

+
  • The prefix must begin with a lowercase ASCII letter.
  • The prefix must contain only ASCII letters or digits.
  • The prefix must be at least 3 characters long.
  • If the suffix exists, it must begin with either a dash - or _.
  • The suffix cannot contain any of the following characters: \\, /, *, ?, ", <, >, |, ,, *.
  • No part of the name can contain whitespace.
+

Privilege names must begin with a lowercase ASCII letter and must contain only ASCII letters and digits along with the characters _, -, and ..

+

Action names can contain any number of printable ASCII characters and must contain at least one of the following characters: /, *, :.

+ ``_ @@ -3080,10 +3192,13 @@ async def put_role( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update roles. The role management APIs are generally the preferred - way to manage roles in the native realm, rather than using file-based role management. - The create or update roles API cannot update roles that are defined in roles - files. File-based role management is not available in Elastic Serverless. + .. raw:: html + +

Create or update roles.

+

The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management. + The create or update roles API cannot update roles that are defined in roles files. + File-based role management is not available in Elastic Serverless.
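A minimal sketch, assuming an AsyncElasticsearch client named es; the role name and index pattern are illustrative:

    await es.security.put_role(
        name="my-app-reader",
        cluster=["monitor"],
        indices=[{"names": ["my-index-*"], "privileges": ["read", "view_index_metadata"]}],
    )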

+ ``_ @@ -3195,29 +3310,27 @@ async def put_role_mapping( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update role mappings. Role mappings define which roles are assigned - to each user. Each mapping has rules that identify users and a list of roles - that are granted to those users. The role mapping APIs are generally the preferred - way to manage role mappings rather than using role mapping files. The create - or update role mappings API cannot update role mappings that are defined in role - mapping files. NOTE: This API does not create roles. Rather, it maps users to - existing roles. Roles can be created by using the create or update roles API - or roles files. **Role templates** The most common use for role mappings is to - create a mapping from a known value on the user to a fixed role name. For example, - all users in the `cn=admin,dc=example,dc=com` LDAP group should be given the - superuser role in Elasticsearch. The `roles` field is used for this purpose. - For more complex needs, it is possible to use Mustache templates to dynamically - determine the names of the roles that should be granted to the user. The `role_templates` - field is used for this purpose. NOTE: To use role templates successfully, the - relevant scripting feature must be enabled. Otherwise, all attempts to create - a role mapping with role templates fail. All of the user fields that are available - in the role mapping rules are also available in the role templates. Thus it is - possible to assign a user to a role that reflects their username, their groups, - or the name of the realm to which they authenticated. By default a template is - evaluated to produce a single string that is the name of the role which should - be assigned to the user. If the format of the template is set to "json" then - the template is expected to produce a JSON string or an array of JSON strings - for the role names. + .. raw:: html + +

Create or update role mappings.

+

Role mappings define which roles are assigned to each user. + Each mapping has rules that identify users and a list of roles that are granted to those users. + The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files.

+

NOTE: This API does not create roles. Rather, it maps users to existing roles. + Roles can be created by using the create or update roles API or roles files.

+

Role templates

+

The most common use for role mappings is to create a mapping from a known value on the user to a fixed role name. + For example, all users in the cn=admin,dc=example,dc=com LDAP group should be given the superuser role in Elasticsearch. + The roles field is used for this purpose.

+

For more complex needs, it is possible to use Mustache templates to dynamically determine the names of the roles that should be granted to the user. + The role_templates field is used for this purpose.

+

NOTE: To use role templates successfully, the relevant scripting feature must be enabled. + Otherwise, all attempts to create a role mapping with role templates fail.

+

All of the user fields that are available in the role mapping rules are also available in the role templates. + Thus it is possible to assign a user to a role that reflects their username, their groups, or the name of the realm to which they authenticated.

+

By default, a template is evaluated to produce a single string that is the name of the role which should be assigned to the user. + If the format of the template is set to "json", then the template is expected to produce a JSON string or an array of JSON strings for the role names.
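A minimal sketch of the simple roles-field case, assuming an AsyncElasticsearch client named es; the mapping name, group DN, and role are illustrative:

    await es.security.put_role_mapping(
        name="ldap-admins",
        enabled=True,
        roles=["superuser"],
        rules={"field": {"groups": "cn=admin,dc=example,dc=com"}},
    )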

+ ``_ @@ -3313,10 +3426,13 @@ async def put_user( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update users. Add and update users in the native realm. A password - is required for adding a new user but is optional when updating an existing user. - To change a user's password without updating any other fields, use the change - password API. + .. raw:: html + +

Create or update users.

+

Add and update users in the native realm. + A password is required for adding a new user but is optional when updating an existing user. + To change a user's password without updating any other fields, use the change password API.
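A minimal sketch, assuming an AsyncElasticsearch client named es; the user details are placeholders:

    await es.security.put_user(
        username="jacknich",
        password="a-long-initial-password",   # required when creating, optional when updating
        roles=["my-app-reader"],
        full_name="Jack Nicholson",
        email="jack@example.com",
    )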

+ ``_ @@ -3427,13 +3543,15 @@ async def query_api_keys( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Find API keys with a query. Get a paginated list of API keys and their information. - You can optionally filter the results with a query. To use this API, you must - have at least the `manage_own_api_key` or the `read_security` cluster privileges. - If you have only the `manage_own_api_key` privilege, this API returns only the - API keys that you own. If you have the `read_security`, `manage_api_key`, or - greater privileges (including `manage_security`), this API returns all API keys - regardless of ownership. + .. raw:: html + +

Find API keys with a query.

+

Get a paginated list of API keys and their information. + You can optionally filter the results with a query.

+

To use this API, you must have at least the manage_own_api_key or the read_security cluster privileges. + If you have only the manage_own_api_key privilege, this API returns only the API keys that you own. + If you have the read_security, manage_api_key, or greater privileges (including manage_security), this API returns all API keys regardless of ownership.
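A minimal sketch, assuming an AsyncElasticsearch client named es; the key-name prefix is illustrative:

    resp = await es.security.query_api_keys(
        query={"prefix": {"name": "my-app-"}},   # filter on the key name
        size=50,
        sort=[{"creation": "desc"}],
    )
    print(resp["total"], [k["id"] for k in resp["api_keys"]])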

+ ``_ @@ -3568,11 +3686,15 @@ async def query_role( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Find roles with a query. Get roles in a paginated manner. The role management - APIs are generally the preferred way to manage roles, rather than using file-based - role management. The query roles API does not retrieve roles that are defined - in roles files, nor built-in ones. You can optionally filter the results with - a query. Also, the results can be paginated and sorted. + .. raw:: html + +

Find roles with a query.

+

Get roles in a paginated manner. + The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. + The query roles API does not retrieve roles that are defined in roles files, nor built-in ones. + You can optionally filter the results with a query. + Also, the results can be paginated and sorted.

+ ``_ @@ -3658,10 +3780,14 @@ async def query_user( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Find users with a query. Get information for users in a paginated manner. You - can optionally filter the results with a query. NOTE: As opposed to the get user - API, built-in users are excluded from the result. This API is only for native - users. + .. raw:: html + +

Find users with a query.

+

Get information for users in a paginated manner. + You can optionally filter the results with a query.

+

NOTE: As opposed to the get user API, built-in users are excluded from the result. + This API is only for native users.
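A minimal sketch, assuming an AsyncElasticsearch client named es; the username pattern is illustrative:

    resp = await es.security.query_user(
        query={"wildcard": {"username": "app-*"}},
        size=20,
    )
    print(resp["total"], [u["username"] for u in resp["users"]])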

+ ``_ @@ -3740,19 +3866,21 @@ async def saml_authenticate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Authenticate SAML. Submit a SAML response message to Elasticsearch for consumption. - NOTE: This API is intended for use by custom web applications other than Kibana. - If you are using Kibana, refer to the documentation for configuring SAML single-sign-on - on the Elastic Stack. The SAML message that is submitted can be: * A response - to a SAML authentication request that was previously created using the SAML prepare - authentication API. * An unsolicited SAML message in the case of an IdP-initiated - single sign-on (SSO) flow. In either case, the SAML message needs to be a base64 - encoded XML document with a root element of ``. After successful validation, - Elasticsearch responds with an Elasticsearch internal access token and refresh - token that can be subsequently used for authentication. This API endpoint essentially - exchanges SAML responses that indicate successful authentication in the IdP for - Elasticsearch access and refresh tokens, which can be used for authentication - against Elasticsearch. + .. raw:: html + +

Authenticate SAML.

+

Submit a SAML response message to Elasticsearch for consumption.

+

NOTE: This API is intended for use by custom web applications other than Kibana. + If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.

+

The SAML message that is submitted can be:

+
  • A response to a SAML authentication request that was previously created using the SAML prepare authentication API.
  • An unsolicited SAML message in the case of an IdP-initiated single sign-on (SSO) flow.
+

In either case, the SAML message needs to be a base64 encoded XML document with a root element of <Response>.

+

After successful validation, Elasticsearch responds with an Elasticsearch internal access token and refresh token that can be subsequently used for authentication. + This API endpoint essentially exchanges SAML responses that indicate successful authentication in the IdP for Elasticsearch access and refresh tokens, which can be used for authentication against Elasticsearch.

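A rough sketch of the exchange this docstring describes, for a custom (non-Kibana) web application; not part of the patch, and the endpoint, credentials, and response-field handling are assumptions:

    from elasticsearch import AsyncElasticsearch

    async def exchange_saml_response(saml_response_b64: str, request_id: str) -> dict:
        client = AsyncElasticsearch("https://localhost:9200", api_key="...")
        # Exchange a base64-encoded SAML <Response> for Elasticsearch tokens.
        resp = await client.security.saml_authenticate(
            content=saml_response_b64,
            ids=[request_id],  # ID(s) returned by the SAML prepare authentication step
        )
        await client.close()
        return {"access_token": resp["access_token"], "refresh_token": resp["refresh_token"]}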
+ ``_ @@ -3814,16 +3942,18 @@ async def saml_complete_logout( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Logout of SAML completely. Verifies the logout response sent from the SAML IdP. - NOTE: This API is intended for use by custom web applications other than Kibana. - If you are using Kibana, refer to the documentation for configuring SAML single-sign-on - on the Elastic Stack. The SAML IdP may send a logout response back to the SP - after handling the SP-initiated SAML Single Logout. This API verifies the response - by ensuring the content is relevant and validating its signature. An empty response - is returned if the verification process is successful. The response can be sent - by the IdP with either the HTTP-Redirect or the HTTP-Post binding. The caller - of this API must prepare the request accordingly so that this API can handle - either of them. + .. raw:: html + +
+          Logout of SAML completely.
+
+          Verifies the logout response sent from the SAML IdP.
+
+          NOTE: This API is intended for use by custom web applications other than Kibana.
+          If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
+
+          The SAML IdP may send a logout response back to the SP after handling the SP-initiated SAML Single Logout.
+          This API verifies the response by ensuring the content is relevant and validating its signature.
+          An empty response is returned if the verification process is successful.
+          The response can be sent by the IdP with either the HTTP-Redirect or the HTTP-Post binding.
+          The caller of this API must prepare the request accordingly so that this API can handle either of them.
+ ``_ @@ -3889,15 +4019,17 @@ async def saml_invalidate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Invalidate SAML. Submit a SAML LogoutRequest message to Elasticsearch for consumption. - NOTE: This API is intended for use by custom web applications other than Kibana. - If you are using Kibana, refer to the documentation for configuring SAML single-sign-on - on the Elastic Stack. The logout request comes from the SAML IdP during an IdP - initiated Single Logout. The custom web application can use this API to have - Elasticsearch process the `LogoutRequest`. After successful validation of the - request, Elasticsearch invalidates the access token and refresh token that corresponds - to that specific SAML principal and provides a URL that contains a SAML LogoutResponse - message. Thus the user can be redirected back to their IdP. + .. raw:: html + +
+          Invalidate SAML.
+
+          Submit a SAML LogoutRequest message to Elasticsearch for consumption.
+
+          NOTE: This API is intended for use by custom web applications other than Kibana.
+          If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
+
+          The logout request comes from the SAML IdP during an IdP initiated Single Logout.
+          The custom web application can use this API to have Elasticsearch process the LogoutRequest.
+          After successful validation of the request, Elasticsearch invalidates the access token and refresh token that correspond to that specific SAML principal and provides a URL that contains a SAML LogoutResponse message.
+          Thus the user can be redirected back to their IdP.
+ ``_ @@ -3964,14 +4096,15 @@ async def saml_logout( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Logout of SAML. Submits a request to invalidate an access token and refresh token. - NOTE: This API is intended for use by custom web applications other than Kibana. - If you are using Kibana, refer to the documentation for configuring SAML single-sign-on - on the Elastic Stack. This API invalidates the tokens that were generated for - a user by the SAML authenticate API. If the SAML realm in Elasticsearch is configured - accordingly and the SAML IdP supports this, the Elasticsearch response contains - a URL to redirect the user to the IdP that contains a SAML logout request (starting - an SP-initiated SAML Single Logout). + .. raw:: html + +
+          Logout of SAML.
+
+          Submits a request to invalidate an access token and refresh token.
+
+          NOTE: This API is intended for use by custom web applications other than Kibana.
+          If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
+
+          This API invalidates the tokens that were generated for a user by the SAML authenticate API.
+          If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP supports this, the Elasticsearch response contains a URL to redirect the user to the IdP that contains a SAML logout request (starting an SP-initiated SAML Single Logout).
+ ``_ @@ -4028,20 +4161,20 @@ async def saml_prepare_authentication( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Prepare SAML authentication. Create a SAML authentication request (``) - as a URL string based on the configuration of the respective SAML realm in Elasticsearch. - NOTE: This API is intended for use by custom web applications other than Kibana. - If you are using Kibana, refer to the documentation for configuring SAML single-sign-on - on the Elastic Stack. This API returns a URL pointing to the SAML Identity Provider. - You can use the URL to redirect the browser of the user in order to continue - the authentication process. The URL includes a single parameter named `SAMLRequest`, - which contains a SAML Authentication request that is deflated and Base64 encoded. - If the configuration dictates that SAML authentication requests should be signed, - the URL has two extra parameters named `SigAlg` and `Signature`. These parameters - contain the algorithm used for the signature and the signature value itself. - It also returns a random string that uniquely identifies this SAML Authentication - request. The caller of this API needs to store this identifier as it needs to - be used in a following step of the authentication process. + .. raw:: html + +
+          Prepare SAML authentication.
+
+          Create a SAML authentication request (<AuthnRequest>) as a URL string based on the configuration of the respective SAML realm in Elasticsearch.
+
+          NOTE: This API is intended for use by custom web applications other than Kibana.
+          If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.
+
+          This API returns a URL pointing to the SAML Identity Provider.
+          You can use the URL to redirect the browser of the user in order to continue the authentication process.
+          The URL includes a single parameter named SAMLRequest, which contains a SAML Authentication request that is deflated and Base64 encoded.
+          If the configuration dictates that SAML authentication requests should be signed, the URL has two extra parameters named SigAlg and Signature.
+          These parameters contain the algorithm used for the signature and the signature value itself.
+          It also returns a random string that uniquely identifies this SAML Authentication request.
+          The caller of this API needs to store this identifier as it needs to be used in a following step of the authentication process.
+ ``_ @@ -4096,11 +4229,13 @@ async def saml_service_provider_metadata( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Create SAML service provider metadata. Generate SAML metadata for a SAML 2.0 - Service Provider. The SAML 2.0 specification provides a mechanism for Service - Providers to describe their capabilities and configuration using a metadata file. - This API generates Service Provider metadata based on the configuration of a - SAML realm in Elasticsearch. + .. raw:: html + +
+          Create SAML service provider metadata.
+
+          Generate SAML metadata for a SAML 2.0 Service Provider.
+
+          The SAML 2.0 specification provides a mechanism for Service Providers to describe their capabilities and configuration using a metadata file.
+          This API generates Service Provider metadata based on the configuration of a SAML realm in Elasticsearch.
+ ``_ @@ -4146,12 +4281,14 @@ async def suggest_user_profiles( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Suggest a user profile. Get suggestions for user profiles that match specified - search criteria. NOTE: The user profile feature is designed only for use by Kibana - and Elastic's Observability, Enterprise Search, and Elastic Security solutions. - Individual users and external applications should not call this API directly. - Elastic reserves the right to change or remove this feature in future releases - without prior notice. + .. raw:: html + +
+          Suggest a user profile.
+
+          Get suggestions for user profiles that match specified search criteria.
+
+          NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.
+          Individual users and external applications should not call this API directly.
+          Elastic reserves the right to change or remove this feature in future releases without prior notice.
+ ``_ @@ -4222,24 +4359,23 @@ async def update_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update an API key. Update attributes of an existing API key. This API supports - updates to an API key's access scope, expiration, and metadata. To use this API, - you must have at least the `manage_own_api_key` cluster privilege. Users can - only update API keys that they created or that were granted to them. To update - another user’s API key, use the `run_as` feature to submit a request on behalf - of another user. IMPORTANT: It's not possible to use an API key as the authentication - credential for this API. The owner user’s credentials are required. Use this - API to update API keys created by the create API key or grant API Key APIs. If - you need to apply the same update to many API keys, you can use the bulk update - API keys API to reduce overhead. It's not possible to update expired API keys - or API keys that have been invalidated by the invalidate API key API. The access - scope of an API key is derived from the `role_descriptors` you specify in the - request and a snapshot of the owner user's permissions at the time of the request. - The snapshot of the owner's permissions is updated automatically on every call. - IMPORTANT: If you don't specify `role_descriptors` in the request, a call to - this API might still change the API key's access scope. This change can occur - if the owner user's permissions have changed since the API key was created or - last modified. + .. raw:: html + +
+          Update an API key.
+
+          Update attributes of an existing API key.
+          This API supports updates to an API key's access scope, expiration, and metadata.
+
+          To use this API, you must have at least the manage_own_api_key cluster privilege.
+          Users can only update API keys that they created or that were granted to them.
+          To update another user's API key, use the run_as feature to submit a request on behalf of another user.
+
+          IMPORTANT: It's not possible to use an API key as the authentication credential for this API. The owner user's credentials are required.
+
+          Use this API to update API keys created by the create API key or grant API key APIs.
+          If you need to apply the same update to many API keys, you can use the bulk update API keys API to reduce overhead.
+          It's not possible to update expired API keys or API keys that have been invalidated by the invalidate API key API.
+
+          The access scope of an API key is derived from the role_descriptors you specify in the request and a snapshot of the owner user's permissions at the time of the request.
+          The snapshot of the owner's permissions is updated automatically on every call.
+
+          IMPORTANT: If you don't specify role_descriptors in the request, a call to this API might still change the API key's access scope.
+          This change can occur if the owner user's permissions have changed since the API key was created or last modified.
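A minimal sketch of the update described above, narrowing a key's scope; it is not part of the patch, and the key ID, role descriptor, and credentials are placeholder assumptions:

    from elasticsearch import AsyncElasticsearch

    async def restrict_api_key(key_id: str) -> None:
        client = AsyncElasticsearch("https://localhost:9200", basic_auth=("elastic", "..."))
        # Replace the key's role descriptors with a read-only definition and refresh its metadata.
        await client.security.update_api_key(
            id=key_id,
            role_descriptors={
                "read-only": {"indices": [{"names": ["logs-*"], "privileges": ["read"]}]}
            },
            metadata={"env": "production"},
        )
        await client.close()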
+ ``_ @@ -4314,19 +4450,20 @@ async def update_cross_cluster_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update a cross-cluster API key. Update the attributes of an existing cross-cluster - API key, which is used for API key based remote cluster access. To use this API, - you must have at least the `manage_security` cluster privilege. Users can only - update API keys that they created. To update another user's API key, use the - `run_as` feature to submit a request on behalf of another user. IMPORTANT: It's - not possible to use an API key as the authentication credential for this API. - To update an API key, the owner user's credentials are required. It's not possible - to update expired API keys, or API keys that have been invalidated by the invalidate - API key API. This API supports updates to an API key's access scope, metadata, - and expiration. The owner user's information, such as the `username` and `realm`, - is also updated automatically on every call. NOTE: This API cannot update REST - API keys, which should be updated by either the update API key or bulk update - API keys API. + .. raw:: html + +
+          Update a cross-cluster API key.
+
+          Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access.
+
+          To use this API, you must have at least the manage_security cluster privilege.
+          Users can only update API keys that they created.
+          To update another user's API key, use the run_as feature to submit a request on behalf of another user.
+
+          IMPORTANT: It's not possible to use an API key as the authentication credential for this API.
+          To update an API key, the owner user's credentials are required.
+
+          It's not possible to update expired API keys, or API keys that have been invalidated by the invalidate API key API.
+
+          This API supports updates to an API key's access scope, metadata, and expiration.
+          The owner user's information, such as the username and realm, is also updated automatically on every call.
+
+          NOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API.
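A sketch of the corresponding client call, shown only for orientation; the key ID, access definition, and credentials are assumptions, not values from this patch:

    from elasticsearch import AsyncElasticsearch

    async def rescope_remote_access_key(key_id: str) -> None:
        client = AsyncElasticsearch("https://localhost:9200", basic_auth=("elastic", "..."))
        # Supply the full access definition for cross-cluster search on the shared indices.
        await client.security.update_cross_cluster_api_key(
            id=key_id,
            access={"search": [{"names": ["shared-logs-*"]}]},
            metadata={"owner": "platform-team"},
        )
        await client.close()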
+ ``_ @@ -4398,14 +4535,14 @@ async def update_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update security index settings. Update the user-configurable settings for the - security internal index (`.security` and associated indices). Only a subset of - settings are allowed to be modified. This includes `index.auto_expand_replicas` - and `index.number_of_replicas`. NOTE: If `index.auto_expand_replicas` is set, - `index.number_of_replicas` will be ignored during updates. If a specific index - is not in use on the system and settings are provided for it, the request will - be rejected. This API does not yet support configuring the settings for indices - before they are in use. + .. raw:: html + +
+          Update security index settings.
+
+          Update the user-configurable settings for the security internal index (.security and associated indices). Only a subset of settings are allowed to be modified. This includes index.auto_expand_replicas and index.number_of_replicas.
+
+          NOTE: If index.auto_expand_replicas is set, index.number_of_replicas will be ignored during updates.
+
+          If a specific index is not in use on the system and settings are provided for it, the request will be rejected.
+          This API does not yet support configuring the settings for indices before they are in use.
+ ``_ @@ -4474,19 +4611,23 @@ async def update_user_profile_data( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update user profile data. Update specific data for the user profile that is associated - with a unique ID. NOTE: The user profile feature is designed only for use by - Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. - Individual users and external applications should not call this API directly. - Elastic reserves the right to change or remove this feature in future releases - without prior notice. To use this API, you must have one of the following privileges: - * The `manage_user_profile` cluster privilege. * The `update_profile_data` global - privilege for the namespaces that are referenced in the request. This API updates - the `labels` and `data` fields of an existing user profile document with JSON - objects. New keys and their values are added to the profile document and conflicting - keys are replaced by data that's included in the request. For both labels and - data, content is namespaced by the top-level fields. The `update_profile_data` - global privilege grants privileges for updating only the allowed namespaces. + .. raw:: html + +
+          Update user profile data.
+
+          Update specific data for the user profile that is associated with a unique ID.
+
+          NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions.
+          Individual users and external applications should not call this API directly.
+          Elastic reserves the right to change or remove this feature in future releases without prior notice.
+
+          To use this API, you must have one of the following privileges:
+
+          • The manage_user_profile cluster privilege.
+          • The update_profile_data global privilege for the namespaces that are referenced in the request.
+
+          This API updates the labels and data fields of an existing user profile document with JSON objects.
+          New keys and their values are added to the profile document and conflicting keys are replaced by data that's included in the request.
+
+          For both labels and data, content is namespaced by the top-level fields.
+          The update_profile_data global privilege grants privileges for updating only the allowed namespaces.
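To make the namespacing of labels and data concrete, a small sketch (not part of the patch; the uid, namespace, endpoint, and credentials are hypothetical):

    from elasticsearch import AsyncElasticsearch

    async def tag_profile(uid: str) -> None:
        client = AsyncElasticsearch("https://localhost:9200", api_key="...")
        # Both "labels" and "data" are namespaced by their top-level keys;
        # here everything lives under a hypothetical "app1" namespace.
        await client.security.update_user_profile_data(
            uid=uid,
            labels={"app1": {"direction": "north"}},
            data={"app1": {"theme": "dark"}},
        )
        await client.close()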
+ ``_ diff --git a/elasticsearch/_async/client/shutdown.py b/elasticsearch/_async/client/shutdown.py index 3236aa0c2..8e7380af4 100644 --- a/elasticsearch/_async/client/shutdown.py +++ b/elasticsearch/_async/client/shutdown.py @@ -42,13 +42,16 @@ async def delete_node( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Cancel node shutdown preparations. Remove a node from the shutdown list so it - can resume normal operations. You must explicitly clear the shutdown request - when a node rejoins the cluster or when a node has permanently left the cluster. - Shutdown requests are never removed automatically by Elasticsearch. NOTE: This - feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, - and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator - privileges feature is enabled, you must be an operator to use this API. + .. raw:: html + +
+          Cancel node shutdown preparations.
+          Remove a node from the shutdown list so it can resume normal operations.
+          You must explicitly clear the shutdown request when a node rejoins the cluster or when a node has permanently left the cluster.
+          Shutdown requests are never removed automatically by Elasticsearch.
+
+          NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes.
+          Direct use is not supported.
+
+          If the operator privileges feature is enabled, you must be an operator to use this API.
+ ``_ @@ -100,13 +103,14 @@ async def get_node( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the shutdown status. Get information about nodes that are ready to be shut - down, have shut down preparations still in progress, or have stalled. The API - returns status information for each part of the shut down process. NOTE: This - feature is designed for indirect use by Elasticsearch Service, Elastic Cloud - Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If - the operator privileges feature is enabled, you must be an operator to use this - API. + .. raw:: html + +
+          Get the shutdown status.
+
+          Get information about nodes that are ready to be shut down, have shut down preparations still in progress, or have stalled.
+          The API returns status information for each part of the shut down process.
+
+          NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.
+
+          If the operator privileges feature is enabled, you must be an operator to use this API.
+ ``_ @@ -169,18 +173,19 @@ async def put_node( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Prepare a node to be shut down. NOTE: This feature is designed for indirect use - by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. - Direct use is not supported. If you specify a node that is offline, it will be - prepared for shut down when it rejoins the cluster. If the operator privileges - feature is enabled, you must be an operator to use this API. The API migrates - ongoing tasks and index shards to other nodes as needed to prepare a node to - be restarted or shut down and removed from the cluster. This ensures that Elasticsearch - can be stopped safely with minimal disruption to the cluster. You must specify - the type of shutdown: `restart`, `remove`, or `replace`. If a node is already - being prepared for shutdown, you can use this API to change the shutdown type. - IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the - node shutdown status to determine when it is safe to stop Elasticsearch. + .. raw:: html + +
+          Prepare a node to be shut down.
+
+          NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.
+
+          If you specify a node that is offline, it will be prepared for shut down when it rejoins the cluster.
+
+          If the operator privileges feature is enabled, you must be an operator to use this API.
+
+          The API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster.
+          This ensures that Elasticsearch can be stopped safely with minimal disruption to the cluster.
+
+          You must specify the type of shutdown: restart, remove, or replace.
+          If a node is already being prepared for shutdown, you can use this API to change the shutdown type.
+
+          IMPORTANT: This API does NOT terminate the Elasticsearch process.
+          Monitor the node shutdown status to determine when it is safe to stop Elasticsearch.
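A sketch of the register-then-poll flow this docstring implies; not part of the patch, and the node ID, reason, credentials, and response fields shown are assumptions:

    from elasticsearch import AsyncElasticsearch

    async def prepare_restart(node_id: str) -> None:
        client = AsyncElasticsearch("https://localhost:9200", basic_auth=("elastic", "..."))
        # Register the shutdown request, then check progress before stopping the process.
        await client.shutdown.put_node(node_id=node_id, type="restart", reason="OS patching")
        status = await client.shutdown.get_node(node_id=node_id)
        print(status["nodes"][0]["status"])  # e.g. IN_PROGRESS or COMPLETE (illustrative)
        await client.close()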
+ ``_ diff --git a/elasticsearch/_async/client/simulate.py b/elasticsearch/_async/client/simulate.py index 6c40ff3c7..73f71429f 100644 --- a/elasticsearch/_async/client/simulate.py +++ b/elasticsearch/_async/client/simulate.py @@ -64,28 +64,22 @@ async def ingest( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Simulate data ingestion. Run ingest pipelines against a set of provided documents, - optionally with substitute pipeline definitions, to simulate ingesting data into - an index. This API is meant to be used for troubleshooting or pipeline development, - as it does not actually index any data into Elasticsearch. The API runs the default - and final pipeline for that index against a set of documents provided in the - body of the request. If a pipeline contains a reroute processor, it follows that - reroute processor to the new index, running that index's pipelines as well the - same way that a non-simulated ingest would. No data is indexed into Elasticsearch. - Instead, the transformed document is returned, along with the list of pipelines - that have been run and the name of the index where the document would have been - indexed if this were not a simulation. The transformed document is validated - against the mappings that would apply to this index, and any validation error - is reported in the result. This API differs from the simulate pipeline API in - that you specify a single pipeline for that API, and it runs only that one pipeline. - The simulate pipeline API is more useful for developing a single pipeline, while - the simulate ingest API is more useful for troubleshooting the interaction of - the various pipelines that get applied when ingesting into an index. By default, - the pipeline definitions that are currently in the system are used. However, - you can supply substitute pipeline definitions in the body of the request. These - will be used in place of the pipeline definitions that are already in the system. - This can be used to replace existing pipeline definitions or to create new ones. - The pipeline substitutions are used only within this request. + .. raw:: html + +
+          Simulate data ingestion.
+          Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index.
+
+          This API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch.
+
+          The API runs the default and final pipeline for that index against a set of documents provided in the body of the request.
+          If a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index's pipelines as well the same way that a non-simulated ingest would.
+          No data is indexed into Elasticsearch.
+          Instead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation.
+          The transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result.
+
+          This API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline.
+          The simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index.
+
+          By default, the pipeline definitions that are currently in the system are used.
+          However, you can supply substitute pipeline definitions in the body of the request.
+          These will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request.
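A small dry-run sketch of the simulate ingest flow; it is illustrative only, and the index name, documents, substitute pipeline, endpoint, and credentials are assumptions:

    from elasticsearch import AsyncElasticsearch

    async def dry_run_pipeline() -> None:
        client = AsyncElasticsearch("https://localhost:9200", api_key="...")
        # Simulate ingest of two documents with a substitute pipeline definition;
        # nothing is indexed, the transformed documents are returned in the response.
        resp = await client.simulate.ingest(
            index="my-logs",
            docs=[
                {"_source": {"message": "GET /index.html 200"}},
                {"_source": {"message": "POST /login 302"}},
            ],
            pipeline_substitutions={
                "my-pipeline": {"processors": [{"uppercase": {"field": "message"}}]}
            },
        )
        print(resp["docs"][0])
        await client.close()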
+ ``_ diff --git a/elasticsearch/_async/client/slm.py b/elasticsearch/_async/client/slm.py index 1870a9de5..62ef2aa53 100644 --- a/elasticsearch/_async/client/slm.py +++ b/elasticsearch/_async/client/slm.py @@ -38,9 +38,12 @@ async def delete_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a policy. Delete a snapshot lifecycle policy definition. This operation - prevents any future snapshots from being taken but does not cancel in-progress - snapshots or remove previously-taken snapshots. + .. raw:: html + +
+          Delete a policy.
+          Delete a snapshot lifecycle policy definition.
+          This operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots.
+ ``_ @@ -91,10 +94,12 @@ async def execute_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Run a policy. Immediately create a snapshot according to the snapshot lifecycle - policy without waiting for the scheduled time. The snapshot policy is normally - applied according to its schedule, but you might want to manually run a policy - before performing an upgrade or other maintenance. + .. raw:: html + +
+          Run a policy.
+          Immediately create a snapshot according to the snapshot lifecycle policy without waiting for the scheduled time.
+          The snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance.
+ ``_ @@ -144,9 +149,12 @@ async def execute_retention( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Run a retention policy. Manually apply the retention policy to force immediate - removal of snapshots that are expired according to the snapshot lifecycle policy - retention rules. The retention policy is normally applied according to its schedule. + .. raw:: html + +
+          Run a retention policy.
+          Manually apply the retention policy to force immediate removal of snapshots that are expired according to the snapshot lifecycle policy retention rules.
+          The retention policy is normally applied according to its schedule.
+ ``_ @@ -194,8 +202,11 @@ async def get_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get policy information. Get snapshot lifecycle policy definitions and information - about the latest snapshot attempts. + .. raw:: html + +
+          Get policy information.
+          Get snapshot lifecycle policy definitions and information about the latest snapshot attempts.
+ ``_ @@ -248,8 +259,11 @@ async def get_stats( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get snapshot lifecycle management statistics. Get global and policy-level statistics - about actions taken by snapshot lifecycle management. + .. raw:: html + +
+          Get snapshot lifecycle management statistics.
+          Get global and policy-level statistics about actions taken by snapshot lifecycle management.
+ ``_ @@ -296,7 +310,10 @@ async def get_status( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the snapshot lifecycle management status. + .. raw:: html + +
+          Get the snapshot lifecycle management status.
+ ``_ @@ -354,9 +371,13 @@ async def put_lifecycle( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a policy. Create or update a snapshot lifecycle policy. If the - policy already exists, this request increments the policy version. Only the latest - version of a policy is stored. + .. raw:: html + +
+          Create or update a policy.
+          Create or update a snapshot lifecycle policy.
+          If the policy already exists, this request increments the policy version.
+          Only the latest version of a policy is stored.
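A sketch of registering such a policy with the async client; not part of the patch, and the policy ID, schedule, repository, and retention values are placeholders:

    from elasticsearch import AsyncElasticsearch

    async def register_nightly_policy() -> None:
        client = AsyncElasticsearch("https://localhost:9200", basic_auth=("elastic", "..."))
        # Re-registering the same policy_id increments the stored policy version.
        await client.slm.put_lifecycle(
            policy_id="nightly-snapshots",
            schedule="0 30 1 * * ?",          # 01:30 every day
            name="<nightly-snap-{now/d}>",
            repository="my_repository",
            config={"indices": ["*"], "include_global_state": True},
            retention={"expire_after": "30d", "min_count": 5, "max_count": 50},
        )
        await client.close()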
+ ``_ @@ -437,9 +458,12 @@ async def start( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Start snapshot lifecycle management. Snapshot lifecycle management (SLM) starts - automatically when a cluster is formed. Manually starting SLM is necessary only - if it has been stopped using the stop SLM API. + .. raw:: html + +
+          Start snapshot lifecycle management.
+          Snapshot lifecycle management (SLM) starts automatically when a cluster is formed.
+          Manually starting SLM is necessary only if it has been stopped using the stop SLM API.
+ ``_ @@ -488,15 +512,16 @@ async def stop( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Stop snapshot lifecycle management. Stop all snapshot lifecycle management (SLM) - operations and the SLM plugin. This API is useful when you are performing maintenance - on a cluster and need to prevent SLM from performing any actions on your data - streams or indices. Stopping SLM does not stop any snapshots that are in progress. - You can manually trigger snapshots with the run snapshot lifecycle policy API - even if SLM is stopped. The API returns a response as soon as the request is - acknowledged, but the plugin might continue to run until in-progress operations - complete and it can be safely stopped. Use the get snapshot lifecycle management - status API to see if SLM is running. + .. raw:: html + +
+          Stop snapshot lifecycle management.
+          Stop all snapshot lifecycle management (SLM) operations and the SLM plugin.
+          This API is useful when you are performing maintenance on a cluster and need to prevent SLM from performing any actions on your data streams or indices.
+          Stopping SLM does not stop any snapshots that are in progress.
+          You can manually trigger snapshots with the run snapshot lifecycle policy API even if SLM is stopped.
+
+          The API returns a response as soon as the request is acknowledged, but the plugin might continue to run until in-progress operations complete and it can be safely stopped.
+          Use the get snapshot lifecycle management status API to see if SLM is running.
+ ``_ diff --git a/elasticsearch/_async/client/snapshot.py b/elasticsearch/_async/client/snapshot.py index 969be8774..05b1b1c17 100644 --- a/elasticsearch/_async/client/snapshot.py +++ b/elasticsearch/_async/client/snapshot.py @@ -44,8 +44,11 @@ async def cleanup_repository( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clean up the snapshot repository. Trigger the review of the contents of a snapshot - repository and delete any stale data not referenced by existing snapshots. + .. raw:: html + +
+          Clean up the snapshot repository.
+          Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots.
+ ``_ @@ -98,8 +101,11 @@ async def clone( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clone a snapshot. Clone part of all of a snapshot into another snapshot in the - same repository. + .. raw:: html + +
+          Clone a snapshot.
+          Clone part or all of a snapshot into another snapshot in the same repository.
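A quick sketch of such a clone; it is not part of the patch, and the repository, snapshot names, and index pattern are placeholder assumptions:

    from elasticsearch import AsyncElasticsearch

    async def clone_logs_subset() -> None:
        client = AsyncElasticsearch("https://localhost:9200", basic_auth=("elastic", "..."))
        # Clone only the matching indices into a new snapshot within the same repository.
        await client.snapshot.clone(
            repository="my_repository",
            snapshot="snapshot_2024_11_09",
            target_snapshot="snapshot_2024_11_09_logs_only",
            indices="logs-*",
        )
        await client.close()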
+ ``_ @@ -179,7 +185,11 @@ async def create( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a snapshot. Take a snapshot of a cluster or of data streams and indices. + .. raw:: html + +
+          Create a snapshot.
+          Take a snapshot of a cluster or of data streams and indices.
+ ``_ @@ -283,11 +293,13 @@ async def create_repository( verify: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a snapshot repository. IMPORTANT: If you are migrating searchable - snapshots, the repository name must be identical in the source and destination - clusters. To register a snapshot repository, the cluster's global metadata must - be writeable. Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` - and `clsuter.blocks.read_only_allow_delete` settings) that prevent write access. + .. raw:: html + +
+          Create or update a snapshot repository.
+          IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters.
+          To register a snapshot repository, the cluster's global metadata must be writeable.
+          Ensure there are no cluster blocks (for example, cluster.blocks.read_only and cluster.blocks.read_only_allow_delete settings) that prevent write access.
+ ``_ @@ -347,7 +359,10 @@ async def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete snapshots. + .. raw:: html + +
+          Delete snapshots.
+ ``_ @@ -398,9 +413,12 @@ async def delete_repository( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete snapshot repositories. When a repository is unregistered, Elasticsearch - removes only the reference to the location where the repository is storing the - snapshots. The snapshots themselves are left untouched and in place. + .. raw:: html + +
+          Delete snapshot repositories.
+          When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots.
+          The snapshots themselves are left untouched and in place.
+ ``_ @@ -474,7 +492,10 @@ async def get( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get snapshot information. + .. raw:: html + +
+          Get snapshot information.
+ ``_ @@ -586,7 +607,10 @@ async def get_repository( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get snapshot repository information. + .. raw:: html + +
+          Get snapshot repository information.
+ ``_ @@ -648,120 +672,83 @@ async def repository_analyze( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Analyze a snapshot repository. Analyze the performance characteristics and any - incorrect behaviour found in a repository. The response exposes implementation - details of the analysis which may change from version to version. The response - body format is therefore not considered stable and may be different in newer - versions. There are a large number of third-party storage systems available, - not all of which are suitable for use as a snapshot repository by Elasticsearch. - Some storage systems behave incorrectly, or perform poorly, especially when accessed - concurrently by multiple clients as the nodes of an Elasticsearch cluster do. - This API performs a collection of read and write operations on your repository - which are designed to detect incorrect behaviour and to measure the performance - characteristics of your storage system. The default values for the parameters - are deliberately low to reduce the impact of running an analysis inadvertently - and to provide a sensible starting point for your investigations. Run your first - analysis with the default parameter values to check for simple problems. If successful, - run a sequence of increasingly large analyses until you encounter a failure or - you reach a `blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, - a `max_total_data_size` of at least `1tb`, and a `register_operation_count` of - at least `100`. Always specify a generous timeout, possibly `1h` or longer, to - allow time for each analysis to run to completion. Perform the analyses using - a multi-node cluster of a similar size to your production cluster so that it - can detect any problems that only arise when the repository is accessed by many - nodes at once. If the analysis fails, Elasticsearch detected that your repository - behaved unexpectedly. This usually means you are using a third-party storage - system with an incorrect or incompatible implementation of the API it claims - to support. If so, this storage system is not suitable for use as a snapshot - repository. You will need to work with the supplier of your storage system to - address the incompatibilities that Elasticsearch detects. If the analysis is - successful, the API returns details of the testing process, optionally including - how long each operation took. You can use this information to determine the performance - of your storage system. If any operation fails or returns an incorrect result, - the API returns an error. If the API returns an error, it may not have removed - all the data it wrote to the repository. The error will indicate the location - of any leftover data and this path is also recorded in the Elasticsearch logs. - You should verify that this location has been cleaned up correctly. If there - is still leftover data at the specified location, you should manually remove - it. If the connection from your client to Elasticsearch is closed while the client - is waiting for the result of the analysis, the test is cancelled. Some clients - are configured to close their connection if no response is received within a - certain timeout. An analysis takes a long time to complete so you might need - to relax any such client-side timeouts. On cancellation the analysis attempts - to clean up the data it was writing, but it may not be able to remove it all. 
- The path to the leftover data is recorded in the Elasticsearch logs. You should - verify that this location has been cleaned up correctly. If there is still leftover - data at the specified location, you should manually remove it. If the analysis - is successful then it detected no incorrect behaviour, but this does not mean - that correct behaviour is guaranteed. The analysis attempts to detect common - bugs but it does not offer 100% coverage. Additionally, it does not test the - following: * Your repository must perform durable writes. Once a blob has been - written it must remain in place until it is deleted, even after a power loss - or similar disaster. * Your repository must not suffer from silent data corruption. - Once a blob has been written, its contents must remain unchanged until it is - deliberately modified or deleted. * Your repository must behave correctly even - if connectivity from the cluster is disrupted. Reads and writes may fail in this - case, but they must not return incorrect results. IMPORTANT: An analysis writes - a substantial amount of data to your repository and then reads it back again. - This consumes bandwidth on the network between the cluster and the repository, - and storage space and I/O bandwidth on the repository itself. You must ensure - this load does not affect other users of these systems. Analyses respect the - repository settings `max_snapshot_bytes_per_sec` and `max_restore_bytes_per_sec` - if available and the cluster setting `indices.recovery.max_bytes_per_sec` which - you can use to limit the bandwidth they consume. NOTE: This API is intended for - exploratory use by humans. You should expect the request parameters and the response - format to vary in future versions. NOTE: Different versions of Elasticsearch - may perform different checks for repository compatibility, with newer versions - typically being stricter than older ones. A storage system that passes repository - analysis with one version of Elasticsearch may fail with a different version. - This indicates it behaves incorrectly in ways that the former version did not - detect. You must work with the supplier of your storage system to address the - incompatibilities detected by the repository analysis API in any version of Elasticsearch. - NOTE: This API may not work correctly in a mixed-version cluster. *Implementation - details* NOTE: This section of documentation describes how the repository analysis - API works in this version of Elasticsearch, but you should expect the implementation - to vary between versions. The request parameters and response format depend on - details of the implementation so may also be different in newer versions. The - analysis comprises a number of blob-level tasks, as set by the `blob_count` parameter - and a number of compare-and-exchange operations on linearizable registers, as - set by the `register_operation_count` parameter. These tasks are distributed - over the data and master-eligible nodes in the cluster for execution. For most - blob-level tasks, the executing node first writes a blob to the repository and - then instructs some of the other nodes in the cluster to attempt to read the - data it just wrote. The size of the blob is chosen randomly, according to the - `max_blob_size` and `max_total_data_size` parameters. If any of these reads fails - then the repository does not implement the necessary read-after-write semantics - that Elasticsearch requires. 
For some blob-level tasks, the executing node will - instruct some of its peers to attempt to read the data before the writing process - completes. These reads are permitted to fail, but must not return partial data. - If any read returns partial data then the repository does not implement the necessary - atomicity semantics that Elasticsearch requires. For some blob-level tasks, the - executing node will overwrite the blob while its peers are reading it. In this - case the data read may come from either the original or the overwritten blob, - but the read operation must not return partial data or a mix of data from the - two blobs. If any of these reads returns partial data or a mix of the two blobs - then the repository does not implement the necessary atomicity semantics that - Elasticsearch requires for overwrites. The executing node will use a variety - of different methods to write the blob. For instance, where applicable, it will - use both single-part and multi-part uploads. Similarly, the reading nodes will - use a variety of different methods to read the data back again. For instance - they may read the entire blob from start to end or may read only a subset of - the data. For some blob-level tasks, the executing node will cancel the write - before it is complete. In this case, it still instructs some of the other nodes - in the cluster to attempt to read the blob but all of these reads must fail to - find the blob. Linearizable registers are special blobs that Elasticsearch manipulates - using an atomic compare-and-exchange operation. This operation ensures correct - and strongly-consistent behavior even when the blob is accessed by multiple nodes - at the same time. The detailed implementation of the compare-and-exchange operation - on linearizable registers varies by repository type. Repository analysis verifies - that that uncontended compare-and-exchange operations on a linearizable register - blob always succeed. Repository analysis also verifies that contended operations - either succeed or report the contention but do not return incorrect results. - If an operation fails due to contention, Elasticsearch retries the operation - until it succeeds. Most of the compare-and-exchange operations performed by repository - analysis atomically increment a counter which is represented as an 8-byte blob. - Some operations also verify the behavior on small blobs with sizes other than - 8 bytes. + .. raw:: html + +
+          Analyze a snapshot repository.
+          Analyze the performance characteristics and any incorrect behaviour found in a repository.
+
+          The response exposes implementation details of the analysis which may change from version to version.
+          The response body format is therefore not considered stable and may be different in newer versions.
+
+          There are a large number of third-party storage systems available, not all of which are suitable for use as a snapshot repository by Elasticsearch.
+          Some storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do. This API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system.
+
+          The default values for the parameters are deliberately low to reduce the impact of running an analysis inadvertently and to provide a sensible starting point for your investigations.
+          Run your first analysis with the default parameter values to check for simple problems.
+          If successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a blob_count of at least 2000, a max_blob_size of at least 2gb, a max_total_data_size of at least 1tb, and a register_operation_count of at least 100.
+          Always specify a generous timeout, possibly 1h or longer, to allow time for each analysis to run to completion.
+          Perform the analyses using a multi-node cluster of a similar size to your production cluster so that it can detect any problems that only arise when the repository is accessed by many nodes at once.
+
+          If the analysis fails, Elasticsearch detected that your repository behaved unexpectedly.
+          This usually means you are using a third-party storage system with an incorrect or incompatible implementation of the API it claims to support.
+          If so, this storage system is not suitable for use as a snapshot repository.
+          You will need to work with the supplier of your storage system to address the incompatibilities that Elasticsearch detects.
+
+          If the analysis is successful, the API returns details of the testing process, optionally including how long each operation took.
+          You can use this information to determine the performance of your storage system.
+          If any operation fails or returns an incorrect result, the API returns an error.
+          If the API returns an error, it may not have removed all the data it wrote to the repository.
+          The error will indicate the location of any leftover data and this path is also recorded in the Elasticsearch logs.
+          You should verify that this location has been cleaned up correctly.
+          If there is still leftover data at the specified location, you should manually remove it.
+
+          If the connection from your client to Elasticsearch is closed while the client is waiting for the result of the analysis, the test is cancelled.
+          Some clients are configured to close their connection if no response is received within a certain timeout.
+          An analysis takes a long time to complete so you might need to relax any such client-side timeouts.
+          On cancellation the analysis attempts to clean up the data it was writing, but it may not be able to remove it all.
+          The path to the leftover data is recorded in the Elasticsearch logs.
+          You should verify that this location has been cleaned up correctly.
+          If there is still leftover data at the specified location, you should manually remove it.
+
+          If the analysis is successful then it detected no incorrect behaviour, but this does not mean that correct behaviour is guaranteed.
+          The analysis attempts to detect common bugs but it does not offer 100% coverage.
+          Additionally, it does not test the following:
+
+          • Your repository must perform durable writes. Once a blob has been written it must remain in place until it is deleted, even after a power loss or similar disaster.
+          • Your repository must not suffer from silent data corruption. Once a blob has been written, its contents must remain unchanged until it is deliberately modified or deleted.
+          • Your repository must behave correctly even if connectivity from the cluster is disrupted. Reads and writes may fail in this case, but they must not return incorrect results.
+
+          IMPORTANT: An analysis writes a substantial amount of data to your repository and then reads it back again.
+          This consumes bandwidth on the network between the cluster and the repository, and storage space and I/O bandwidth on the repository itself.
+          You must ensure this load does not affect other users of these systems.
+          Analyses respect the repository settings max_snapshot_bytes_per_sec and max_restore_bytes_per_sec if available and the cluster setting indices.recovery.max_bytes_per_sec which you can use to limit the bandwidth they consume.
+
+          NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions.
+
+          NOTE: Different versions of Elasticsearch may perform different checks for repository compatibility, with newer versions typically being stricter than older ones.
+          A storage system that passes repository analysis with one version of Elasticsearch may fail with a different version.
+          This indicates it behaves incorrectly in ways that the former version did not detect.
+          You must work with the supplier of your storage system to address the incompatibilities detected by the repository analysis API in any version of Elasticsearch.
+
+          NOTE: This API may not work correctly in a mixed-version cluster.
+
+          Implementation details
+
+          NOTE: This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions. The request parameters and response format depend on details of the implementation so may also be different in newer versions.
+
+          The analysis comprises a number of blob-level tasks, as set by the blob_count parameter and a number of compare-and-exchange operations on linearizable registers, as set by the register_operation_count parameter.
+          These tasks are distributed over the data and master-eligible nodes in the cluster for execution.
+
+          For most blob-level tasks, the executing node first writes a blob to the repository and then instructs some of the other nodes in the cluster to attempt to read the data it just wrote.
+          The size of the blob is chosen randomly, according to the max_blob_size and max_total_data_size parameters.
+          If any of these reads fails then the repository does not implement the necessary read-after-write semantics that Elasticsearch requires.
+
+          For some blob-level tasks, the executing node will instruct some of its peers to attempt to read the data before the writing process completes.
+          These reads are permitted to fail, but must not return partial data.
+          If any read returns partial data then the repository does not implement the necessary atomicity semantics that Elasticsearch requires.
+
+          For some blob-level tasks, the executing node will overwrite the blob while its peers are reading it.
+          In this case the data read may come from either the original or the overwritten blob, but the read operation must not return partial data or a mix of data from the two blobs.
+          If any of these reads returns partial data or a mix of the two blobs then the repository does not implement the necessary atomicity semantics that Elasticsearch requires for overwrites.
+
+          The executing node will use a variety of different methods to write the blob.
+          For instance, where applicable, it will use both single-part and multi-part uploads.
+          Similarly, the reading nodes will use a variety of different methods to read the data back again.
+          For instance they may read the entire blob from start to end or may read only a subset of the data.
+
+          For some blob-level tasks, the executing node will cancel the write before it is complete.
+          In this case, it still instructs some of the other nodes in the cluster to attempt to read the blob but all of these reads must fail to find the blob.
+
+          Linearizable registers are special blobs that Elasticsearch manipulates using an atomic compare-and-exchange operation.
+          This operation ensures correct and strongly-consistent behavior even when the blob is accessed by multiple nodes at the same time.
+          The detailed implementation of the compare-and-exchange operation on linearizable registers varies by repository type.
+          Repository analysis verifies that uncontended compare-and-exchange operations on a linearizable register blob always succeed.
+          Repository analysis also verifies that contended operations either succeed or report the contention but do not return incorrect results.
+          If an operation fails due to contention, Elasticsearch retries the operation until it succeeds.
+          Most of the compare-and-exchange operations performed by repository analysis atomically increment a counter which is represented as an 8-byte blob.
+          Some operations also verify the behavior on small blobs with sizes other than 8 bytes.
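A sketch of a first, deliberately small analysis run as the docstring advises; not part of the patch, and the repository name, parameter values, and the response field printed are assumptions:

    from elasticsearch import AsyncElasticsearch

    async def analyze_repo() -> None:
        # A long client-side timeout, since an analysis can run for a long time.
        client = AsyncElasticsearch(
            "https://localhost:9200", basic_auth=("elastic", "..."), request_timeout=7200
        )
        resp = await client.snapshot.repository_analyze(
            name="my_repository",
            blob_count=100,
            max_blob_size="1mb",
            timeout="1h",
        )
        print(resp)  # inspect the reported read/write timings before scaling the run up
        await client.close()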
+ ``_ @@ -864,40 +851,31 @@ async def repository_verify_integrity( verify_blob_contents: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Verify the repository integrity. Verify the integrity of the contents of a snapshot - repository. This API enables you to perform a comprehensive check of the contents - of a repository, looking for any anomalies in its data or metadata which might - prevent you from restoring snapshots from the repository or which might cause - future snapshot create or delete operations to fail. If you suspect the integrity - of the contents of one of your snapshot repositories, cease all write activity - to this repository immediately, set its `read_only` option to `true`, and use - this API to verify its integrity. Until you do so: * It may not be possible to - restore some snapshots from this repository. * Searchable snapshots may report - errors when searched or may have unassigned shards. * Taking snapshots into this - repository may fail or may appear to succeed but have created a snapshot which - cannot be restored. * Deleting snapshots from this repository may fail or may - appear to succeed but leave the underlying data on disk. * Continuing to write - to the repository while it is in an invalid state may causing additional damage - to its contents. If the API finds any problems with the integrity of the contents - of your repository, Elasticsearch will not be able to repair the damage. The - only way to bring the repository back into a fully working state after its contents - have been damaged is by restoring its contents from a repository backup which - was taken before the damage occurred. You must also identify what caused the - damage and take action to prevent it from happening again. If you cannot restore - a repository backup, register a new repository and use this for all future snapshot - operations. In some cases it may be possible to recover some of the contents - of a damaged repository, either by restoring as many of its snapshots as needed - and taking new snapshots of the restored data, or by using the reindex API to - copy data from any searchable snapshots mounted from the damaged repository. - Avoid all operations which write to the repository while the verify repository - integrity API is running. If something changes the repository contents while - an integrity verification is running then Elasticsearch may incorrectly report - having detected some anomalies in its contents due to the concurrent writes. - It may also incorrectly fail to report some anomalies that the concurrent writes - prevented it from detecting. NOTE: This API is intended for exploratory use by - humans. You should expect the request parameters and the response format to vary - in future versions. NOTE: This API may not work correctly in a mixed-version - cluster. + .. raw:: html + +

Verify the repository integrity. + Verify the integrity of the contents of a snapshot repository.

+

This API enables you to perform a comprehensive check of the contents of a repository, looking for any anomalies in its data or metadata which might prevent you from restoring snapshots from the repository or which might cause future snapshot create or delete operations to fail.

+

If you suspect the integrity of the contents of one of your snapshot repositories, cease all write activity to this repository immediately, set its read_only option to true, and use this API to verify its integrity.
Until you do so:

  • It may not be possible to restore some snapshots from this repository.
  • Searchable snapshots may report errors when searched or may have unassigned shards.
  • Taking snapshots into this repository may fail or may appear to succeed but have created a snapshot which cannot be restored.
  • Deleting snapshots from this repository may fail or may appear to succeed but leave the underlying data on disk.
  • Continuing to write to the repository while it is in an invalid state may cause additional damage to its contents.

If the API finds any problems with the integrity of the contents of your repository, Elasticsearch will not be able to repair the damage. + The only way to bring the repository back into a fully working state after its contents have been damaged is by restoring its contents from a repository backup which was taken before the damage occurred. + You must also identify what caused the damage and take action to prevent it from happening again.

+

If you cannot restore a repository backup, register a new repository and use this for all future snapshot operations. + In some cases it may be possible to recover some of the contents of a damaged repository, either by restoring as many of its snapshots as needed and taking new snapshots of the restored data, or by using the reindex API to copy data from any searchable snapshots mounted from the damaged repository.

+

Avoid all operations which write to the repository while the verify repository integrity API is running. + If something changes the repository contents while an integrity verification is running then Elasticsearch may incorrectly report having detected some anomalies in its contents due to the concurrent writes. + It may also incorrectly fail to report some anomalies that the concurrent writes prevented it from detecting.

+

NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions.

+

NOTE: This API may not work correctly in a mixed-version cluster.
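
For orientation, here is a minimal sketch of invoking this check from the async Python client. The URL and the my_backups repository name are placeholders, and the assumption that the repository is selected with the name parameter should be checked against your client version; this is a sketch, not a definitive invocation.

```
import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")
    try:
        # Run the integrity check against a repository that has already been
        # made read-only (the repository name is a placeholder).
        report = await client.snapshot.repository_verify_integrity(name="my_backups")
        print(report)
    finally:
        await client.close()

asyncio.run(main())
```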

+ ``_ @@ -994,20 +972,20 @@ async def restore( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Restore a snapshot. Restore a snapshot of a cluster or data streams and indices. - You can restore a snapshot only to a running cluster with an elected master node. - The snapshot repository must be registered and available to the cluster. The - snapshot and cluster versions must be compatible. To restore a snapshot, the - cluster's global metadata must be writable. Ensure there are't any cluster blocks - that prevent writes. The restore operation ignores index blocks. Before you restore - a data stream, ensure the cluster contains a matching index template with data - streams enabled. To check, use the index management feature in Kibana or the - get index template API: ``` GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream - ``` If no such template exists, you can create one or restore a cluster state - that contains one. Without a matching index template, a data stream can't roll - over or create backing indices. If your snapshot contains data from App Search - or Workplace Search, you must restore the Enterprise Search encryption key before - you restore the snapshot. + .. raw:: html + +

Restore a snapshot. + Restore a snapshot of a cluster or data streams and indices.

+

You can restore a snapshot only to a running cluster with an elected master node. + The snapshot repository must be registered and available to the cluster. + The snapshot and cluster versions must be compatible.

+

To restore a snapshot, the cluster's global metadata must be writable. Ensure there aren't any cluster blocks that prevent writes. The restore operation ignores index blocks.

+

Before you restore a data stream, ensure the cluster contains a matching index template with data streams enabled. To check, use the index management feature in Kibana or the get index template API:

GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream

If no such template exists, you can create one or restore a cluster state that contains one. Without a matching index template, a data stream can't roll over or create backing indices.

+

If your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot.
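
A hedged sketch of a restore call from the async Python client follows. The repository, snapshot, and index names are placeholders, and the rename options only illustrate restoring alongside an existing index rather than a recommended configuration.

```
import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")
    try:
        # Restore a single index under a new name so it does not clash with
        # the live index (all names here are placeholders).
        resp = await client.snapshot.restore(
            repository="my_backups",
            snapshot="snapshot_1",
            indices="logs-2024",
            rename_pattern="(.+)",
            rename_replacement="restored-$1",
            wait_for_completion=True,
        )
        print(resp)
    finally:
        await client.close()

asyncio.run(main())
```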

+ ``_ @@ -1100,18 +1078,18 @@ async def status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the snapshot status. Get a detailed description of the current state for - each shard participating in the snapshot. Note that this API should be used only - to obtain detailed shard-level information for ongoing snapshots. If this detail - is not needed or you want to obtain information about one or more existing snapshots, - use the get snapshot API. WARNING: Using the API to return the status of any - snapshots other than currently running snapshots can be expensive. The API requires - a read from the repository for each shard in each snapshot. For example, if you - have 100 snapshots with 1,000 shards each, an API request that includes all snapshots - will require 100,000 reads (100 snapshots x 1,000 shards). Depending on the latency - of your storage, such requests can take an extremely long time to return results. - These requests can also tax machine resources and, when using cloud storage, - incur high processing costs. + .. raw:: html + +

Get the snapshot status.
Get a detailed description of the current state for each shard participating in the snapshot.
Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots.
If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API.

WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive.
The API requires a read from the repository for each shard in each snapshot.
For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards).

Depending on the latency of your storage, such requests can take an extremely long time to return results.
These requests can also tax machine resources and, when using cloud storage, incur high processing costs.
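
To keep the request cheap, the sketch below asks for the status of one named snapshot only, as recommended above; the repository and snapshot names are placeholders.

```
import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")
    try:
        # Limit the request to a single snapshot instead of the expensive
        # "all snapshots" form (names are placeholders).
        status = await client.snapshot.status(repository="my_backups", snapshot="snapshot_1")
        for snap in status["snapshots"]:
            print(snap["snapshot"], snap["state"])
    finally:
        await client.close()

asyncio.run(main())
```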

+ ``_ @@ -1170,8 +1148,11 @@ async def verify_repository( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Verify a snapshot repository. Check for common misconfigurations in a snapshot - repository. + .. raw:: html + +

Verify a snapshot repository. + Check for common misconfigurations in a snapshot repository.

+ ``_ diff --git a/elasticsearch/_async/client/sql.py b/elasticsearch/_async/client/sql.py index 2a93a5837..39ac7c5b9 100644 --- a/elasticsearch/_async/client/sql.py +++ b/elasticsearch/_async/client/sql.py @@ -39,7 +39,10 @@ async def clear_cursor( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear an SQL search cursor. + .. raw:: html + +

Clear an SQL search cursor.

+ ``_ @@ -84,11 +87,17 @@ async def delete_async( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete an async SQL search. Delete an async SQL search or a stored synchronous - SQL search. If the search is still running, the API cancels it. If the Elasticsearch - security features are enabled, only the following users can use this API to delete - a search: * Users with the `cancel_task` cluster privilege. * The user who first - submitted the search. + .. raw:: html + +

Delete an async SQL search. + Delete an async SQL search or a stored synchronous SQL search. + If the search is still running, the API cancels it.

+

If the Elasticsearch security features are enabled, only the following users can use this API to delete a search:

  • Users with the cancel_task cluster privilege.
  • The user who first submitted the search.
+ ``_ @@ -134,10 +143,12 @@ async def get_async( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Get async SQL search results. Get the current status and available results for - an async SQL search or stored synchronous SQL search. If the Elasticsearch security - features are enabled, only the user who first submitted the SQL search can retrieve - the search using this API. + .. raw:: html + +

Get async SQL search results. + Get the current status and available results for an async SQL search or stored synchronous SQL search.

+

If the Elasticsearch security features are enabled, only the user who first submitted the SQL search can retrieve the search using this API.

+ ``_ @@ -195,8 +206,11 @@ async def get_async_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the async SQL search status. Get the current status of an async SQL search - or a stored synchronous SQL search. + .. raw:: html + +

Get the async SQL search status. + Get the current status of an async SQL search or a stored synchronous SQL search.

+ ``_ @@ -281,7 +295,11 @@ async def query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get SQL search results. Run an SQL request. + .. raw:: html + +

Get SQL search results. + Run an SQL request.
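
A minimal sketch of running an SQL request through the async Python client; the library index and the query itself are placeholders, and fetch_size is shown only to limit the page size.

```
import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")
    try:
        # Run a small SQL query; the index and field names are placeholders.
        resp = await client.sql.query(
            query="SELECT author, page_count FROM library ORDER BY page_count DESC",
            fetch_size=5,
        )
        print(resp["columns"])
        for row in resp["rows"]:
            print(row)
    finally:
        await client.close()

asyncio.run(main())
```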

+ ``_ @@ -402,9 +420,12 @@ async def translate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Translate SQL into Elasticsearch queries. Translate an SQL search into a search - API request containing Query DSL. It accepts the same request body parameters - as the SQL search API, excluding `cursor`. + .. raw:: html + +

Translate SQL into Elasticsearch queries. + Translate an SQL search into a search API request containing Query DSL. + It accepts the same request body parameters as the SQL search API, excluding cursor.

+ ``_ diff --git a/elasticsearch/_async/client/ssl.py b/elasticsearch/_async/client/ssl.py index 6ab683691..9fc313614 100644 --- a/elasticsearch/_async/client/ssl.py +++ b/elasticsearch/_async/client/ssl.py @@ -35,23 +35,22 @@ async def certificates( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get SSL certificates. Get information about the X.509 certificates that are used - to encrypt communications in the cluster. The API returns a list that includes - certificates from all TLS contexts including: - Settings for transport and HTTP - interfaces - TLS settings that are used within authentication realms - TLS settings - for remote monitoring exporters The list includes certificates that are used - for configuring trust, such as those configured in the `xpack.security.transport.ssl.truststore` - and `xpack.security.transport.ssl.certificate_authorities` settings. It also - includes certificates that are used for configuring server identity, such as - `xpack.security.http.ssl.keystore` and `xpack.security.http.ssl.certificate settings`. - The list does not include certificates that are sourced from the default SSL - context of the Java Runtime Environment (JRE), even if those certificates are - in use within Elasticsearch. NOTE: When a PKCS#11 token is configured as the - truststore of the JRE, the API returns all the certificates that are included - in the PKCS#11 token irrespective of whether these are used in the Elasticsearch - TLS configuration. If Elasticsearch is configured to use a keystore or truststore, - the API output includes all certificates in that store, even though some of the - certificates might not be in active use within the cluster. + .. raw:: html + +

Get SSL certificates.

+

Get information about the X.509 certificates that are used to encrypt communications in the cluster. + The API returns a list that includes certificates from all TLS contexts including:

  • Settings for transport and HTTP interfaces
  • TLS settings that are used within authentication realms
  • TLS settings for remote monitoring exporters

The list includes certificates that are used for configuring trust, such as those configured in the xpack.security.transport.ssl.truststore and xpack.security.transport.ssl.certificate_authorities settings. + It also includes certificates that are used for configuring server identity, such as xpack.security.http.ssl.keystore and xpack.security.http.ssl.certificate settings.

+

The list does not include certificates that are sourced from the default SSL context of the Java Runtime Environment (JRE), even if those certificates are in use within Elasticsearch.

+

NOTE: When a PKCS#11 token is configured as the truststore of the JRE, the API returns all the certificates that are included in the PKCS#11 token irrespective of whether these are used in the Elasticsearch TLS configuration.

+

If Elasticsearch is configured to use a keystore or truststore, the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster.
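
A small sketch of reading this list from the async Python client, assuming a TLS-enabled cluster; the URL and the CA bundle path are placeholders.

```
import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    # The URL and CA bundle path are placeholders for a TLS-enabled cluster.
    client = AsyncElasticsearch("https://localhost:9200", ca_certs="./http_ca.crt")
    try:
        certs = await client.ssl.certificates()
        for cert in certs:
            print(cert["path"], cert["expiry"])
    finally:
        await client.close()

asyncio.run(main())
```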

+ ``_ """ diff --git a/elasticsearch/_async/client/synonyms.py b/elasticsearch/_async/client/synonyms.py index e6fe303fc..21cbd8084 100644 --- a/elasticsearch/_async/client/synonyms.py +++ b/elasticsearch/_async/client/synonyms.py @@ -36,21 +36,22 @@ async def delete_synonym( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a synonym set. You can only delete a synonyms set that is not in use by - any index analyzer. Synonyms sets can be used in synonym graph token filters - and synonym token filters. These synonym filters can be used as part of search - analyzers. Analyzers need to be loaded when an index is restored (such as when - a node starts, or the index becomes open). Even if the analyzer is not used on - any field mapping, it still needs to be loaded on the index recovery phase. If - any analyzers cannot be loaded, the index becomes unavailable and the cluster - status becomes red or yellow as index shards are not available. To prevent that, - synonyms sets that are used in analyzers can't be deleted. A delete request in - this case will return a 400 response code. To remove a synonyms set, you must - first remove all indices that contain analyzers using it. You can migrate an - index by creating a new index that does not contain the token filter with the - synonyms set, and use the reindex API in order to copy over the index data. Once - finished, you can delete the index. When the synonyms set is not used in analyzers, - you will be able to delete it. + .. raw:: html + +

Delete a synonym set.

+

You can only delete a synonyms set that is not in use by any index analyzer.

+

Synonyms sets can be used in synonym graph token filters and synonym token filters. + These synonym filters can be used as part of search analyzers.

+

Analyzers need to be loaded when an index is restored (such as when a node starts, or the index becomes open). + Even if the analyzer is not used on any field mapping, it still needs to be loaded on the index recovery phase.

+

If any analyzers cannot be loaded, the index becomes unavailable and the cluster status becomes red or yellow as index shards are not available.
To prevent that, synonyms sets that are used in analyzers can't be deleted.
A delete request in this case will return a 400 response code.

To remove a synonyms set, you must first remove all indices that contain analyzers using it.
You can migrate an index by creating a new index that does not contain the token filter with the synonyms set, and use the reindex API in order to copy over the index data.
Once finished, you can delete the index.
When the synonyms set is not used in analyzers, you will be able to delete it.

+ ``_ @@ -91,7 +92,11 @@ async def delete_synonym_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a synonym rule. Delete a synonym rule from a synonym set. + .. raw:: html + +

Delete a synonym rule. + Delete a synonym rule from a synonym set.

+ ``_ @@ -141,7 +146,10 @@ async def get_synonym( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a synonym set. + .. raw:: html + +

Get a synonym set.

+ ``_ @@ -188,7 +196,11 @@ async def get_synonym_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a synonym rule. Get a synonym rule from a synonym set. + .. raw:: html + +

Get a synonym rule. + Get a synonym rule from a synonym set.

+ ``_ @@ -237,7 +249,11 @@ async def get_synonyms_sets( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Get all synonym sets. Get a summary of all defined synonym sets. + .. raw:: html + +

Get all synonym sets. + Get a summary of all defined synonym sets.

+ ``_ @@ -286,12 +302,14 @@ async def put_synonym( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 - synonym rules per set. If you need to manage more synonym rules, you can create - multiple synonym sets. When an existing synonyms set is updated, the search analyzers - that use the synonyms set are reloaded automatically for all indices. This is - equivalent to invoking the reload search analyzers API for all indices that use - the synonyms set. + .. raw:: html + +

Create or update a synonym set. + Synonyms sets are limited to a maximum of 10,000 synonym rules per set. + If you need to manage more synonym rules, you can create multiple synonym sets.

+

When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. + This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set.
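
A sketch of creating a small synonyms set from the async Python client; the set and rule identifiers are placeholders, and the two rules only illustrate the equivalent and explicit-mapping rule syntaxes.

```
import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")
    try:
        # Create (or replace) a synonyms set; names here are placeholders.
        resp = await client.synonyms.put_synonym(
            id="my-synonyms-set",
            synonyms_set=[
                {"id": "rule-1", "synonyms": "hello, hi"},
                {"id": "rule-2", "synonyms": "laptop => notebook"},
            ],
        )
        print(resp)
    finally:
        await client.close()

asyncio.run(main())
```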

+ ``_ @@ -344,10 +362,13 @@ async def put_synonym_rule( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a synonym rule. Create or update a synonym rule in a synonym - set. If any of the synonym rules included is invalid, the API returns an error. - When you update a synonym rule, all analyzers using the synonyms set will be - reloaded automatically to reflect the new rule. + .. raw:: html + +

Create or update a synonym rule. + Create or update a synonym rule in a synonym set.

+

If any of the synonym rules included is invalid, the API returns an error.

+

When you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule.

+ ``_ diff --git a/elasticsearch/_async/client/tasks.py b/elasticsearch/_async/client/tasks.py index 576ef3c41..af54ecafa 100644 --- a/elasticsearch/_async/client/tasks.py +++ b/elasticsearch/_async/client/tasks.py @@ -47,19 +47,18 @@ async def cancel( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Cancel a task. WARNING: The task management API is new and should still be considered - a beta feature. The API may change in ways that are not backwards compatible. - A task may continue to run for some time after it has been cancelled because - it may not be able to safely stop its current activity straight away. It is also - possible that Elasticsearch must complete its work on other tasks before it can - process the cancellation. The get task information API will continue to list - these cancelled tasks until they complete. The cancelled flag in the response - indicates that the cancellation command has been processed and the task will - stop as soon as possible. To troubleshoot why a cancelled task does not complete - promptly, use the get task information API with the `?detailed` parameter to - identify the other tasks the system is running. You can also use the node hot - threads API to obtain detailed information about the work the system is doing - instead of completing the cancelled task. + .. raw:: html + +

Cancel a task.

+

WARNING: The task management API is new and should still be considered a beta feature. + The API may change in ways that are not backwards compatible.

+

A task may continue to run for some time after it has been cancelled because it may not be able to safely stop its current activity straight away.
It is also possible that Elasticsearch must complete its work on other tasks before it can process the cancellation.
The get task information API will continue to list these cancelled tasks until they complete.
The cancelled flag in the response indicates that the cancellation command has been processed and the task will stop as soon as possible.

To troubleshoot why a cancelled task does not complete promptly, use the get task information API with the ?detailed parameter to identify the other tasks the system is running.
You can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task.
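
A sketch of that troubleshooting flow from the async Python client: cancel a task by a placeholder id, then list the remaining reindex tasks in detail to see what else is running.

```
import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")
    try:
        # Cancel a long-running task (the task id is a placeholder), then
        # list remaining reindex tasks with detailed information.
        await client.tasks.cancel(task_id="oTUltX4IQMOUUVeiohTt8A:12345")
        remaining = await client.tasks.list(actions="*reindex", detailed=True)
        print(remaining)
    finally:
        await client.close()

asyncio.run(main())
```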

+ ``_ @@ -120,11 +119,14 @@ async def get( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get task information. Get information about a task currently running in the cluster. - WARNING: The task management API is new and should still be considered a beta - feature. The API may change in ways that are not backwards compatible. If the - task identifier is not found, a 404 response code indicates that there are no - resources that match the request. + .. raw:: html + +

Get task information. + Get information about a task currently running in the cluster.

+

WARNING: The task management API is new and should still be considered a beta feature. + The API may change in ways that are not backwards compatible.

+

If the task identifier is not found, a 404 response code indicates that there are no resources that match the request.

+ ``_ @@ -181,27 +183,60 @@ async def list( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get all tasks. Get information about the tasks currently running on one or more - nodes in the cluster. WARNING: The task management API is new and should still - be considered a beta feature. The API may change in ways that are not backwards - compatible. **Identifying running tasks** The `X-Opaque-Id header`, when provided - on the HTTP request header, is going to be returned as a header in the response - as well as in the headers field for in the task information. This enables you - to track certain calls or associate certain tasks with the client that started - them. For example: ``` curl -i -H "X-Opaque-Id: 123456" "http://localhost:9200/_tasks?group_by=parents" - ``` The API returns the following result: ``` HTTP/1.1 200 OK X-Opaque-Id: 123456 - content-type: application/json; charset=UTF-8 content-length: 831 { "tasks" : - { "u5lcZHqcQhu-rUoFaqDphA:45" : { "node" : "u5lcZHqcQhu-rUoFaqDphA", "id" : 45, - "type" : "transport", "action" : "cluster:monitor/tasks/lists", "start_time_in_millis" - : 1513823752749, "running_time_in_nanos" : 293139, "cancellable" : false, "headers" - : { "X-Opaque-Id" : "123456" }, "children" : [ { "node" : "u5lcZHqcQhu-rUoFaqDphA", - "id" : 46, "type" : "direct", "action" : "cluster:monitor/tasks/lists[n]", "start_time_in_millis" - : 1513823752750, "running_time_in_nanos" : 92133, "cancellable" : false, "parent_task_id" - : "u5lcZHqcQhu-rUoFaqDphA:45", "headers" : { "X-Opaque-Id" : "123456" } } ] } - } } ``` In this example, `X-Opaque-Id: 123456` is the ID as a part of the response - header. The `X-Opaque-Id` in the task `headers` is the ID for the task that was - initiated by the REST request. The `X-Opaque-Id` in the children `headers` is - the child task of the task that was initiated by the REST request. + .. raw:: html + +

Get all tasks. + Get information about the tasks currently running on one or more nodes in the cluster.

+

WARNING: The task management API is new and should still be considered a beta feature. + The API may change in ways that are not backwards compatible.

+

Identifying running tasks

+

The X-Opaque-Id header, when provided on the HTTP request header, is going to be returned as a header in the response as well as in the headers field in the task information.
This enables you to track certain calls or associate certain tasks with the client that started them.
For example:

curl -i -H "X-Opaque-Id: 123456" "http://localhost:9200/_tasks?group_by=parents"

The API returns the following result:

+
HTTP/1.1 200 OK
+          X-Opaque-Id: 123456
+          content-type: application/json; charset=UTF-8
+          content-length: 831
+
+          {
+            "tasks" : {
+              "u5lcZHqcQhu-rUoFaqDphA:45" : {
+                "node" : "u5lcZHqcQhu-rUoFaqDphA",
+                "id" : 45,
+                "type" : "transport",
+                "action" : "cluster:monitor/tasks/lists",
+                "start_time_in_millis" : 1513823752749,
+                "running_time_in_nanos" : 293139,
+                "cancellable" : false,
+                "headers" : {
+                  "X-Opaque-Id" : "123456"
+                },
+                "children" : [
+                  {
+                    "node" : "u5lcZHqcQhu-rUoFaqDphA",
+                    "id" : 46,
+                    "type" : "direct",
+                    "action" : "cluster:monitor/tasks/lists[n]",
+                    "start_time_in_millis" : 1513823752750,
+                    "running_time_in_nanos" : 92133,
+                    "cancellable" : false,
+                    "parent_task_id" : "u5lcZHqcQhu-rUoFaqDphA:45",
+                    "headers" : {
+                      "X-Opaque-Id" : "123456"
+                    }
+                  }
+                ]
+              }
+            }
+           }
+          
+

In this example, X-Opaque-Id: 123456 is the ID as a part of the response header.
The X-Opaque-Id in the task headers is the ID for the task that was initiated by the REST request.
The X-Opaque-Id in the children headers is the child task of the task that was initiated by the REST request.
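
A rough Python equivalent of the curl example above, assuming the 8.x client's options(opaque_id=...) mechanism is available for setting the X-Opaque-Id header; the opaque id value is arbitrary.

```
import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")
    try:
        # Send the request with an X-Opaque-Id header and group results by
        # parent task, mirroring the curl example above.
        tasks = await client.options(opaque_id="123456").tasks.list(group_by="parents")
        print(tasks)
    finally:
        await client.close()

asyncio.run(main())
```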

+ ``_ diff --git a/elasticsearch/_async/client/text_structure.py b/elasticsearch/_async/client/text_structure.py index f06f0940a..e5d7b1e12 100644 --- a/elasticsearch/_async/client/text_structure.py +++ b/elasticsearch/_async/client/text_structure.py @@ -53,22 +53,24 @@ async def find_field_structure( timestamp_format: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Find the structure of a text field. Find the structure of a text field in an - Elasticsearch index. This API provides a starting point for extracting further - information from log messages already ingested into Elasticsearch. For example, - if you have ingested data into a very simple index that has just `@timestamp` - and message fields, you can use this API to see what common structure exists - in the message field. The response from the API contains: * Sample messages. - * Statistics that reveal the most common values for all fields detected within - the text and basic numeric statistics for numeric fields. * Information about - the structure of the text, which is useful when you write ingest configurations - to index it or similarly formatted text. * Appropriate mappings for an Elasticsearch - index, which you could use to ingest the text. All this information can be calculated - by the structure finder with no guidance. However, you can optionally override - some of the decisions about the text structure by specifying one or more query - parameters. If the structure finder produces unexpected results, specify the - `explain` query parameter and an explanation will appear in the response. It - helps determine why the returned structure was chosen. + .. raw:: html + +

Find the structure of a text field. + Find the structure of a text field in an Elasticsearch index.

+

This API provides a starting point for extracting further information from log messages already ingested into Elasticsearch. + For example, if you have ingested data into a very simple index that has just @timestamp and message fields, you can use this API to see what common structure exists in the message field.

+

The response from the API contains:

  • Sample messages.
  • Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.
  • Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.
  • Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.

All this information can be calculated by the structure finder with no guidance. + However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.

+

If the structure finder produces unexpected results, specify the explain query parameter and an explanation will appear in the response. + It helps determine why the returned structure was chosen.

+ ``_ @@ -237,23 +239,25 @@ async def find_message_structure( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Find the structure of text messages. Find the structure of a list of text messages. - The messages must contain data that is suitable to be ingested into Elasticsearch. - This API provides a starting point for ingesting data into Elasticsearch in a - format that is suitable for subsequent use with other Elastic Stack functionality. - Use this API rather than the find text structure API if your input text has already - been split up into separate messages by some other process. The response from - the API contains: * Sample messages. * Statistics that reveal the most common - values for all fields detected within the text and basic numeric statistics for - numeric fields. * Information about the structure of the text, which is useful - when you write ingest configurations to index it or similarly formatted text. - Appropriate mappings for an Elasticsearch index, which you could use to ingest - the text. All this information can be calculated by the structure finder with - no guidance. However, you can optionally override some of the decisions about - the text structure by specifying one or more query parameters. If the structure - finder produces unexpected results, specify the `explain` query parameter and - an explanation will appear in the response. It helps determine why the returned - structure was chosen. + .. raw:: html + +

Find the structure of text messages. + Find the structure of a list of text messages. + The messages must contain data that is suitable to be ingested into Elasticsearch.

+

This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. + Use this API rather than the find text structure API if your input text has already been split up into separate messages by some other process.

+

The response from the API contains:

  • Sample messages.
  • Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.
  • Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.
  • Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.

All this information can be calculated by the structure finder with no guidance. + However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.

+

If the structure finder produces unexpected results, specify the explain query parameter and an explanation will appear in the response. + It helps determine why the returned structure was chosen.

+ ``_ @@ -410,22 +414,24 @@ async def find_structure( timestamp_format: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Find the structure of a text file. The text file must contain data that is suitable - to be ingested into Elasticsearch. This API provides a starting point for ingesting - data into Elasticsearch in a format that is suitable for subsequent use with - other Elastic Stack functionality. Unlike other Elasticsearch endpoints, the - data that is posted to this endpoint does not need to be UTF-8 encoded and in - JSON format. It must, however, be text; binary text formats are not currently - supported. The size is limited to the Elasticsearch HTTP receive buffer size, - which defaults to 100 Mb. The response from the API contains: * A couple of messages - from the beginning of the text. * Statistics that reveal the most common values - for all fields detected within the text and basic numeric statistics for numeric - fields. * Information about the structure of the text, which is useful when you - write ingest configurations to index it or similarly formatted text. * Appropriate - mappings for an Elasticsearch index, which you could use to ingest the text. - All this information can be calculated by the structure finder with no guidance. - However, you can optionally override some of the decisions about the text structure - by specifying one or more query parameters. + .. raw:: html + +

Find the structure of a text file. + The text file must contain data that is suitable to be ingested into Elasticsearch.

+

This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality.
Unlike other Elasticsearch endpoints, the data that is posted to this endpoint does not need to be UTF-8 encoded and in JSON format.
It must, however, be text; binary text formats are not currently supported.
The size is limited to the Elasticsearch HTTP receive buffer size, which defaults to 100 Mb.

+

The response from the API contains:

  • A couple of messages from the beginning of the text.
  • Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.
  • Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.
  • Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.

All this information can be calculated by the structure finder with no guidance. + However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.

+ ``_ @@ -607,9 +613,12 @@ async def test_grok_pattern( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Test a Grok pattern. Test a Grok pattern on one or more lines of text. The API - indicates whether the lines match the pattern together with the offsets and lengths - of the matched substrings. + .. raw:: html + +

Test a Grok pattern. + Test a Grok pattern on one or more lines of text. + The API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings.
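
A sketch of exercising this API from the async Python client, assuming the pattern and sample lines are passed as the grok_pattern and text keyword arguments; the pattern and log lines are made up for illustration.

```
import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")
    try:
        # Check whether a Grok pattern matches two sample lines; the response
        # reports per-line match details.
        resp = await client.text_structure.test_grok_pattern(
            grok_pattern="%{TIMESTAMP_ISO8601:timestamp} %{LOGLEVEL:level} %{GREEDYDATA:message}",
            text=[
                "2024-11-09T10:06:17Z INFO node started",
                "this line should not match",
            ],
        )
        print(resp)
    finally:
        await client.close()

asyncio.run(main())
```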

+ ``_ diff --git a/elasticsearch/_async/client/transform.py b/elasticsearch/_async/client/transform.py index ca05c9ac7..ae7e846d5 100644 --- a/elasticsearch/_async/client/transform.py +++ b/elasticsearch/_async/client/transform.py @@ -39,7 +39,11 @@ async def delete_transform( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a transform. Deletes a transform. + .. raw:: html + +

Delete a transform. + Deletes a transform.

+ ``_ @@ -99,7 +103,11 @@ async def get_transform( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Get transforms. Retrieves configuration information for transforms. + .. raw:: html + +

Get transforms. + Retrieves configuration information for transforms.

+ ``_ @@ -168,7 +176,11 @@ async def get_transform_stats( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get transform stats. Retrieves usage information for transforms. + .. raw:: html + +

Get transform stats. + Retrieves usage information for transforms.

+ ``_ @@ -249,12 +261,14 @@ async def preview_transform( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Preview a transform. Generates a preview of the results that you will get when - you create a transform with the same configuration. It returns a maximum of 100 - results. The calculations are based on all the current data in the source index. - It also generates a list of mappings and settings for the destination index. - These values are determined based on the field types of the source index and - the transform aggregations. + .. raw:: html + +

Preview a transform. + Generates a preview of the results that you will get when you create a transform with the same configuration.

+

It returns a maximum of 100 results.
The calculations are based on all the current data in the source index.
It also generates a list of mappings and settings for the destination index.
These values are determined based on the field types of the source index and the transform aggregations.

+ ``_ @@ -371,27 +385,27 @@ async def put_transform( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a transform. Creates a transform. A transform copies data from source - indices, transforms it, and persists it into an entity-centric destination index. - You can also think of the destination index as a two-dimensional tabular data - structure (known as a data frame). The ID for each document in the data frame - is generated from a hash of the entity, so there is a unique row per entity. - You must choose either the latest or pivot method for your transform; you cannot - use both in a single transform. If you choose to use the pivot method for your - transform, the entities are defined by the set of `group_by` fields in the pivot - object. If you choose to use the latest method, the entities are defined by the - `unique_key` field values in the latest object. You must have `create_index`, - `index`, and `read` privileges on the destination index and `read` and `view_index_metadata` - privileges on the source indices. When Elasticsearch security features are enabled, - the transform remembers which roles the user that created it had at the time - of creation and uses those same roles. If those roles do not have the required - privileges on the source and destination indices, the transform fails when it - attempts unauthorized operations. NOTE: You must use Kibana or this API to create - a transform. Do not add a transform directly into any `.transform-internal*` - indices using the Elasticsearch index API. If Elasticsearch security features - are enabled, do not give users any privileges on `.transform-internal*` indices. - If you used transforms prior to 7.5, also do not give users any privileges on - `.data-frame-internal*` indices. + .. raw:: html + +

Create a transform. + Creates a transform.

+

A transform copies data from source indices, transforms it, and persists it into an entity-centric destination index.
You can also think of the destination index as a two-dimensional tabular data structure (known as a data frame).
The ID for each document in the data frame is generated from a hash of the entity, so there is a unique row per entity.

You must choose either the latest or pivot method for your transform; you cannot use both in a single transform.
If you choose to use the pivot method for your transform, the entities are defined by the set of group_by fields in the pivot object.
If you choose to use the latest method, the entities are defined by the unique_key field values in the latest object.

You must have create_index, index, and read privileges on the destination index and read and view_index_metadata privileges on the source indices.
When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles.
If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations.

NOTE: You must use Kibana or this API to create a transform.
Do not add a transform directly into any .transform-internal* indices using the Elasticsearch index API.
If Elasticsearch security features are enabled, do not give users any privileges on .transform-internal* indices.
If you used transforms prior to 7.5, also do not give users any privileges on .data-frame-internal* indices.
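
As a sketch of the pivot flavour described above, the example below defines and then starts a transform from the async Python client; the source index, destination index, field names, and schedule are placeholders rather than recommended values.

```
import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")
    try:
        # Define a pivot transform over a placeholder source index, then start it.
        await client.transform.put_transform(
            transform_id="ecommerce-customer-totals",
            source={"index": "kibana_sample_data_ecommerce"},
            dest={"index": "customer-totals"},
            pivot={
                "group_by": {"customer_id": {"terms": {"field": "customer_id"}}},
                "aggregations": {"total_spent": {"sum": {"field": "taxful_total_price"}}},
            },
            frequency="5m",
            sync={"time": {"field": "order_date", "delay": "60s"}},
        )
        await client.transform.start_transform(transform_id="ecommerce-customer-totals")
    finally:
        await client.close()

asyncio.run(main())
```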

+ ``_ @@ -492,9 +506,13 @@ async def reset_transform( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Reset a transform. Resets a transform. Before you can reset it, you must stop - it; alternatively, use the `force` query parameter. If the destination index - was created by the transform, it is deleted. + .. raw:: html + +

Reset a transform. + Resets a transform. + Before you can reset it, you must stop it; alternatively, use the force query parameter. + If the destination index was created by the transform, it is deleted.

+ ``_ @@ -546,11 +564,15 @@ async def schedule_now_transform( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Schedule a transform to start now. Instantly runs a transform to process data. - If you _schedule_now a transform, it will process the new data instantly, without - waiting for the configured frequency interval. After _schedule_now API is called, - the transform will be processed again at now + frequency unless _schedule_now - API is called again in the meantime. + .. raw:: html + +

Schedule a transform to start now.
Instantly runs a transform to process data.

If you _schedule_now a transform, it will process the new data instantly, without waiting for the configured frequency interval.
After the _schedule_now API is called, the transform will be processed again at now + frequency unless the _schedule_now API is called again in the meantime.

+ ``_ @@ -597,24 +619,24 @@ async def start_transform( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Start a transform. Starts a transform. When you start a transform, it creates - the destination index if it does not already exist. The `number_of_shards` is - set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, - it deduces the mapping definitions for the destination index from the source - indices and the transform aggregations. If fields in the destination index are - derived from scripts (as in the case of `scripted_metric` or `bucket_script` - aggregations), the transform uses dynamic mappings unless an index template exists. - If it is a latest transform, it does not deduce mapping definitions; it uses - dynamic mappings. To use explicit mappings, create the destination index before - you start the transform. Alternatively, you can create an index template, though - it does not affect the deduced mappings in a pivot transform. When the transform - starts, a series of validations occur to ensure its success. If you deferred - validation when you created the transform, they occur when you start the transform—​with - the exception of privilege checks. When Elasticsearch security features are enabled, - the transform remembers which roles the user that created it had at the time - of creation and uses those same roles. If those roles do not have the required - privileges on the source and destination indices, the transform fails when it - attempts unauthorized operations. + .. raw:: html + +

Start a transform. + Starts a transform.

+

When you start a transform, it creates the destination index if it does not already exist.
The number_of_shards is set to 1 and the auto_expand_replicas is set to 0-1.
If it is a pivot transform, it deduces the mapping definitions for the destination index from the source indices and the transform aggregations.
If fields in the destination index are derived from scripts (as in the case of scripted_metric or bucket_script aggregations), the transform uses dynamic mappings unless an index template exists.
If it is a latest transform, it does not deduce mapping definitions; it uses dynamic mappings.
To use explicit mappings, create the destination index before you start the transform.
Alternatively, you can create an index template, though it does not affect the deduced mappings in a pivot transform.

When the transform starts, a series of validations occur to ensure its success.
If you deferred validation when you created the transform, they occur when you start the transform, with the exception of privilege checks.
When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles.
If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations.

+ ``_ @@ -668,7 +690,11 @@ async def stop_transform( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stop transforms. Stops one or more transforms. + .. raw:: html + +

Stop transforms. + Stops one or more transforms.

+ ``_ @@ -761,14 +787,16 @@ async def update_transform( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update a transform. Updates certain properties of a transform. All updated properties - except `description` do not take effect until after the transform starts the - next checkpoint, thus there is data consistency in each checkpoint. To use this - API, you must have `read` and `view_index_metadata` privileges for the source - indices. You must also have `index` and `read` privileges for the destination - index. When Elasticsearch security features are enabled, the transform remembers - which roles the user who updated it had at the time of update and runs with those - privileges. + .. raw:: html + +

Update a transform. + Updates certain properties of a transform.

+

All updated properties except description do not take effect until after the transform starts the next checkpoint, thus there is data consistency in each checkpoint.
To use this API, you must have read and view_index_metadata privileges for the source indices.
You must also have index and read privileges for the destination index.
When Elasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the time of update and runs with those privileges.

+ ``_ @@ -849,20 +877,21 @@ async def upgrade_transforms( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Upgrade all transforms. Transforms are compatible across minor versions and between - supported major versions. However, over time, the format of transform configuration - information may change. This API identifies transforms that have a legacy configuration - format and upgrades them to the latest version. It also cleans up the internal - data structures that store the transform state and checkpoints. The upgrade does - not affect the source and destination indices. The upgrade also does not affect - the roles that transforms use when Elasticsearch security features are enabled; - the role used to read source data and write to the destination index remains - unchanged. If a transform upgrade step fails, the upgrade stops and an error - is returned about the underlying issue. Resolve the issue then re-run the process - again. A summary is returned when the upgrade is finished. To ensure continuous - transforms remain running during a major version upgrade of the cluster – for - example, from 7.16 to 8.0 – it is recommended to upgrade transforms before upgrading - the cluster. You may want to perform a recent cluster backup prior to the upgrade. + .. raw:: html + +

Upgrade all transforms. + Transforms are compatible across minor versions and between supported major versions. + However, over time, the format of transform configuration information may change. + This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. + It also cleans up the internal data structures that store the transform state and checkpoints. + The upgrade does not affect the source and destination indices. + The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains unchanged.

+

If a transform upgrade step fails, the upgrade stops and an error is returned about the underlying issue. + Resolve the issue then re-run the process again. + A summary is returned when the upgrade is finished.

+

To ensure continuous transforms remain running during a major version upgrade of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade transforms before upgrading the cluster. + You may want to perform a recent cluster backup prior to the upgrade.

+ ``_ diff --git a/elasticsearch/_async/client/watcher.py b/elasticsearch/_async/client/watcher.py index 70949c9e6..7fe3d0a4b 100644 --- a/elasticsearch/_async/client/watcher.py +++ b/elasticsearch/_async/client/watcher.py @@ -37,14 +37,16 @@ async def ack_watch( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Acknowledge a watch. Acknowledging a watch enables you to manually throttle the - execution of the watch's actions. The acknowledgement state of an action is stored - in the `status.actions..ack.state` structure. IMPORTANT: If the specified - watch is currently being executed, this API will return an error The reason for - this behavior is to prevent overwriting the watch status from a watch execution. - Acknowledging an action throttles further executions of that action until its - `ack.state` is reset to `awaits_successful_execution`. This happens when the - condition of the watch is not met (the condition evaluates to false). + .. raw:: html + +

Acknowledge a watch. + Acknowledging a watch enables you to manually throttle the execution of the watch's actions.

+

The acknowledgement state of an action is stored in the status.actions.<id>.ack.state structure.

+

IMPORTANT: If the specified watch is currently being executed, this API will return an error.
The reason for this behavior is to prevent overwriting the watch status from a watch execution.

+

Acknowledging an action throttles further executions of that action until its ack.state is reset to awaits_successful_execution. + This happens when the condition of the watch is not met (the condition evaluates to false).

+ ``_ @@ -96,7 +98,11 @@ async def activate_watch( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Activate a watch. A watch can be either active or inactive. + .. raw:: html + +

Activate a watch. + A watch can be either active or inactive.

+ ``_ @@ -136,7 +142,11 @@ async def deactivate_watch( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deactivate a watch. A watch can be either active or inactive. + .. raw:: html + +

Deactivate a watch. + A watch can be either active or inactive.

+ ``_ @@ -176,13 +186,15 @@ async def delete_watch( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a watch. When the watch is removed, the document representing the watch - in the `.watches` index is gone and it will never be run again. Deleting a watch - does not delete any watch execution records related to this watch from the watch - history. IMPORTANT: Deleting a watch must be done by using only this API. Do - not delete the watch directly from the `.watches` index using the Elasticsearch - delete document API When Elasticsearch security features are enabled, make sure - no write privileges are granted to anyone for the `.watches` index. + .. raw:: html + +

Delete a watch. + When the watch is removed, the document representing the watch in the .watches index is gone and it will never be run again.

+

Deleting a watch does not delete any watch execution records related to this watch from the watch history.

+

IMPORTANT: Deleting a watch must be done by using only this API.
Do not delete the watch directly from the .watches index using the Elasticsearch delete document API.
When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the .watches index.

+ ``_ @@ -251,21 +263,19 @@ async def execute_watch( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Run a watch. This API can be used to force execution of the watch outside of - its triggering logic or to simulate the watch execution for debugging purposes. - For testing and debugging purposes, you also have fine-grained control on how - the watch runs. You can run the watch without running all of its actions or alternatively - by simulating them. You can also force execution by ignoring the watch condition - and control whether a watch record would be written to the watch history after - it runs. You can use the run watch API to run watches that are not yet registered - by specifying the watch definition inline. This serves as great tool for testing - and debugging your watches prior to adding them to Watcher. When Elasticsearch - security features are enabled on your cluster, watches are run with the privileges - of the user that stored the watches. If your user is allowed to read index `a`, - but not index `b`, then the exact same set of rules will apply during execution - of a watch. When using the run watch API, the authorization data of the user - that called the API will be used as a base, instead of the information who stored - the watch. + .. raw:: html + +

Run a watch. + This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes.

+

For testing and debugging purposes, you also have fine-grained control on how the watch runs. + You can run the watch without running all of its actions or alternatively by simulating them. + You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs.

+

You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline.
This serves as a great tool for testing and debugging your watches prior to adding them to Watcher.

+

When Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches. + If your user is allowed to read index a, but not index b, then the exact same set of rules will apply during execution of a watch.

+

When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information of the user who stored the watch.
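
A sketch of a debugging run from the async Python client: the condition is ignored and no history record is written, so an existing watch (placeholder id my_watch) can be exercised safely.

```
import asyncio
from elasticsearch import AsyncElasticsearch

async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")
    try:
        # Force-run an existing watch without writing a watch history record.
        result = await client.watcher.execute_watch(
            id="my_watch",
            ignore_condition=True,
            record_execution=False,
        )
        print(result["watch_record"])
    finally:
        await client.close()

asyncio.run(main())
```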

+ ``_ @@ -348,9 +358,12 @@ async def get_settings( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get Watcher index settings. Get settings for the Watcher internal index (`.watches`). - Only a subset of settings are shown, for example `index.auto_expand_replicas` - and `index.number_of_replicas`. + .. raw:: html + +

Get Watcher index settings. + Get settings for the Watcher internal index (.watches). + Only a subset of settings are shown, for example index.auto_expand_replicas and index.number_of_replicas.

+ ``_ @@ -392,7 +405,10 @@ async def get_watch( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a watch. + .. raw:: html + +

Get a watch.

+ ``_ @@ -456,17 +472,18 @@ async def put_watch( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a watch. When a watch is registered, a new document that represents - the watch is added to the `.watches` index and its trigger is immediately registered - with the relevant trigger engine. Typically for the `schedule` trigger, the scheduler - is the trigger engine. IMPORTANT: You must use Kibana or this API to create a - watch. Do not add a watch directly to the `.watches` index by using the Elasticsearch - index API. If Elasticsearch security features are enabled, do not give users - write privileges on the `.watches` index. When you add a watch you can also define - its initial active state by setting the *active* parameter. When Elasticsearch - security features are enabled, your watch can index or search only on indices - for which the user that stored the watch has privileges. If the user is able - to read index `a`, but not index `b`, the same will apply when the watch runs. + .. raw:: html + +

Create or update a watch.
When a watch is registered, a new document that represents the watch is added to the .watches index and its trigger is immediately registered with the relevant trigger engine.
Typically for the schedule trigger, the scheduler is the trigger engine.

+

IMPORTANT: You must use Kibana or this API to create a watch.
Do not add a watch directly to the .watches index by using the Elasticsearch index API.
If Elasticsearch security features are enabled, do not give users write privileges on the .watches index.

+

When you add a watch you can also define its initial active state by setting the active parameter.

+

When Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges.
If the user is able to read index a, but not index b, the same will apply when the watch runs.
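As a rough sketch of registering a watch with the Python client (the trigger, input, condition, and action bodies below are illustrative assumptions, not taken from this patch):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    # Register (or overwrite) a watch that checks cluster health every 10 minutes.
    client.watcher.put_watch(
        id="cluster_health_watch",   # hypothetical watch ID
        active=True,                 # initial active state
        trigger={"schedule": {"interval": "10m"}},
        input={"http": {"request": {"host": "localhost", "port": 9200, "path": "/_cluster/health"}}},
        condition={"compare": {"ctx.payload.status": {"eq": "red"}}},
        actions={"log_error": {"logging": {"text": "Cluster health is RED"}}},
    )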

+ ``_ @@ -574,9 +591,12 @@ async def query_watches( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Query watches. Get all registered watches in a paginated manner and optionally - filter watches by a query. Note that only the `_id` and `metadata.*` fields are - queryable or sortable. + .. raw:: html + +

Query watches.
Get all registered watches in a paginated manner and optionally filter watches by a query.

+

Note that only the _id and metadata.* fields are queryable or sortable.
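A small Python sketch of paging through watches with a metadata filter (the metadata field name is hypothetical):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    # Page through registered watches, filtering on a metadata field.
    resp = client.watcher.query_watches(
        query={"term": {"metadata.team": "ops"}},  # hypothetical metadata field
        from_=0,
        size=10,
    )
    for watch in resp["watches"]:
        print(watch["_id"])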

+ ``_ @@ -647,7 +667,11 @@ async def start( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Start the watch service. Start the Watcher service if it is not already running. + .. raw:: html + +

Start the watch service.
Start the Watcher service if it is not already running.

+ ``_ @@ -708,8 +732,12 @@ async def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get Watcher statistics. This API always returns basic metrics. You retrieve more - metrics by using the metric parameter. + .. raw:: html + +

Get Watcher statistics.
This API always returns basic metrics.
You can retrieve more metrics by using the metric parameter.

+ ``_ @@ -756,7 +784,11 @@ async def stop( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stop the watch service. Stop the Watcher service if it is running. + .. raw:: html + +

Stop the watch service.
Stop the Watcher service if it is running.

+ ``_ @@ -808,9 +840,13 @@ async def update_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update Watcher index settings. Update settings for the Watcher internal index - (`.watches`). Only a subset of settings can be modified. This includes `index.auto_expand_replicas` - and `index.number_of_replicas`. + .. raw:: html + +

Update Watcher index settings.
Update settings for the Watcher internal index (.watches).
Only a subset of settings can be modified.
This includes index.auto_expand_replicas and index.number_of_replicas.

+ ``_ diff --git a/elasticsearch/_async/client/xpack.py b/elasticsearch/_async/client/xpack.py index 090aca019..36e87da61 100644 --- a/elasticsearch/_async/client/xpack.py +++ b/elasticsearch/_async/client/xpack.py @@ -43,10 +43,16 @@ async def info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get information. The information provided by the API includes: * Build information - including the build number and timestamp. * License information about the currently - installed license. * Feature information for the features that are currently - enabled and available under the current license. + .. raw:: html + +

Get information.
The information provided by the API includes:

+
    +
  • Build information including the build number and timestamp.
  • License information about the currently installed license.
  • Feature information for the features that are currently enabled and available under the current license.
+ ``_ @@ -90,9 +96,12 @@ async def usage( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get usage information. Get information about the features that are currently - enabled and available under the current license. The API also provides some usage - statistics. + .. raw:: html + +

Get usage information.
Get information about the features that are currently enabled and available under the current license.
The API also provides some usage statistics.

+ ``_ diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py index c308cf846..243849fb8 100644 --- a/elasticsearch/_sync/client/__init__.py +++ b/elasticsearch/_sync/client/__init__.py @@ -644,83 +644,89 @@ def bulk( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Bulk index or delete documents. Perform multiple `index`, `create`, `delete`, - and `update` actions in a single request. This reduces overhead and can greatly - increase indexing speed. If the Elasticsearch security features are enabled, - you must have the following index privileges for the target data stream, index, - or index alias: * To use the `create` action, you must have the `create_doc`, - `create`, `index`, or `write` index privilege. Data streams support only the - `create` action. * To use the `index` action, you must have the `create`, `index`, - or `write` index privilege. * To use the `delete` action, you must have the `delete` - or `write` index privilege. * To use the `update` action, you must have the `index` - or `write` index privilege. * To automatically create a data stream or index - with a bulk API request, you must have the `auto_configure`, `create_index`, - or `manage` index privilege. * To make the result of a bulk operation visible - to search using the `refresh` parameter, you must have the `maintenance` or `manage` - index privilege. Automatic data stream creation requires a matching index template - with data stream enabled. The actions are specified in the request body using - a newline delimited JSON (NDJSON) structure: ``` action_and_meta_data\\n optional_source\\n - action_and_meta_data\\n optional_source\\n .... action_and_meta_data\\n optional_source\\n - ``` The `index` and `create` actions expect a source on the next line and have - the same semantics as the `op_type` parameter in the standard index API. A `create` - action fails if a document with the same ID already exists in the target An `index` - action adds or replaces a document as necessary. NOTE: Data streams support only - the `create` action. To update or delete a document in a data stream, you must - target the backing index containing the document. An `update` action expects - that the partial doc, upsert, and script and its options are specified on the - next line. A `delete` action does not expect a source on the next line and has - the same semantics as the standard delete API. NOTE: The final line of data must - end with a newline character (`\\n`). Each newline character may be preceded - by a carriage return (`\\r`). When sending NDJSON data to the `_bulk` endpoint, - use a `Content-Type` header of `application/json` or `application/x-ndjson`. - Because this format uses literal newline characters (`\\n`) as delimiters, make - sure that the JSON actions and sources are not pretty printed. If you provide - a target in the request path, it is used for any actions that don't explicitly - specify an `_index` argument. A note on the format: the idea here is to make - processing as fast as possible. As some of the actions are redirected to other - shards on other nodes, only `action_meta_data` is parsed on the receiving node - side. Client libraries using this protocol should try and strive to do something - similar on the client side, and reduce buffering as much as possible. There is - no "correct" number of actions to perform in a single bulk request. Experiment - with different settings to find the optimal size for your particular workload. 
- Note that Elasticsearch limits the maximum size of a HTTP request to 100mb by - default so clients must ensure that no request exceeds this size. It is not possible - to index a single document that exceeds the size limit, so you must pre-process - any such documents into smaller pieces before sending them to Elasticsearch. - For instance, split documents into pages or chapters before indexing them, or - store raw binary data in a system outside Elasticsearch and replace the raw data - with a link to the external system in the documents that you send to Elasticsearch. - **Client suppport for bulk requests** Some of the officially supported clients - provide helpers to assist with bulk requests and reindexing: * Go: Check out - `esutil.BulkIndexer` * Perl: Check out `Search::Elasticsearch::Client::5_0::Bulk` - and `Search::Elasticsearch::Client::5_0::Scroll` * Python: Check out `elasticsearch.helpers.*` - * JavaScript: Check out `client.helpers.*` * .NET: Check out `BulkAllObservable` - * PHP: Check out bulk indexing. **Submitting bulk requests with cURL** If you're - providing text file input to `curl`, you must use the `--data-binary` flag instead - of plain `-d`. The latter doesn't preserve newlines. For example: ``` $ cat requests - { "index" : { "_index" : "test", "_id" : "1" } } { "field1" : "value1" } $ curl - -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary - "@requests"; echo {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]} - ``` **Optimistic concurrency control** Each `index` and `delete` action within - a bulk API call may include the `if_seq_no` and `if_primary_term` parameters - in their respective action and meta data lines. The `if_seq_no` and `if_primary_term` - parameters control how operations are run, based on the last modification to - existing documents. See Optimistic concurrency control for more details. **Versioning** - Each bulk item can include the version value using the `version` field. It automatically - follows the behavior of the index or delete operation based on the `_version` - mapping. It also support the `version_type`. **Routing** Each bulk item can include - the routing value using the `routing` field. It automatically follows the behavior - of the index or delete operation based on the `_routing` mapping. NOTE: Data - streams do not support custom routing unless they were created with the `allow_custom_routing` - setting enabled in the template. **Wait for active shards** When making bulk - calls, you can set the `wait_for_active_shards` parameter to require a minimum - number of shard copies to be active before starting to process the bulk request. - **Refresh** Control when the changes made by this request are visible to search. - NOTE: Only the shards that receive the bulk request will be affected by refresh. - Imagine a `_bulk?refresh=wait_for` request with three documents in it that happen - to be routed to different shards in an index with five shards. The request will - only wait for those three shards to refresh. The other two shards that make up - the index do not participate in the `_bulk` request at all. + .. raw:: html + +

Bulk index or delete documents.
Perform multiple index, create, delete, and update actions in a single request.
This reduces overhead and can greatly increase indexing speed.

+

If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:

+
    +
  • To use the create action, you must have the create_doc, create, index, or write index privilege. Data streams support only the create action.
  • To use the index action, you must have the create, index, or write index privilege.
  • To use the delete action, you must have the delete or write index privilege.
  • To use the update action, you must have the index or write index privilege.
  • To automatically create a data stream or index with a bulk API request, you must have the auto_configure, create_index, or manage index privilege.
  • To make the result of a bulk operation visible to search using the refresh parameter, you must have the maintenance or manage index privilege.
+

Automatic data stream creation requires a matching index template with data stream enabled.

+

The actions are specified in the request body using a newline delimited JSON (NDJSON) structure:

+
    action_and_meta_data\\n
    optional_source\\n
    action_and_meta_data\\n
    optional_source\\n
    ....
    action_and_meta_data\\n
    optional_source\\n
+

The index and create actions expect a source on the next line and have the same semantics as the op_type parameter in the standard index API.
A create action fails if a document with the same ID already exists in the target.
An index action adds or replaces a document as necessary.

+

NOTE: Data streams support only the create action.
To update or delete a document in a data stream, you must target the backing index containing the document.

+

An update action expects that the partial doc, upsert, and script and its options are specified on the next line.

+

A delete action does not expect a source on the next line and has the same semantics as the standard delete API.

+

NOTE: The final line of data must end with a newline character (\\n).
Each newline character may be preceded by a carriage return (\\r).
When sending NDJSON data to the _bulk endpoint, use a Content-Type header of application/json or application/x-ndjson.
Because this format uses literal newline characters (\\n) as delimiters, make sure that the JSON actions and sources are not pretty printed.

+

If you provide a target in the request path, it is used for any actions that don't explicitly specify an _index argument.

+

A note on the format: the idea here is to make processing as fast as possible.
As some of the actions are redirected to other shards on other nodes, only action_meta_data is parsed on the receiving node side.

+

Client libraries using this protocol should strive to do something similar on the client side, and reduce buffering as much as possible.

+

There is no "correct" number of actions to perform in a single bulk request.
Experiment with different settings to find the optimal size for your particular workload.
Note that Elasticsearch limits the maximum size of an HTTP request to 100mb by default, so clients must ensure that no request exceeds this size.
It is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch.
For instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch.

+

Client support for bulk requests

+

Some of the officially supported clients provide helpers to assist with bulk requests and reindexing:

+
    +
  • Go: Check out esutil.BulkIndexer
  • Perl: Check out Search::Elasticsearch::Client::5_0::Bulk and Search::Elasticsearch::Client::5_0::Scroll
  • Python: Check out elasticsearch.helpers.* (see the sketch after this list)
  • JavaScript: Check out client.helpers.*
  • .NET: Check out BulkAllObservable
  • PHP: Check out bulk indexing.
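Since the Python helper is mentioned above, here is a minimal sketch of using elasticsearch.helpers.bulk to build the NDJSON actions for you (index name and documents are made up):

    from elasticsearch import Elasticsearch
    from elasticsearch.helpers import bulk

    client = Elasticsearch("http://localhost:9200")

    # Each action dict becomes one action_and_meta_data / optional_source pair.
    actions = [
        {"_index": "test", "_id": "1", "field1": "value1"},
        {"_index": "test", "_id": "2", "field1": "value2"},
    ]

    # The helper chunks the actions and sends them to the _bulk endpoint.
    success, errors = bulk(client, actions)
    print(success, errors)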
+

Submitting bulk requests with cURL

+

If you're providing text file input to curl, you must use the --data-binary flag instead of plain -d.
The latter doesn't preserve newlines.
For example:

+
    $ cat requests
    { "index" : { "_index" : "test", "_id" : "1" } }
    { "field1" : "value1" }
    $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo
    {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
+

Optimistic concurrency control

+

Each index and delete action within a bulk API call may include the if_seq_no and if_primary_term parameters in their respective action and meta data lines.
The if_seq_no and if_primary_term parameters control how operations are run, based on the last modification to existing documents.
See Optimistic concurrency control for more details.

+

Versioning

+

Each bulk item can include the version value using the version field.
It automatically follows the behavior of the index or delete operation based on the _version mapping.
It also supports the version_type.

+

Routing

+

Each bulk item can include the routing value using the routing field.
It automatically follows the behavior of the index or delete operation based on the _routing mapping.

+

NOTE: Data streams do not support custom routing unless they were created with the allow_custom_routing setting enabled in the template.

+

Wait for active shards

+

When making bulk calls, you can set the wait_for_active_shards parameter to require a minimum number of shard copies to be active before starting to process the bulk request.

+

Refresh

+

Control when the changes made by this request are visible to search.

+

NOTE: Only the shards that receive the bulk request will be affected by refresh.
Imagine a _bulk?refresh=wait_for request with three documents in it that happen to be routed to different shards in an index with five shards.
The request will only wait for those three shards to refresh.
The other two shards that make up the index do not participate in the _bulk request at all.

+ ``_ @@ -837,8 +843,11 @@ def clear_scroll( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear a scrolling search. Clear the search context and results for a scrolling - search. + .. raw:: html + +

Clear a scrolling search.
Clear the search context and results for a scrolling search.

+ ``_ @@ -888,11 +897,14 @@ def close_point_in_time( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Close a point in time. A point in time must be opened explicitly before being - used in search requests. The `keep_alive` parameter tells Elasticsearch how long - it should persist. A point in time is automatically closed when the `keep_alive` - period has elapsed. However, keeping points in time has a cost; close them as - soon as they are no longer required for search requests. + .. raw:: html + +

Close a point in time.
A point in time must be opened explicitly before being used in search requests.
The keep_alive parameter tells Elasticsearch how long it should persist.
A point in time is automatically closed when the keep_alive period has elapsed.
However, keeping points in time has a cost; close them as soon as they are no longer required for search requests.
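A minimal Python sketch of the open/search/close lifecycle described above (index name and keep_alive value are illustrative):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    # Open a point in time against an index, search with it, then close it.
    pit = client.open_point_in_time(index="my-index-000001", keep_alive="1m")

    resp = client.search(
        pit={"id": pit["id"], "keep_alive": "1m"},
        query={"match_all": {}},
        size=100,
    )

    # Close the PIT as soon as it is no longer needed.
    client.close_point_in_time(id=pit["id"])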

+ ``_ @@ -966,14 +978,17 @@ def count( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Count search results. Get the number of documents matching a query. The query - can either be provided using a simple query string as a parameter or using the - Query DSL defined within the request body. The latter must be nested in a `query` - key, which is the same as the search API. The count API supports multi-target - syntax. You can run a single count API search across multiple data streams and - indices. The operation is broadcast across all shards. For each shard ID group, - a replica is chosen and the search is run against it. This means that replicas - increase the scalability of the count. + .. raw:: html + +

Count search results.
Get the number of documents matching a query.

+

The query can either be provided using a simple query string as a parameter or using the Query DSL defined within the request body.
The latter must be nested in a query key, which is the same as the search API.

+

The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.

+

The operation is broadcast across all shards.
For each shard ID group, a replica is chosen and the search is run against it.
This means that replicas increase the scalability of the count.
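As a quick illustration with the Python client (index and query are hypothetical), the Query DSL form is passed in the request body:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    # Count documents matching a Query DSL query; it is nested under "query".
    resp = client.count(
        index="my-index-000001",
        query={"match": {"user.id": "kimchy"}},
    )
    print(resp["count"])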

+ ``_ @@ -1115,80 +1130,61 @@ def create( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a new document in the index. You can index a new JSON document with the - `//_doc/` or `//_create/<_id>` APIs Using `_create` guarantees - that the document is indexed only if it does not already exist. It returns a - 409 response when a document with a same ID already exists in the index. To update - an existing document, you must use the `//_doc/` API. If the Elasticsearch - security features are enabled, you must have the following index privileges for - the target data stream, index, or index alias: * To add a document using the - `PUT //_create/<_id>` or `POST //_create/<_id>` request formats, - you must have the `create_doc`, `create`, `index`, or `write` index privilege. - * To automatically create a data stream or index with this API request, you must - have the `auto_configure`, `create_index`, or `manage` index privilege. Automatic - data stream creation requires a matching index template with data stream enabled. - **Automatically create data streams and indices** If the request's target doesn't - exist and matches an index template with a `data_stream` definition, the index - operation automatically creates the data stream. If the target doesn't exist - and doesn't match a data stream template, the operation automatically creates - the index and applies any matching index templates. NOTE: Elasticsearch includes - several built-in index templates. To avoid naming collisions with these templates, - refer to index pattern documentation. If no mapping exists, the index operation - creates a dynamic mapping. By default, new fields and objects are automatically - added to the mapping if needed. Automatic index creation is controlled by the - `action.auto_create_index` setting. If it is `true`, any index can be created - automatically. You can modify this setting to explicitly allow or block automatic - creation of indices that match specified patterns or set it to `false` to turn - off automatic index creation entirely. Specify a comma-separated list of patterns - you want to allow or prefix each pattern with `+` or `-` to indicate whether - it should be allowed or blocked. When a list is specified, the default behaviour - is to disallow. NOTE: The `action.auto_create_index` setting affects the automatic - creation of indices only. It does not affect the creation of data streams. **Routing** - By default, shard placement — or routing — is controlled by using a hash of the - document's ID value. For more explicit control, the value fed into the hash function - used by the router can be directly specified on a per-operation basis using the - `routing` parameter. When setting up explicit mapping, you can also use the `_routing` - field to direct the index operation to extract the routing value from the document - itself. This does come at the (very minimal) cost of an additional document parsing - pass. If the `_routing` mapping is defined and set to be required, the index - operation will fail if no routing value is provided or extracted. NOTE: Data - streams do not support custom routing unless they were created with the `allow_custom_routing` - setting enabled in the template. **Distributed** The index operation is directed - to the primary shard based on its route and performed on the actual node containing - this shard. After the primary shard completes the operation, if needed, the update - is distributed to applicable replicas. 
**Active shards** To improve the resiliency - of writes to the system, indexing operations can be configured to wait for a - certain number of active shard copies before proceeding with the operation. If - the requisite number of active shard copies are not available, then the write - operation must wait and retry, until either the requisite shard copies have started - or a timeout occurs. By default, write operations only wait for the primary shards - to be active before proceeding (that is to say `wait_for_active_shards` is `1`). - This default can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. - To alter this behavior per operation, use the `wait_for_active_shards request` - parameter. Valid values are all or any positive integer up to the total number - of configured copies per shard in the index (which is `number_of_replicas`+1). - Specifying a negative value or a number greater than the number of shard copies - will throw an error. For example, suppose you have a cluster of three nodes, - A, B, and C and you create an index index with the number of replicas set to - 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt - an indexing operation, by default the operation will only ensure the primary - copy of each shard is available before proceeding. This means that even if B - and C went down and A hosted the primary shard copies, the indexing operation - would still proceed with only one copy of the data. If `wait_for_active_shards` - is set on the request to `3` (and all three nodes are up), the indexing operation - will require 3 active shard copies before proceeding. This requirement should - be met because there are 3 active nodes in the cluster, each one holding a copy - of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, - which is the same in this situation), the indexing operation will not proceed - as you do not have all 4 copies of each shard active in the index. The operation - will timeout unless a new node is brought up in the cluster to host the fourth - copy of the shard. It is important to note that this setting greatly reduces - the chances of the write operation not writing to the requisite number of shard - copies, but it does not completely eliminate the possibility, because this check - occurs before the write operation starts. After the write operation is underway, - it is still possible for replication to fail on any number of shard copies but - still succeed on the primary. The `_shards` section of the API response reveals - the number of shard copies on which replication succeeded and failed. + .. raw:: html + +

Create a new document in the index.

+

You can index a new JSON document with the /<target>/_doc/ or /<target>/_create/<_id> APIs.
Using _create guarantees that the document is indexed only if it does not already exist.
It returns a 409 response when a document with the same ID already exists in the index.
To update an existing document, you must use the /<target>/_doc/ API.
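A small Python sketch of the _create behaviour just described (index, ID, and document are made up); creating the same ID twice surfaces the 409 as a ConflictError:

    from elasticsearch import Elasticsearch
    from elasticsearch.exceptions import ConflictError

    client = Elasticsearch("http://localhost:9200")

    doc = {"user": {"id": "elkbee"}, "message": "hello"}

    # First create succeeds; the document did not exist yet.
    client.create(index="my-index-000001", id="1", document=doc)

    try:
        # Second create with the same ID returns a 409.
        client.create(index="my-index-000001", id="1", document=doc)
    except ConflictError:
        print("document with ID 1 already exists")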

+

If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:

+
    +
  • To add a document using the PUT /<target>/_create/<_id> or POST /<target>/_create/<_id> request formats, you must have the create_doc, create, index, or write index privilege.
  • To automatically create a data stream or index with this API request, you must have the auto_configure, create_index, or manage index privilege.
+

Automatic data stream creation requires a matching index template with data stream enabled.

+

Automatically create data streams and indices

+

If the request's target doesn't exist and matches an index template with a data_stream definition, the index operation automatically creates the data stream.

+

If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.

+

NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.

+

If no mapping exists, the index operation creates a dynamic mapping.
By default, new fields and objects are automatically added to the mapping if needed.

+

Automatic index creation is controlled by the action.auto_create_index setting.
If it is true, any index can be created automatically.
You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to false to turn off automatic index creation entirely.
Specify a comma-separated list of patterns you want to allow or prefix each pattern with + or - to indicate whether it should be allowed or blocked.
When a list is specified, the default behaviour is to disallow.

+

NOTE: The action.auto_create_index setting affects the automatic creation of indices only.
It does not affect the creation of data streams.

+

Routing

+

By default, shard placement — or routing — is controlled by using a hash of the document's ID value.
For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the routing parameter.

+

When setting up explicit mapping, you can also use the _routing field to direct the index operation to extract the routing value from the document itself.
This does come at the (very minimal) cost of an additional document parsing pass.
If the _routing mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.

+

NOTE: Data streams do not support custom routing unless they were created with the allow_custom_routing setting enabled in the template.

+

Distributed

+

The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard.
After the primary shard completes the operation, if needed, the update is distributed to applicable replicas.

+

Active shards

+

To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation.
If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs.
By default, write operations only wait for the primary shards to be active before proceeding (that is to say wait_for_active_shards is 1).
This default can be overridden in the index settings dynamically by setting index.write.wait_for_active_shards.
To alter this behavior per operation, use the wait_for_active_shards request parameter.

+

Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is number_of_replicas+1).
Specifying a negative value or a number greater than the number of shard copies will throw an error.

+

For example, suppose you have a cluster of three nodes, A, B, and C, and you create an index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes).
If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding.
This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data.
If wait_for_active_shards is set on the request to 3 (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding.
This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard.
However, if you set wait_for_active_shards to all (or to 4, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index.
The operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard.

+

It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts.
After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary.
The _shards section of the API response reveals the number of shard copies on which replication succeeded and failed.

+ ``_ @@ -1302,30 +1298,33 @@ def delete( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a document. Remove a JSON document from the specified index. NOTE: You - cannot send deletion requests directly to a data stream. To delete a document - in a data stream, you must target the backing index containing the document. - **Optimistic concurrency control** Delete operations can be made conditional - and only be performed if the last modification to the document was assigned the - sequence number and primary term specified by the `if_seq_no` and `if_primary_term` - parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` - and a status code of `409`. **Versioning** Each document indexed is versioned. - When deleting a document, the version can be specified to make sure the relevant - document you are trying to delete is actually being deleted and it has not changed - in the meantime. Every write operation run on a document, deletes included, causes - its version to be incremented. The version number of a deleted document remains - available for a short time after deletion to allow for control of concurrent - operations. The length of time for which a deleted document's version remains - available is determined by the `index.gc_deletes` index setting. **Routing** - If routing is used during indexing, the routing value also needs to be specified - to delete a document. If the `_routing` mapping is set to `required` and no routing - value is specified, the delete API throws a `RoutingMissingException` and rejects - the request. For example: ``` DELETE /my-index-000001/_doc/1?routing=shard-1 - ``` This request deletes the document with ID 1, but it is routed based on the - user. The document is not deleted if the correct routing is not specified. **Distributed** - The delete operation gets hashed into a specific shard ID. It then gets redirected - into the primary shard within that ID group and replicated (if needed) to shard - replicas within that ID group. + .. raw:: html + +

Delete a document.

+

Remove a JSON document from the specified index.

+

NOTE: You cannot send deletion requests directly to a data stream.
To delete a document in a data stream, you must target the backing index containing the document.

+

Optimistic concurrency control

+

Delete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the if_seq_no and if_primary_term parameters.
If a mismatch is detected, the operation will result in a VersionConflictException and a status code of 409.
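A minimal Python sketch of such a conditional delete (index and ID are illustrative); the sequence number and primary term would normally come from a previous read or write of the document:

    from elasticsearch import Elasticsearch
    from elasticsearch.exceptions import ConflictError

    client = Elasticsearch("http://localhost:9200")

    # Read the document to learn its current seq_no and primary_term.
    doc = client.get(index="my-index-000001", id="1")

    try:
        # Delete only if the document has not been modified since the read.
        client.delete(
            index="my-index-000001",
            id="1",
            if_seq_no=doc["_seq_no"],
            if_primary_term=doc["_primary_term"],
        )
    except ConflictError:
        print("document was modified concurrently; delete rejected with 409")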

+

Versioning

+

Each document indexed is versioned.
When deleting a document, the version can be specified to make sure the relevant document you are trying to delete is actually being deleted and it has not changed in the meantime.
Every write operation run on a document, deletes included, causes its version to be incremented.
The version number of a deleted document remains available for a short time after deletion to allow for control of concurrent operations.
The length of time for which a deleted document's version remains available is determined by the index.gc_deletes index setting.

+

Routing

+

If routing is used during indexing, the routing value also needs to be specified to delete a document.

+

If the _routing mapping is set to required and no routing value is specified, the delete API throws a RoutingMissingException and rejects the request.

+

For example:

+
    DELETE /my-index-000001/_doc/1?routing=shard-1
+

This request deletes the document with ID 1, but it is routed based on the user.
The document is not deleted if the correct routing is not specified.

+

Distributed

+

The delete operation gets hashed into a specific shard ID.
It then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group.

+ ``_ @@ -1452,7 +1451,11 @@ def delete_by_query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete documents. Deletes documents that match the specified query. + .. raw:: html + +

Delete documents.
Deletes documents that match the specified query.
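For example, with the Python client (index and query are hypothetical):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    # Delete every document that matches the query.
    resp = client.delete_by_query(
        index="my-index-000001",
        query={"match": {"user.id": "kimchy"}},
    )
    print(resp["deleted"])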

+ ``_ @@ -1630,10 +1633,12 @@ def delete_by_query_rethrottle( requests_per_second: t.Optional[float] = None, ) -> ObjectApiResponse[t.Any]: """ - Throttle a delete by query operation. Change the number of requests per second - for a particular delete by query operation. Rethrottling that speeds up the query - takes effect immediately but rethrotting that slows down the query takes effect - after completing the current batch to prevent scroll timeouts. + .. raw:: html + +

Throttle a delete by query operation.

+

Change the number of requests per second for a particular delete by query operation.
Rethrottling that speeds up the query takes effect immediately, but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts.

+ ``_ @@ -1679,7 +1684,11 @@ def delete_script( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a script or search template. Deletes a stored script or search template. + .. raw:: html + +

Delete a script or search template.
Deletes a stored script or search template.

+ ``_ @@ -1747,15 +1756,21 @@ def exists( ] = None, ) -> HeadApiResponse: """ - Check a document. Verify that a document exists. For example, check to see if - a document with the `_id` 0 exists: ``` HEAD my-index-000001/_doc/0 ``` If the - document exists, the API returns a status code of `200 - OK`. If the document - doesn’t exist, the API returns `404 - Not Found`. **Versioning support** You - can use the `version` parameter to check the document only if its current version - is equal to the specified one. Internally, Elasticsearch has marked the old document - as deleted and added an entirely new document. The old version of the document - doesn't disappear immediately, although you won't be able to access it. Elasticsearch - cleans up deleted documents in the background as you continue to index more data. + .. raw:: html + +

Check a document.

+

Verify that a document exists.
For example, check to see if a document with the _id 0 exists:

+
    HEAD my-index-000001/_doc/0
+

If the document exists, the API returns a status code of 200 - OK.
If the document doesn’t exist, the API returns 404 - Not Found.

+

Versioning support

+

You can use the version parameter to check the document only if its current version is equal to the specified one.

+

Internally, Elasticsearch has marked the old document as deleted and added an entirely new document.
The old version of the document doesn't disappear immediately, although you won't be able to access it.
Elasticsearch cleans up deleted documents in the background as you continue to index more data.
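In the Python client the same check returns a response that is truthy when the document exists; a minimal sketch (index and ID are illustrative):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    # HEAD my-index-000001/_doc/0: truthy on 200 - OK, falsy on 404 - Not Found.
    if client.exists(index="my-index-000001", id="0"):
        print("document 0 exists")
    else:
        print("document 0 not found")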

+ ``_ @@ -1870,9 +1885,15 @@ def exists_source( ] = None, ) -> HeadApiResponse: """ - Check for a document source. Check whether a document source exists in an index. - For example: ``` HEAD my-index-000001/_source/1 ``` A document's source is not - available if it is disabled in the mapping. + .. raw:: html + +

Check for a document source.

+

Check whether a document source exists in an index.
For example:

+
    HEAD my-index-000001/_source/1
+

A document's source is not available if it is disabled in the mapping.

+ ``_ @@ -1973,8 +1994,11 @@ def explain( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Explain a document match result. Returns information about why a specific document - matches, or doesn’t match, a query. + .. raw:: html + +

Explain a document match result.
Returns information about why a specific document matches, or doesn’t match, a query.

+ ``_ @@ -2093,11 +2117,14 @@ def field_caps( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the field capabilities. Get information about the capabilities of fields - among multiple indices. For data streams, the API returns field capabilities - among the stream’s backing indices. It returns runtime fields like any other - field. For example, a runtime field with a type of keyword is returned the same - as any other field that belongs to the `keyword` family. + .. raw:: html + +

Get the field capabilities.

+

Get information about the capabilities of fields among multiple indices.

+

For data streams, the API returns field capabilities among the stream’s backing indices.
It returns runtime fields like any other field.
For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the keyword family.
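A short Python sketch (index pattern and field names are made up):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    # Ask which of these fields exist across the matching indices, and with what types.
    resp = client.field_caps(index="my-index-*", fields=["rating", "title"])
    for field, caps in resp["fields"].items():
        print(field, list(caps))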

+ ``_ @@ -2213,36 +2240,45 @@ def get( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a document by its ID. Get a document and its source or stored fields from - an index. By default, this API is realtime and is not affected by the refresh - rate of the index (when data will become visible for search). In the case where - stored fields are requested with the `stored_fields` parameter and the document - has been updated but is not yet refreshed, the API will have to parse and analyze - the source to extract the stored fields. To turn off realtime behavior, set the - `realtime` parameter to false. **Source filtering** By default, the API returns - the contents of the `_source` field unless you have used the `stored_fields` - parameter or the `_source` field is turned off. You can turn off `_source` retrieval - by using the `_source` parameter: ``` GET my-index-000001/_doc/0?_source=false - ``` If you only need one or two fields from the `_source`, use the `_source_includes` - or `_source_excludes` parameters to include or filter out particular fields. - This can be helpful with large documents where partial retrieval can save on - network overhead Both parameters take a comma separated list of fields or wildcard - expressions. For example: ``` GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities - ``` If you only want to specify includes, you can use a shorter notation: ``` - GET my-index-000001/_doc/0?_source=*.id ``` **Routing** If routing is used during - indexing, the routing value also needs to be specified to retrieve a document. - For example: ``` GET my-index-000001/_doc/2?routing=user1 ``` This request gets - the document with ID 2, but it is routed based on the user. The document is not - fetched if the correct routing is not specified. **Distributed** The GET operation - is hashed into a specific shard ID. It is then redirected to one of the replicas - within that shard ID and returns the result. The replicas are the primary shard - and its replicas within that shard ID group. This means that the more replicas - you have, the better your GET scaling will be. **Versioning support** You can - use the `version` parameter to retrieve the document only if its current version - is equal to the specified one. Internally, Elasticsearch has marked the old document - as deleted and added an entirely new document. The old version of the document - doesn't disappear immediately, although you won't be able to access it. Elasticsearch - cleans up deleted documents in the background as you continue to index more data. + .. raw:: html + +

Get a document by its ID.

+

Get a document and its source or stored fields from an index.

+

By default, this API is realtime and is not affected by the refresh rate of the index (when data will become visible for search).
In the case where stored fields are requested with the stored_fields parameter and the document has been updated but is not yet refreshed, the API will have to parse and analyze the source to extract the stored fields.
To turn off realtime behavior, set the realtime parameter to false.

+

Source filtering

+

By default, the API returns the contents of the _source field unless you have used the stored_fields parameter or the _source field is turned off.
You can turn off _source retrieval by using the _source parameter:

+
    GET my-index-000001/_doc/0?_source=false
+

If you only need one or two fields from the _source, use the _source_includes or _source_excludes parameters to include or filter out particular fields.
This can be helpful with large documents where partial retrieval can save on network overhead.
Both parameters take a comma-separated list of fields or wildcard expressions.
For example:

+
    GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
+

If you only want to specify includes, you can use a shorter notation:

+
    GET my-index-000001/_doc/0?_source=*.id
+

Routing

+

If routing is used during indexing, the routing value also needs to be specified to retrieve a document.
For example:

+
    GET my-index-000001/_doc/2?routing=user1
+

This request gets the document with ID 2, but it is routed based on the user.
The document is not fetched if the correct routing is not specified.

+

Distributed

+

The GET operation is hashed into a specific shard ID.
It is then redirected to one of the replicas within that shard ID and returns the result.
The replicas are the primary shard and its replicas within that shard ID group.
This means that the more replicas you have, the better your GET scaling will be.

+

Versioning support

+

You can use the version parameter to retrieve the document only if its current version is equal to the specified one.

+

Internally, Elasticsearch has marked the old document as deleted and added an entirely new document.
The old version of the document doesn't disappear immediately, although you won't be able to access it.
Elasticsearch cleans up deleted documents in the background as you continue to index more data.
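Tying the above together, a minimal Python sketch of a get with routing and source filtering (index, ID, and field patterns are illustrative; the sketch assumes the 8.x client exposes the _source_includes and _source_excludes query parameters as source_includes and source_excludes keyword arguments):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    # GET my-index-000001/_doc/2?routing=user1&_source_includes=*.id&_source_excludes=entities
    resp = client.get(
        index="my-index-000001",
        id="2",
        routing="user1",
        source_includes="*.id",
        source_excludes="entities",
    )
    print(resp["_source"])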

+ ``_ @@ -2345,7 +2381,11 @@ def get_script( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a script or search template. Retrieves a stored script or search template. + .. raw:: html + +

Get a script or search template.
Retrieves a stored script or search template.

+ ``_ @@ -2387,7 +2427,11 @@ def get_script_context( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get script contexts. Get a list of supported script contexts and their methods. + .. raw:: html + +

Get script contexts.

+

Get a list of supported script contexts and their methods.

+ ``_ """ @@ -2422,7 +2466,11 @@ def get_script_languages( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get script languages. Get a list of available script types, languages, and contexts. + .. raw:: html + +

Get script languages.

+

Get a list of available script types, languages, and contexts.

+ ``_ """ @@ -2477,10 +2525,17 @@ def get_source( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a document's source. Get the source of a document. For example: ``` GET my-index-000001/_source/1 - ``` You can use the source filtering parameters to control which parts of the - `_source` are returned: ``` GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities - ``` + .. raw:: html + +

Get a document's source.

+

Get the source of a document.
For example:

+
    GET my-index-000001/_source/1
+

You can use the source filtering parameters to control which parts of the _source are returned:

+
    GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities
+ ``_ @@ -2565,26 +2620,22 @@ def health_report( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the cluster health. Get a report with the health status of an Elasticsearch - cluster. The report contains a list of indicators that compose Elasticsearch - functionality. Each indicator has a health status of: green, unknown, yellow - or red. The indicator will provide an explanation and metadata describing the - reason for its current health status. The cluster’s status is controlled by the - worst indicator status. In the event that an indicator’s status is non-green, - a list of impacts may be present in the indicator result which detail the functionalities - that are negatively affected by the health issue. Each impact carries with it - a severity level, an area of the system that is affected, and a simple description - of the impact on the system. Some health indicators can determine the root cause - of a health problem and prescribe a set of steps that can be performed in order - to improve the health of the system. The root cause and remediation steps are - encapsulated in a diagnosis. A diagnosis contains a cause detailing a root cause - analysis, an action containing a brief description of the steps to take to fix - the problem, the list of affected resources (if applicable), and a detailed step-by-step - troubleshooting guide to fix the diagnosed problem. NOTE: The health indicators - perform root cause analysis of non-green health statuses. This can be computationally - expensive when called frequently. When setting up automated polling of the API - for health status, set verbose to false to disable the more expensive analysis - logic. + .. raw:: html + +

Get the cluster health.
Get a report with the health status of an Elasticsearch cluster.
The report contains a list of indicators that compose Elasticsearch functionality.

+

Each indicator has a health status of: green, unknown, yellow or red.
The indicator will provide an explanation and metadata describing the reason for its current health status.

+

The cluster’s status is controlled by the worst indicator status.

+

In the event that an indicator’s status is non-green, a list of impacts may be present in the indicator result which detail the functionalities that are negatively affected by the health issue.
Each impact carries with it a severity level, an area of the system that is affected, and a simple description of the impact on the system.

+

Some health indicators can determine the root cause of a health problem and prescribe a set of steps that can be performed in order to improve the health of the system.
The root cause and remediation steps are encapsulated in a diagnosis.
A diagnosis contains a cause detailing a root cause analysis, an action containing a brief description of the steps to take to fix the problem, the list of affected resources (if applicable), and a detailed step-by-step troubleshooting guide to fix the diagnosed problem.

+

NOTE: The health indicators perform root cause analysis of non-green health statuses.
This can be computationally expensive when called frequently.
When setting up automated polling of the API for health status, set verbose to false to disable the more expensive analysis logic.

+ ``_ @@ -2659,120 +2710,96 @@ def index( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a document in an index. Add a JSON document to the specified - data stream or index and make it searchable. If the target is an index and the - document already exists, the request updates the document and increments its - version. NOTE: You cannot use this API to send update requests for existing documents - in a data stream. If the Elasticsearch security features are enabled, you must - have the following index privileges for the target data stream, index, or index - alias: * To add or overwrite a document using the `PUT //_doc/<_id>` - request format, you must have the `create`, `index`, or `write` index privilege. - * To add a document using the `POST //_doc/` request format, you must - have the `create_doc`, `create`, `index`, or `write` index privilege. * To automatically - create a data stream or index with this API request, you must have the `auto_configure`, - `create_index`, or `manage` index privilege. Automatic data stream creation requires - a matching index template with data stream enabled. NOTE: Replica shards might - not all be started when an indexing operation returns successfully. By default, - only the primary is required. Set `wait_for_active_shards` to change this default - behavior. **Automatically create data streams and indices** If the request's - target doesn't exist and matches an index template with a `data_stream` definition, - the index operation automatically creates the data stream. If the target doesn't - exist and doesn't match a data stream template, the operation automatically creates - the index and applies any matching index templates. NOTE: Elasticsearch includes - several built-in index templates. To avoid naming collisions with these templates, - refer to index pattern documentation. If no mapping exists, the index operation - creates a dynamic mapping. By default, new fields and objects are automatically - added to the mapping if needed. Automatic index creation is controlled by the - `action.auto_create_index` setting. If it is `true`, any index can be created - automatically. You can modify this setting to explicitly allow or block automatic - creation of indices that match specified patterns or set it to `false` to turn - off automatic index creation entirely. Specify a comma-separated list of patterns - you want to allow or prefix each pattern with `+` or `-` to indicate whether - it should be allowed or blocked. When a list is specified, the default behaviour - is to disallow. NOTE: The `action.auto_create_index` setting affects the automatic - creation of indices only. It does not affect the creation of data streams. **Optimistic - concurrency control** Index operations can be made conditional and only be performed - if the last modification to the document was assigned the sequence number and - primary term specified by the `if_seq_no` and `if_primary_term` parameters. If - a mismatch is detected, the operation will result in a `VersionConflictException` - and a status code of `409`. **Routing** By default, shard placement — or routing - — is controlled by using a hash of the document's ID value. For more explicit - control, the value fed into the hash function used by the router can be directly - specified on a per-operation basis using the `routing` parameter. When setting - up explicit mapping, you can also use the `_routing` field to direct the index - operation to extract the routing value from the document itself. 
This does come - at the (very minimal) cost of an additional document parsing pass. If the `_routing` - mapping is defined and set to be required, the index operation will fail if no - routing value is provided or extracted. NOTE: Data streams do not support custom - routing unless they were created with the `allow_custom_routing` setting enabled - in the template. **Distributed** The index operation is directed to the primary - shard based on its route and performed on the actual node containing this shard. - After the primary shard completes the operation, if needed, the update is distributed - to applicable replicas. **Active shards** To improve the resiliency of writes - to the system, indexing operations can be configured to wait for a certain number - of active shard copies before proceeding with the operation. If the requisite - number of active shard copies are not available, then the write operation must - wait and retry, until either the requisite shard copies have started or a timeout - occurs. By default, write operations only wait for the primary shards to be active - before proceeding (that is to say `wait_for_active_shards` is `1`). This default - can be overridden in the index settings dynamically by setting `index.write.wait_for_active_shards`. - To alter this behavior per operation, use the `wait_for_active_shards request` - parameter. Valid values are all or any positive integer up to the total number - of configured copies per shard in the index (which is `number_of_replicas`+1). - Specifying a negative value or a number greater than the number of shard copies - will throw an error. For example, suppose you have a cluster of three nodes, - A, B, and C and you create an index index with the number of replicas set to - 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt - an indexing operation, by default the operation will only ensure the primary - copy of each shard is available before proceeding. This means that even if B - and C went down and A hosted the primary shard copies, the indexing operation - would still proceed with only one copy of the data. If `wait_for_active_shards` - is set on the request to `3` (and all three nodes are up), the indexing operation - will require 3 active shard copies before proceeding. This requirement should - be met because there are 3 active nodes in the cluster, each one holding a copy - of the shard. However, if you set `wait_for_active_shards` to `all` (or to `4`, - which is the same in this situation), the indexing operation will not proceed - as you do not have all 4 copies of each shard active in the index. The operation - will timeout unless a new node is brought up in the cluster to host the fourth - copy of the shard. It is important to note that this setting greatly reduces - the chances of the write operation not writing to the requisite number of shard - copies, but it does not completely eliminate the possibility, because this check - occurs before the write operation starts. After the write operation is underway, - it is still possible for replication to fail on any number of shard copies but - still succeed on the primary. The `_shards` section of the API response reveals - the number of shard copies on which replication succeeded and failed. **No operation - (noop) updates** When updating a document by using this API, a new version of - the document is always created even if the document hasn't changed. If this isn't - acceptable use the `_update` API with `detect_noop` set to `true`. 
The `detect_noop` - option isn't available on this API because it doesn’t fetch the old source and - isn't able to compare it against the new source. There isn't a definitive rule - for when noop updates aren't acceptable. It's a combination of lots of factors - like how frequently your data source sends updates that are actually noops and - how many queries per second Elasticsearch runs on the shard receiving the updates. - **Versioning** Each indexed document is given a version number. By default, internal - versioning is used that starts at 1 and increments with each update, deletes - included. Optionally, the version number can be set to an external value (for - example, if maintained in a database). To enable this functionality, `version_type` - should be set to `external`. The value provided must be a numeric, long value - greater than or equal to 0, and less than around `9.2e+18`. NOTE: Versioning - is completely real time, and is not affected by the near real time aspects of - search operations. If no version is provided, the operation runs without any - version checks. When using the external version type, the system checks to see - if the version number passed to the index request is greater than the version - of the currently stored document. If true, the document will be indexed and the - new version number used. If the value provided is less than or equal to the stored - document's version number, a version conflict will occur and the index operation - will fail. For example: ``` PUT my-index-000001/_doc/1?version=2&version_type=external - { "user": { "id": "elkbee" } } In this example, the operation will succeed since - the supplied version of 2 is higher than the current document version of 1. If - the document was already updated and its version was set to 2 or higher, the - indexing command will fail and result in a conflict (409 HTTP status code). A - nice side effect is that there is no need to maintain strict ordering of async - indexing operations run as a result of changes to a source database, as long - as version numbers from the source database are used. Even the simple case of - updating the Elasticsearch index using data from a database is simplified if - external versioning is used, as only the latest version will be used if the index - operations arrive out of order. + .. raw:: html + +

Create or update a document in an index.

+

Add a JSON document to the specified data stream or index and make it searchable. + If the target is an index and the document already exists, the request updates the document and increments its version.

+

NOTE: You cannot use this API to send update requests for existing documents in a data stream.

+

If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:

+
  • To add or overwrite a document using the PUT /<target>/_doc/<_id> request format, you must have the create, index, or write index privilege.
  • To add a document using the POST /<target>/_doc/ request format, you must have the create_doc, create, index, or write index privilege.
  • To automatically create a data stream or index with this API request, you must have the auto_configure, create_index, or manage index privilege.

Automatic data stream creation requires a matching index template with data stream enabled.

+

NOTE: Replica shards might not all be started when an indexing operation returns successfully. + By default, only the primary is required. Set wait_for_active_shards to change this default behavior.

+

Automatically create data streams and indices

+

If the request's target doesn't exist and matches an index template with a data_stream definition, the index operation automatically creates the data stream.

+

If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.

+

NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.

+

If no mapping exists, the index operation creates a dynamic mapping. + By default, new fields and objects are automatically added to the mapping if needed.

+

Automatic index creation is controlled by the action.auto_create_index setting. + If it is true, any index can be created automatically. + You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to false to turn off automatic index creation entirely. + Specify a comma-separated list of patterns you want to allow or prefix each pattern with + or - to indicate whether it should be allowed or blocked. + When a list is specified, the default behaviour is to disallow.

+

NOTE: The action.auto_create_index setting affects the automatic creation of indices only. + It does not affect the creation of data streams.

+

Optimistic concurrency control

+

Index operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the if_seq_no and if_primary_term parameters. + If a mismatch is detected, the operation will result in a VersionConflictException and a status code of 409.
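As a rough sketch with this Python client (index name, document, and the localhost URL are illustrative), a conditional write driven by if_seq_no and if_primary_term might look like:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Read the document to learn its current sequence number and primary term.
doc = client.get(index="my-index-000001", id="1")

# Reindex it only if nothing else has modified it since that read;
# a concurrent write bumps _seq_no and this call fails with HTTP 409.
client.index(
    index="my-index-000001",
    id="1",
    document={"user": {"id": "elkbee"}},
    if_seq_no=doc["_seq_no"],
    if_primary_term=doc["_primary_term"],
)
```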

+

Routing

+

By default, shard placement — or routing — is controlled by using a hash of the document's ID value. + For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the routing parameter.

+

When setting up explicit mapping, you can also use the _routing field to direct the index operation to extract the routing value from the document itself. + This does come at the (very minimal) cost of an additional document parsing pass. + If the _routing mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.

+

NOTE: Data streams do not support custom routing unless they were created with the allow_custom_routing setting enabled in the template.

+

Distributed

+

The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. + After the primary shard completes the operation, if needed, the update is distributed to applicable replicas.

+

Active shards

+

To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. + If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. + By default, write operations only wait for the primary shards to be active before proceeding (that is to say wait_for_active_shards is 1). + This default can be overridden in the index settings dynamically by setting index.write.wait_for_active_shards. + To alter this behavior per operation, use the wait_for_active_shards request parameter.

+

Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is number_of_replicas+1). + Specifying a negative value or a number greater than the number of shard copies will throw an error.

+

For example, suppose you have a cluster of three nodes, A, B, and C, and you create an index named index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. If wait_for_active_shards is set on the request to 3 (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. However, if you set wait_for_active_shards to all (or to 4, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. The operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard.

+

It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. + After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. + The _shards section of the API response reveals the number of shard copies on which replication succeeded and failed.
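A minimal sketch of the per-request override with this client (index name and document are illustrative):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Require two active shard copies (primary plus one replica) before the
# operation proceeds; "all" and "index-setting" are also accepted.
client.index(
    index="my-index-000001",
    document={"user": {"id": "elkbee"}},
    wait_for_active_shards=2,
)
```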

+

No operation (noop) updates

+

When updating a document by using this API, a new version of the document is always created even if the document hasn't changed. + If this isn't acceptable use the _update API with detect_noop set to true. + The detect_noop option isn't available on this API because it doesn’t fetch the old source and isn't able to compare it against the new source.

+

There isn't a definitive rule for when noop updates aren't acceptable. + It's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.

+

Versioning

+

Each indexed document is given a version number. + By default, internal versioning is used that starts at 1 and increments with each update, deletes included. + Optionally, the version number can be set to an external value (for example, if maintained in a database). + To enable this functionality, version_type should be set to external. + The value provided must be a numeric, long value greater than or equal to 0, and less than around 9.2e+18.

+

NOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations. + If no version is provided, the operation runs without any version checks.

+

When using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document. + If true, the document will be indexed and the new version number used. + If the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. For example:

+
          PUT my-index-000001/_doc/1?version=2&version_type=external
          {
            "user": {
              "id": "elkbee"
            }
          }
+
+          In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.
+          If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).
+
+          A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.
+          Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.
+          
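The same request through this Python client might look roughly like this (index name and document are illustrative):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# External versioning: the write succeeds only if the supplied version is
# greater than the version currently stored for document 1.
client.index(
    index="my-index-000001",
    id="1",
    document={"user": {"id": "elkbee"}},
    version=2,
    version_type="external",
)
```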
+ ``_ @@ -2896,7 +2923,11 @@ def info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get cluster info. Get basic build, version, and cluster information. + .. raw:: html + +

Get cluster info. + Get basic build, version, and cluster information.

+ ``_ """ @@ -2953,15 +2984,18 @@ def knn_search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Run a knn search. NOTE: The kNN search API has been replaced by the `knn` option - in the search API. Perform a k-nearest neighbor (kNN) search on a dense_vector - field and return the matching documents. Given a query vector, the API finds - the k closest vectors and returns those documents as search hits. Elasticsearch - uses the HNSW algorithm to support efficient kNN search. Like most kNN algorithms, - HNSW is an approximate method that sacrifices result accuracy for improved search - speed. This means the results returned are not always the true k closest neighbors. - The kNN search API supports restricting the search using a filter. The search - will return the top k documents that also match the filter query. + .. raw:: html + +

Run a knn search.

+

NOTE: The kNN search API has been replaced by the knn option in the search API.

+

Perform a k-nearest neighbor (kNN) search on a dense_vector field and return the matching documents. + Given a query vector, the API finds the k closest vectors and returns those documents as search hits.

+

Elasticsearch uses the HNSW algorithm to support efficient kNN search. + Like most kNN algorithms, HNSW is an approximate method that sacrifices result accuracy for improved search speed. + This means the results returned are not always the true k closest neighbors.

+

The kNN search API supports restricting the search using a filter. + The search will return the top k documents that also match the filter query.
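A hedged sketch of the recommended replacement, the knn option of the search API, using this client (field names, vector, and filter are made up):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Approximate kNN via the search API, restricted by a filter query.
resp = client.search(
    index="my-image-index",
    knn={
        "field": "image_vector",
        "query_vector": [0.1, 0.2, 0.3],
        "k": 10,
        "num_candidates": 100,
        "filter": {"term": {"file_type": "png"}},
    },
)
print(resp["hits"]["total"])
```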

+ ``_ @@ -3062,10 +3096,13 @@ def mget( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get multiple documents. Get multiple JSON documents by ID from one or more indices. - If you specify an index in the request URI, you only need to specify the document - IDs in the request body. To ensure fast responses, this multi get (mget) API - responds with partial results if one or more shards fail. + .. raw:: html + +

Get multiple documents.

+

Get multiple JSON documents by ID from one or more indices. + If you specify an index in the request URI, you only need to specify the document IDs in the request body. + To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.

+ ``_ @@ -3186,13 +3223,21 @@ def msearch( typed_keys: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Run multiple searches. The format of the request is similar to the bulk API format - and makes use of the newline delimited JSON (NDJSON) format. The structure is - as follows: ``` header\\n body\\n header\\n body\\n ``` This structure is specifically - optimized to reduce parsing if a specific search ends up redirected to another - node. IMPORTANT: The final line of data must end with a newline character `\\n`. - Each newline character may be preceded by a carriage return `\\r`. When sending - requests to this endpoint the `Content-Type` header should be set to `application/x-ndjson`. + .. raw:: html + +

Run multiple searches.

+

The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format. + The structure is as follows:

+
          header\\n
          body\\n
          header\\n
          body\\n

This structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node.

+

IMPORTANT: The final line of data must end with a newline character \\n. + Each newline character may be preceded by a carriage return \\r. + When sending requests to this endpoint the Content-Type header should be set to application/x-ndjson.
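With this Python client the NDJSON body is assembled for you; a minimal sketch (index names and queries are illustrative):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Alternating header/body dictionaries mirror the header\n body\n structure.
resp = client.msearch(
    searches=[
        {"index": "my-index-000001"},
        {"query": {"match": {"message": "this is a test"}}},
        {"index": "my-index-000002"},
        {"query": {"match_all": {}}},
    ]
)
for item in resp["responses"]:
    print(item["hits"]["total"])
```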

+ ``_ @@ -3324,7 +3369,10 @@ def msearch_template( typed_keys: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Run multiple templated searches. + .. raw:: html + +

Run multiple templated searches.

+ ``_ @@ -3419,11 +3467,14 @@ def mtermvectors( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get multiple term vectors. You can specify existing documents by index and ID - or provide artificial documents in the body of the request. You can specify the - index in the request body or request URI. The response contains a `docs` array - with all the fetched termvectors. Each element has the structure provided by - the termvectors API. + .. raw:: html + +

Get multiple term vectors.

+

You can specify existing documents by index and ID or provide artificial documents in the body of the request. + You can specify the index in the request body or request URI. + The response contains a docs array with all the fetched termvectors. + Each element has the structure provided by the termvectors API.

+ ``_ @@ -3535,15 +3586,18 @@ def open_point_in_time( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Open a point in time. A search request by default runs against the most recent - visible data of the target indices, which is called point in time. Elasticsearch - pit (point in time) is a lightweight view into the state of the data as it existed - when initiated. In some cases, it’s preferred to perform multiple search requests - using the same point in time. For example, if refreshes happen between `search_after` - requests, then the results of those requests might not be consistent as changes - happening between searches are only visible to the more recent point in time. - A point in time must be opened explicitly before being used in search requests. - The `keep_alive` parameter tells Elasticsearch how long it should persist. + .. raw:: html + +

Open a point in time.

+

A search request by default runs against the most recent visible data of the target indices, + which is called point in time. Elasticsearch pit (point in time) is a lightweight view into the + state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple + search requests using the same point in time. For example, if refreshes happen between + search_after requests, then the results of those requests might not be consistent as changes happening + between searches are only visible to the more recent point in time.

+

A point in time must be opened explicitly before being used in search requests. + The keep_alive parameter tells Elasticsearch how long it should persist.
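A minimal open/search/close round trip with this client (index, sort field, and keep-alive value are illustrative):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Open a point in time, search against it, then release it explicitly.
pit = client.open_point_in_time(index="my-index-000001", keep_alive="1m")
resp = client.search(
    query={"match_all": {}},
    pit={"id": pit["id"], "keep_alive": "1m"},
    sort=[{"@timestamp": "asc"}],
    size=100,
)
client.close_point_in_time(id=pit["id"])
```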

+ ``_ @@ -3630,8 +3684,11 @@ def put_script( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a script or search template. Creates or updates a stored script - or search template. + .. raw:: html + +

Create or update a script or search template. + Creates or updates a stored script or search template.

+ ``_ @@ -3716,8 +3773,11 @@ def rank_eval( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Evaluate ranked search results. Evaluate the quality of ranked search results - over a set of typical search queries. + .. raw:: html + +

Evaluate ranked search results.

+

Evaluate the quality of ranked search results over a set of typical search queries.

+ ``_ @@ -3811,149 +3871,145 @@ def reindex( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Reindex documents. Copy documents from a source to a destination. You can copy - all documents to the destination index or reindex a subset of the documents. - The source can be any existing index, alias, or data stream. The destination - must differ from the source. For example, you cannot reindex a data stream into - itself. IMPORTANT: Reindex requires `_source` to be enabled for all documents - in the source. The destination should be configured as wanted before calling - the reindex API. Reindex does not copy the settings from the source or its associated - template. Mappings, shard counts, and replicas, for example, must be configured - ahead of time. If the Elasticsearch security features are enabled, you must have - the following security privileges: * The `read` index privilege for the source - data stream, index, or alias. * The `write` index privilege for the destination - data stream, index, or index alias. * To automatically create a data stream or - index with a reindex API request, you must have the `auto_configure`, `create_index`, - or `manage` index privilege for the destination data stream, index, or alias. - * If reindexing from a remote cluster, the `source.remote.user` must have the - `monitor` cluster privilege and the `read` index privilege for the source data - stream, index, or alias. If reindexing from a remote cluster, you must explicitly - allow the remote host in the `reindex.remote.whitelist` setting. Automatic data - stream creation requires a matching index template with data stream enabled. - The `dest` element can be configured like the index API to control optimistic - concurrency control. Omitting `version_type` or setting it to `internal` causes - Elasticsearch to blindly dump documents into the destination, overwriting any - that happen to have the same ID. Setting `version_type` to `external` causes - Elasticsearch to preserve the `version` from the source, create any documents - that are missing, and update any documents that have an older version in the - destination than they do in the source. Setting `op_type` to `create` causes - the reindex API to create only missing documents in the destination. All existing - documents will cause a version conflict. IMPORTANT: Because data streams are - append-only, any reindex request to a destination data stream must have an `op_type` - of `create`. A reindex can only add new documents to a destination data stream. - It cannot update existing documents in a destination data stream. By default, - version conflicts abort the reindex process. To continue reindexing if there - are conflicts, set the `conflicts` request body property to `proceed`. In this - case, the response includes a count of the version conflicts that were encountered. - Note that the handling of other error types is unaffected by the `conflicts` - property. Additionally, if you opt to count version conflicts, the operation - could attempt to reindex more documents from the source than `max_docs` until - it has successfully indexed `max_docs` documents into the target or it has gone - through every document in the source query. NOTE: The reindex API makes no effort - to handle ID collisions. The last document written will "win" but the order isn't - usually predictable so it is not a good idea to rely on this behavior. Instead, - make sure that IDs are unique by using a script. 
**Running reindex asynchronously** - If the request contains `wait_for_completion=false`, Elasticsearch performs some - preflight checks, launches the request, and returns a task you can use to cancel - or get the status of the task. Elasticsearch creates a record of this task as - a document at `_tasks/`. **Reindex from multiple sources** If you have - many sources to reindex it is generally better to reindex them one at a time - rather than using a glob pattern to pick up multiple sources. That way you can - resume the process if there are any errors by removing the partially completed - source and starting over. It also makes parallelizing the process fairly simple: - split the list of sources to reindex and run each list in parallel. For example, - you can use a bash script like this: ``` for index in i1 i2 i3 i4 i5; do curl - -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{ "source": - { "index": "'$index'" }, "dest": { "index": "'$index'-reindexed" } }' done ``` - **Throttling** Set `requests_per_second` to any positive decimal number (`1.4`, - `6`, `1000`, for example) to throttle the rate at which reindex issues batches - of index operations. Requests are throttled by padding each batch with a wait - time. To turn off throttling, set `requests_per_second` to `-1`. The throttling - is done by waiting between batches so that the scroll that reindex uses internally - can be given a timeout that takes into account the padding. The padding time - is the difference between the batch size divided by the `requests_per_second` - and the time spent writing. By default the batch size is `1000`, so if `requests_per_second` - is set to `500`: ``` target_time = 1000 / 500 per second = 2 seconds wait_time - = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds ``` Since the - batch is issued as a single bulk request, large batch sizes cause Elasticsearch - to create many requests and then wait for a while before starting the next set. - This is "bursty" instead of "smooth". **Slicing** Reindex supports sliced scroll - to parallelize the reindexing process. This parallelization can improve efficiency - and provide a convenient way to break the request down into smaller parts. NOTE: - Reindexing from remote clusters does not support manual or automatic slicing. - You can slice a reindex request manually by providing a slice ID and total number - of slices to each request. You can also let reindex automatically parallelize - by using sliced scroll to slice on `_id`. The `slices` parameter specifies the - number of slices to use. Adding `slices` to the reindex request just automates - the manual process, creating sub-requests which means it has some quirks: * You - can see these requests in the tasks API. These sub-requests are "child" tasks - of the task for the request with slices. * Fetching the status of the task for - the request with `slices` only contains the status of completed slices. * These - sub-requests are individually addressable for things like cancellation and rethrottling. - * Rethrottling the request with `slices` will rethrottle the unfinished sub-request - proportionally. * Canceling the request with `slices` will cancel each sub-request. - * Due to the nature of `slices`, each sub-request won't get a perfectly even - portion of the documents. All documents will be addressed, but some slices may - be larger than others. Expect larger slices to have a more even distribution. 
- * Parameters like `requests_per_second` and `max_docs` on a request with `slices` - are distributed proportionally to each sub-request. Combine that with the previous - point about distribution being uneven and you should conclude that using `max_docs` - with `slices` might not result in exactly `max_docs` documents being reindexed. - * Each sub-request gets a slightly different snapshot of the source, though these - are all taken at approximately the same time. If slicing automatically, setting - `slices` to `auto` will choose a reasonable number for most indices. If slicing - manually or otherwise tuning automatic slicing, use the following guidelines. - Query performance is most efficient when the number of slices is equal to the - number of shards in the index. If that number is large (for example, `500`), - choose a lower number as too many slices will hurt performance. Setting slices - higher than the number of shards generally does not improve efficiency and adds - overhead. Indexing performance scales linearly across available resources with - the number of slices. Whether query or indexing performance dominates the runtime - depends on the documents being reindexed and cluster resources. **Modify documents - during reindexing** Like `_update_by_query`, reindex operations support a script - that modifies the document. Unlike `_update_by_query`, the script is allowed - to modify the document's metadata. Just as in `_update_by_query`, you can set - `ctx.op` to change the operation that is run on the destination. For example, - set `ctx.op` to `noop` if your script decides that the document doesn’t have - to be indexed in the destination. This "no operation" will be reported in the - `noop` counter in the response body. Set `ctx.op` to `delete` if your script - decides that the document must be deleted from the destination. The deletion - will be reported in the `deleted` counter in the response body. Setting `ctx.op` - to anything else will return an error, as will setting any other field in `ctx`. - Think of the possibilities! Just be careful; you are able to change: * `_id` - * `_index` * `_version` * `_routing` Setting `_version` to `null` or clearing - it from the `ctx` map is just like not sending the version in an indexing request. - It will cause the document to be overwritten in the destination regardless of - the version on the target or the version type you use in the reindex API. **Reindex - from remote** Reindex supports reindexing from a remote Elasticsearch cluster. - The `host` parameter must contain a scheme, host, port, and optional path. The - `username` and `password` parameters are optional and when they are present the - reindex operation will connect to the remote Elasticsearch node using basic authentication. - Be sure to use HTTPS when using basic authentication or the password will be - sent in plain text. There are a range of settings available to configure the - behavior of the HTTPS connection. When using Elastic Cloud, it is also possible - to authenticate against the remote cluster through the use of a valid API key. - Remote hosts must be explicitly allowed with the `reindex.remote.whitelist` setting. - It can be set to a comma delimited list of allowed remote host and port combinations. - Scheme is ignored; only the host and port are used. For example: ``` reindex.remote.whitelist: - [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*"] ``` The list of - allowed hosts must be configured on any nodes that will coordinate the reindex. 
- This feature should work with remote clusters of any version of Elasticsearch. - This should enable you to upgrade from any version of Elasticsearch to the current - version by reindexing from a cluster of the old version. WARNING: Elasticsearch - does not support forward compatibility across major versions. For example, you - cannot reindex from a 7.x cluster into a 6.x cluster. To enable queries sent - to older versions of Elasticsearch, the `query` parameter is sent directly to - the remote host without validation or modification. NOTE: Reindexing from remote - clusters does not support manual or automatic slicing. Reindexing from a remote - server uses an on-heap buffer that defaults to a maximum size of 100mb. If the - remote index includes very large documents you'll need to use a smaller batch - size. It is also possible to set the socket read timeout on the remote connection - with the `socket_timeout` field and the connection timeout with the `connect_timeout` - field. Both default to 30 seconds. **Configuring SSL parameters** Reindex from - remote supports configurable SSL settings. These must be specified in the `elasticsearch.yml` - file, with the exception of the secure settings, which you add in the Elasticsearch - keystore. It is not possible to configure SSL in the body of the reindex request. + .. raw:: html + +

Reindex documents.

+

Copy documents from a source to a destination. + You can copy all documents to the destination index or reindex a subset of the documents. + The source can be any existing index, alias, or data stream. + The destination must differ from the source. + For example, you cannot reindex a data stream into itself.

+

IMPORTANT: Reindex requires _source to be enabled for all documents in the source. + The destination should be configured as wanted before calling the reindex API. + Reindex does not copy the settings from the source or its associated template. + Mappings, shard counts, and replicas, for example, must be configured ahead of time.

+

If the Elasticsearch security features are enabled, you must have the following security privileges:

+
  • The read index privilege for the source data stream, index, or alias.
  • The write index privilege for the destination data stream, index, or index alias.
  • To automatically create a data stream or index with a reindex API request, you must have the auto_configure, create_index, or manage index privilege for the destination data stream, index, or alias.
  • If reindexing from a remote cluster, the source.remote.user must have the monitor cluster privilege and the read index privilege for the source data stream, index, or alias.

If reindexing from a remote cluster, you must explicitly allow the remote host in the reindex.remote.whitelist setting. + Automatic data stream creation requires a matching index template with data stream enabled.

+

The dest element can be configured like the index API to control optimistic concurrency control. + Omitting version_type or setting it to internal causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID.

+

Setting version_type to external causes Elasticsearch to preserve the version from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source.

+

Setting op_type to create causes the reindex API to create only missing documents in the destination. + All existing documents will cause a version conflict.

+

IMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an op_type of create. + A reindex can only add new documents to a destination data stream. + It cannot update existing documents in a destination data stream.

+

By default, version conflicts abort the reindex process. + To continue reindexing if there are conflicts, set the conflicts request body property to proceed. + In this case, the response includes a count of the version conflicts that were encountered. + Note that the handling of other error types is unaffected by the conflicts property. + Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than max_docs until it has successfully indexed max_docs documents into the target or it has gone through every document in the source query.
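A hedged sketch of such a reindex call with this client (index names are illustrative):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Preserve source versions, keep going past version conflicts, and run the
# operation as a task rather than blocking the request.
resp = client.reindex(
    source={"index": "my-index-000001"},
    dest={"index": "my-new-index-000001", "version_type": "external"},
    conflicts="proceed",
    wait_for_completion=False,
)
print(resp["task"])  # poll or cancel via the tasks API
```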

+

NOTE: The reindex API makes no effort to handle ID collisions. + The last document written will "win" but the order isn't usually predictable so it is not a good idea to rely on this behavior. + Instead, make sure that IDs are unique by using a script.

+

Running reindex asynchronously

+

If the request contains wait_for_completion=false, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. + Elasticsearch creates a record of this task as a document at _tasks/<task_id>.

+

Reindex from multiple sources

+

If you have many sources to reindex it is generally better to reindex them one at a time rather than using a glob pattern to pick up multiple sources. + That way you can resume the process if there are any errors by removing the partially completed source and starting over. + It also makes parallelizing the process fairly simple: split the list of sources to reindex and run each list in parallel.

+

For example, you can use a bash script like this:

+
          for index in i1 i2 i3 i4 i5; do
            curl -HContent-Type:application/json -XPOST localhost:9200/_reindex?pretty -d'{
              "source": {
                "index": "'$index'"
              },
              "dest": {
                "index": "'$index'-reindexed"
              }
            }'
          done

Throttling

+

Set requests_per_second to any positive decimal number (1.4, 6, 1000, for example) to throttle the rate at which reindex issues batches of index operations. + Requests are throttled by padding each batch with a wait time. + To turn off throttling, set requests_per_second to -1.

+

The throttling is done by waiting between batches so that the scroll that reindex uses internally can be given a timeout that takes into account the padding. + The padding time is the difference between the batch size divided by the requests_per_second and the time spent writing. + By default the batch size is 1000, so if requests_per_second is set to 500:

+
          target_time = 1000 / 500 per second = 2 seconds
          wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds

Since the batch is issued as a single bulk request, large batch sizes cause Elasticsearch to create many requests and then wait for a while before starting the next set. + This is "bursty" instead of "smooth".

+

Slicing

+

Reindex supports sliced scroll to parallelize the reindexing process. + This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts.

+

NOTE: Reindexing from remote clusters does not support manual or automatic slicing.

+

You can slice a reindex request manually by providing a slice ID and total number of slices to each request. + You can also let reindex automatically parallelize by using sliced scroll to slice on _id. + The slices parameter specifies the number of slices to use.

+

Adding slices to the reindex request just automates the manual process, creating sub-requests which means it has some quirks:

+
  • You can see these requests in the tasks API. These sub-requests are "child" tasks of the task for the request with slices.
  • Fetching the status of the task for the request with slices only contains the status of completed slices.
  • These sub-requests are individually addressable for things like cancellation and rethrottling.
  • Rethrottling the request with slices will rethrottle the unfinished sub-request proportionally.
  • Canceling the request with slices will cancel each sub-request.
  • Due to the nature of slices, each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.
  • Parameters like requests_per_second and max_docs on a request with slices are distributed proportionally to each sub-request. Combine that with the previous point about distribution being uneven and you should conclude that using max_docs with slices might not result in exactly max_docs documents being reindexed.
  • Each sub-request gets a slightly different snapshot of the source, though these are all taken at approximately the same time.

If slicing automatically, setting slices to auto will choose a reasonable number for most indices. + If slicing manually or otherwise tuning automatic slicing, use the following guidelines.

+

Query performance is most efficient when the number of slices is equal to the number of shards in the index. + If that number is large (for example, 500), choose a lower number as too many slices will hurt performance. + Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.

+

Indexing performance scales linearly across available resources with the number of slices.

+

Whether query or indexing performance dominates the runtime depends on the documents being reindexed and cluster resources.
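As a rough illustration of automatic slicing with this client (index names are illustrative):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Let Elasticsearch choose a reasonable slice count for the source index.
client.reindex(
    source={"index": "my-index-000001"},
    dest={"index": "my-new-index-000001"},
    slices="auto",
)
```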

+

Modify documents during reindexing

+

Like _update_by_query, reindex operations support a script that modifies the document. + Unlike _update_by_query, the script is allowed to modify the document's metadata.

+

Just as in _update_by_query, you can set ctx.op to change the operation that is run on the destination. + For example, set ctx.op to noop if your script decides that the document doesn’t have to be indexed in the destination. This "no operation" will be reported in the noop counter in the response body. + Set ctx.op to delete if your script decides that the document must be deleted from the destination. + The deletion will be reported in the deleted counter in the response body. + Setting ctx.op to anything else will return an error, as will setting any other field in ctx.

+

Think of the possibilities! Just be careful; you are able to change:

+
  • _id
  • _index
  • _version
  • _routing

Setting _version to null or clearing it from the ctx map is just like not sending the version in an indexing request. + It will cause the document to be overwritten in the destination regardless of the version on the target or the version type you use in the reindex API.
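A sketch of a scripted reindex with this client; the archived flag and temp_field are invented for illustration:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Skip archived documents and strip a scratch field from everything else.
client.reindex(
    source={"index": "my-index-000001"},
    dest={"index": "my-new-index-000001"},
    script={
        "lang": "painless",
        "source": (
            "if (ctx._source.archived == true) { ctx.op = 'noop' } "
            "else { ctx._source.remove('temp_field') }"
        ),
    },
)
```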

+

Reindex from remote

+

Reindex supports reindexing from a remote Elasticsearch cluster. + The host parameter must contain a scheme, host, port, and optional path. + The username and password parameters are optional and when they are present the reindex operation will connect to the remote Elasticsearch node using basic authentication. + Be sure to use HTTPS when using basic authentication or the password will be sent in plain text. + There are a range of settings available to configure the behavior of the HTTPS connection.

+

When using Elastic Cloud, it is also possible to authenticate against the remote cluster through the use of a valid API key. + Remote hosts must be explicitly allowed with the reindex.remote.whitelist setting. + It can be set to a comma delimited list of allowed remote host and port combinations. + Scheme is ignored; only the host and port are used. + For example:

+
          reindex.remote.whitelist: [otherhost:9200, another:9200, 127.0.10.*:9200, localhost:*]

The list of allowed hosts must be configured on any nodes that will coordinate the reindex. + This feature should work with remote clusters of any version of Elasticsearch. + This should enable you to upgrade from any version of Elasticsearch to the current version by reindexing from a cluster of the old version.

+

WARNING: Elasticsearch does not support forward compatibility across major versions. + For example, you cannot reindex from a 7.x cluster into a 6.x cluster.

+

To enable queries sent to older versions of Elasticsearch, the query parameter is sent directly to the remote host without validation or modification.

+

NOTE: Reindexing from remote clusters does not support manual or automatic slicing.

+

Reindexing from a remote server uses an on-heap buffer that defaults to a maximum size of 100mb. + If the remote index includes very large documents you'll need to use a smaller batch size. + It is also possible to set the socket read timeout on the remote connection with the socket_timeout field and the connection timeout with the connect_timeout field. + Both default to 30 seconds.
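A sketch of reindex-from-remote with this client; the host, credentials, and index names are placeholders, and the remote host must already be listed in reindex.remote.whitelist:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

client.reindex(
    source={
        "remote": {
            "host": "https://otherhost:9200",  # placeholder remote cluster
            "username": "user",                # placeholder credentials
            "password": "pass",
            "socket_timeout": "1m",            # read timeout on the remote connection
            "connect_timeout": "10s",          # connection timeout
        },
        "index": "my-remote-index",
    },
    dest={"index": "my-local-index"},
)
```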

+

Configuring SSL parameters

+

Reindex from remote supports configurable SSL settings. + These must be specified in the elasticsearch.yml file, with the exception of the secure settings, which you add in the Elasticsearch keystore. + It is not possible to configure SSL in the body of the reindex request.

+ ``_ @@ -4067,11 +4123,17 @@ def reindex_rethrottle( requests_per_second: t.Optional[float] = None, ) -> ObjectApiResponse[t.Any]: """ - Throttle a reindex operation. Change the number of requests per second for a - particular reindex operation. For example: ``` POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 - ``` Rethrottling that speeds up the query takes effect immediately. Rethrottling - that slows down the query will take effect after completing the current batch. - This behavior prevents scroll timeouts. + .. raw:: html + +

Throttle a reindex operation.

+

Change the number of requests per second for a particular reindex operation. + For example:

+
          POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1

Rethrottling that speeds up the query takes effect immediately. + Rethrottling that slows down the query will take effect after completing the current batch. + This behavior prevents scroll timeouts.
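The equivalent call through this client might look like the following (task ID copied from the example above):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Disable throttling for the running reindex task.
client.reindex_rethrottle(
    task_id="r1A2WoRbTwKZ516z6NEs5A:36619",
    requests_per_second=-1,
)
```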

+ ``_ @@ -4123,7 +4185,11 @@ def render_search_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Render a search template. Render a search template as a search request body. + .. raw:: html + +

Render a search template.

+

Render a search template as a search request body.

+ ``_ @@ -4192,7 +4258,11 @@ def scripts_painless_execute( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Run a script. Runs a script and returns a result. + .. raw:: html + +

Run a script. + Runs a script and returns a result.

+ ``_ @@ -4250,22 +4320,19 @@ def scroll( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Run a scrolling search. IMPORTANT: The scroll API is no longer recommend for - deep pagination. If you need to preserve the index state while paging through - more than 10,000 hits, use the `search_after` parameter with a point in time - (PIT). The scroll API gets large sets of results from a single scrolling search - request. To get the necessary scroll ID, submit a search API request that includes - an argument for the `scroll` query parameter. The `scroll` parameter indicates - how long Elasticsearch should retain the search context for the request. The - search response returns a scroll ID in the `_scroll_id` response body parameter. - You can then use the scroll ID with the scroll API to retrieve the next batch - of results for the request. If the Elasticsearch security features are enabled, - the access to the results of a specific scroll ID is restricted to the user or - API key that submitted the search. You can also use the scroll API to specify - a new scroll parameter that extends or shortens the retention period for the - search context. IMPORTANT: Results from a scrolling search reflect the state - of the index at the time of the initial search request. Subsequent indexing or - document changes only affect later search and scroll requests. + .. raw:: html + +

Run a scrolling search.

+

IMPORTANT: The scroll API is no longer recommended for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the search_after parameter with a point in time (PIT).

+

The scroll API gets large sets of results from a single scrolling search request. + To get the necessary scroll ID, submit a search API request that includes an argument for the scroll query parameter. + The scroll parameter indicates how long Elasticsearch should retain the search context for the request. + The search response returns a scroll ID in the _scroll_id response body parameter. + You can then use the scroll ID with the scroll API to retrieve the next batch of results for the request. + If the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search.
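A minimal paging loop with this client (index name, page size, and keep-alive value are illustrative):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

resp = client.search(
    index="my-index-000001",
    scroll="2m",              # keep the search context alive between pages
    size=1000,
    query={"match_all": {}},
)
while resp["hits"]["hits"]:
    # process resp["hits"]["hits"] here
    resp = client.scroll(scroll_id=resp["_scroll_id"], scroll="2m")
client.clear_scroll(scroll_id=resp["_scroll_id"])
```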

+

You can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context.

+

IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.

+ ``_ @@ -4455,9 +4522,13 @@ def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Run a search. Get search hits that match the query defined in the request. You - can provide search queries using the `q` query string parameter or the request - body. If both are specified, only the query parameter is used. + .. raw:: html + +

Run a search.

+

Get search hits that match the query defined in the request. + You can provide search queries using the q query string parameter or the request body. + If both are specified, only the query parameter is used.

+ ``_ @@ -4887,7 +4958,11 @@ def search_mvt( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> BinaryApiResponse: """ - Search a vector tile. Search a vector tile for geospatial values. + .. raw:: html + +

Search a vector tile.

+

Search a vector tile for geospatial values.

+ ``_ @@ -5042,10 +5117,13 @@ def search_shards( routing: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the search shards. Get the indices and shards that a search request would - be run against. This information can be useful for working out issues or planning - optimizations with routing and shard preferences. When filtered aliases are used, - the filter is returned as part of the indices section. + .. raw:: html + +

Get the search shards.

+

Get the indices and shards that a search request would be run against. + This information can be useful for working out issues or planning optimizations with routing and shard preferences. + When filtered aliases are used, the filter is returned as part of the indices section.

+ ``_ @@ -5149,7 +5227,10 @@ def search_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Run a search with a search template. + .. raw:: html + +

Run a search with a search template.

+ ``_ @@ -5281,15 +5362,15 @@ def terms_enum( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get terms in an index. Discover terms that match a partial string in an index. - This "terms enum" API is designed for low-latency look-ups used in auto-complete - scenarios. If the `complete` property in the response is false, the returned - terms set may be incomplete and should be treated as approximate. This can occur - due to a few reasons, such as a request timeout or a node error. NOTE: The terms - enum API may return terms from deleted documents. Deleted documents are initially - only marked as deleted. It is not until their segments are merged that documents - are actually deleted. Until that happens, the terms enum API will return terms - from these documents. + .. raw:: html + +

Get terms in an index.

+

Discover terms that match a partial string in an index. + This "terms enum" API is designed for low-latency look-ups used in auto-complete scenarios.

+

If the complete property in the response is false, the returned terms set may be incomplete and should be treated as approximate. + This can occur due to a few reasons, such as a request timeout or a node error.

+

NOTE: The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents.

+ ``_ @@ -5387,8 +5468,11 @@ def termvectors( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get term vector information. Get information and statistics about terms in the - fields of a particular document. + .. raw:: html + +

Get term vector information.

+

Get information and statistics about terms in the fields of a particular document.

+ ``_ @@ -5530,19 +5614,24 @@ def update( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update a document. Update a document by running a script or passing a partial - document. If the Elasticsearch security features are enabled, you must have the - `index` or `write` index privilege for the target index or index alias. The script - can update, delete, or skip modifying the document. The API also supports passing - a partial document, which is merged into the existing document. To fully replace - an existing document, use the index API. This operation: * Gets the document - (collocated with the shard) from the index. * Runs the specified script. * Indexes - the result. The document must still be reindexed, but using this API removes - some network roundtrips and reduces chances of version conflicts between the - GET and the index operation. The `_source` field must be enabled to use this - API. In addition to `_source`, you can access the following variables through - the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`, and `_now` (the - current timestamp). + .. raw:: html + +

Update a document.

+

Update a document by running a script or passing a partial document.

+

If the Elasticsearch security features are enabled, you must have the index or write index privilege for the target index or index alias.

+

The script can update, delete, or skip modifying the document. + The API also supports passing a partial document, which is merged into the existing document. + To fully replace an existing document, use the index API. + This operation:

+
  • Gets the document (collocated with the shard) from the index.
  • Runs the specified script.
  • Indexes the result.

The document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation.

+

The _source field must be enabled to use this API. + In addition to _source, you can access the following variables through the ctx map: _index, _type, _id, _version, _routing, and _now (the current timestamp).
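Two hedged sketches with this client, one partial-document merge and one scripted update (index, field, and counter names are illustrative):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Partial document: the fragment is merged into the existing _source.
client.update(index="my-index-000001", id="1", doc={"name": "Updated"})

# Scripted update working through the ctx map.
client.update(
    index="my-index-000001",
    id="1",
    script={
        "source": "ctx._source.counter += params.count",
        "params": {"count": 4},
    },
)
```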

+ ``_ @@ -5709,9 +5798,12 @@ def update_by_query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update documents. Updates documents that match the specified query. If no query - is specified, performs an update on every document in the data stream or index - without modifying the source, which is useful for picking up mapping changes. + .. raw:: html + +

Update documents. + Updates documents that match the specified query. + If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.

+ ``_ @@ -5907,10 +5999,12 @@ def update_by_query_rethrottle( requests_per_second: t.Optional[float] = None, ) -> ObjectApiResponse[t.Any]: """ - Throttle an update by query operation. Change the number of requests per second - for a particular update by query operation. Rethrottling that speeds up the query - takes effect immediately but rethrotting that slows down the query takes effect - after completing the current batch to prevent scroll timeouts. + .. raw:: html + +

Throttle an update by query operation.

+

Change the number of requests per second for a particular update by query operation. Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts.

+ ``_ diff --git a/elasticsearch/_sync/client/async_search.py b/elasticsearch/_sync/client/async_search.py index 87a8e5707..30a582b10 100644 --- a/elasticsearch/_sync/client/async_search.py +++ b/elasticsearch/_sync/client/async_search.py @@ -36,11 +36,13 @@ def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete an async search. If the asynchronous search is still running, it is cancelled. - Otherwise, the saved search results are deleted. If the Elasticsearch security - features are enabled, the deletion of a specific async search is restricted to: - the authenticated user that submitted the original search request; users that - have the `cancel_task` cluster privilege. + .. raw:: html + +

Delete an async search.

+

If the asynchronous search is still running, it is cancelled. + Otherwise, the saved search results are deleted. + If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the cancel_task cluster privilege.

+ ``_ @@ -85,10 +87,12 @@ def get( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Get async search results. Retrieve the results of a previously submitted asynchronous - search request. If the Elasticsearch security features are enabled, access to - the results of a specific async search is restricted to the user or API key that - submitted it. + .. raw:: html + +

Get async search results.

+

Retrieve the results of a previously submitted asynchronous search request. + If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it.

+ ``_ @@ -149,10 +153,12 @@ def status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the async search status. Get the status of a previously submitted async search - request given its identifier, without retrieving search results. If the Elasticsearch - security features are enabled, use of this API is restricted to the `monitoring_user` - role. + .. raw:: html + +

Get the async search status.

+

Get the status of a previously submitted async search request given its identifier, without retrieving search results. + If the Elasticsearch security features are enabled, use of this API is restricted to the monitoring_user role.

+ ``_ @@ -326,15 +332,14 @@ def submit( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Run an async search. When the primary sort of the results is an indexed field, - shards get sorted based on minimum and maximum value that they hold for that - field. Partial results become available following the sort criteria that was - requested. Warning: Asynchronous search does not support scroll or search requests - that include only the suggest section. By default, Elasticsearch does not allow - you to store an async search response larger than 10Mb and an attempt to do this - results in an error. The maximum allowed size for a stored async search response - can be set by changing the `search.max_async_search_response_size` cluster level - setting. + .. raw:: html + +

Run an async search.

+

When the primary sort of the results is an indexed field, shards get sorted based on minimum and maximum value that they hold for that field. Partial results become available following the sort criteria that was requested.

+

Warning: Asynchronous search does not support scroll or search requests that include only the suggest section.

+

By default, Elasticsearch does not allow you to store an async search response larger than 10Mb and an attempt to do this results in an error. + The maximum allowed size for a stored async search response can be set by changing the search.max_async_search_response_size cluster level setting.
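A sketch of the full lifecycle through the Python client; the index name and query are placeholders, and keep_on_completion=True ensures an ID is returned even when the search finishes within the wait timeout:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    # Submit the search and keep the results retrievable afterwards.
    submitted = client.async_search.submit(
        index="my-index",                 # hypothetical index name
        query={"match_all": {}},
        wait_for_completion_timeout="1s",
        keep_on_completion=True,
    )
    search_id = submitted["id"]

    # Check progress without fetching results, then fetch and clean up.
    status = client.async_search.status(id=search_id)
    if not status["is_running"]:
        results = client.async_search.get(id=search_id)
        print(results["response"]["hits"]["total"])
    client.async_search.delete(id=search_id)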

+ ``_ diff --git a/elasticsearch/_sync/client/autoscaling.py b/elasticsearch/_sync/client/autoscaling.py index ab6ec9f21..6a3768a98 100644 --- a/elasticsearch/_sync/client/autoscaling.py +++ b/elasticsearch/_sync/client/autoscaling.py @@ -38,9 +38,11 @@ def delete_autoscaling_policy( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete an autoscaling policy. NOTE: This feature is designed for indirect use - by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. - Direct use is not supported. + .. raw:: html + +

Delete an autoscaling policy.

+

NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

+ ``_ @@ -89,18 +91,18 @@ def get_autoscaling_capacity( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the autoscaling capacity. NOTE: This feature is designed for indirect use - by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. - Direct use is not supported. This API gets the current autoscaling capacity based - on the configured autoscaling policy. It will return information to size the - cluster appropriately to the current workload. The `required_capacity` is calculated - as the maximum of the `required_capacity` result of all individual deciders that - are enabled for the policy. The operator should verify that the `current_nodes` - match the operator’s knowledge of the cluster to avoid making autoscaling decisions - based on stale or incomplete information. The response contains decider-specific - information you can use to diagnose how and why autoscaling determined a certain - capacity was required. This information is provided for diagnosis only. Do not - use this information to make autoscaling decisions. + .. raw:: html + +

Get the autoscaling capacity.

+

NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

+

This API gets the current autoscaling capacity based on the configured autoscaling policy. + It will return information to size the cluster appropriately to the current workload.

+

The required_capacity is calculated as the maximum of the required_capacity result of all individual deciders that are enabled for the policy.

+

The operator should verify that the current_nodes match the operator’s knowledge of the cluster to avoid making autoscaling decisions based on stale or incomplete information.

+

The response contains decider-specific information you can use to diagnose how and why autoscaling determined a certain capacity was required. + This information is provided for diagnosis only. + Do not use this information to make autoscaling decisions.

+ ``_ @@ -143,9 +145,11 @@ def get_autoscaling_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get an autoscaling policy. NOTE: This feature is designed for indirect use by - Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. - Direct use is not supported. + .. raw:: html + +

Get an autoscaling policy.

+

NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

+ ``_ @@ -196,9 +200,11 @@ def put_autoscaling_policy( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update an autoscaling policy. NOTE: This feature is designed for indirect - use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on - Kubernetes. Direct use is not supported. + .. raw:: html + +

Create or update an autoscaling policy.

+

NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.
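As a rough illustration (the policy name and decider configuration are invented for the example; the exact body shape should be checked against the autoscaling docs):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    # Create or update a policy that applies to data_hot nodes.
    client.autoscaling.put_autoscaling_policy(
        name="my-autoscaling-policy",
        policy={"roles": ["data_hot"], "deciders": {"fixed": {}}},
    )

    # The companion calls in this module follow the same shape:
    # client.autoscaling.get_autoscaling_policy(name="my-autoscaling-policy")
    # client.autoscaling.delete_autoscaling_policy(name="my-autoscaling-policy")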

+ ``_ diff --git a/elasticsearch/_sync/client/cat.py b/elasticsearch/_sync/client/cat.py index cb97b3054..cbacf8a67 100644 --- a/elasticsearch/_sync/client/cat.py +++ b/elasticsearch/_sync/client/cat.py @@ -57,11 +57,13 @@ def aliases( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get aliases. Get the cluster's index aliases, including filter and routing information. - This API does not return data stream aliases. IMPORTANT: CAT APIs are only intended - for human consumption using the command line or the Kibana console. They are - not intended for use by applications. For application consumption, use the aliases - API. + .. raw:: html + +

Get aliases.

+

Get the cluster's index aliases, including filter and routing information. + This API does not return data stream aliases.

+

IMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.

+ ``_ @@ -152,10 +154,12 @@ def allocation( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get shard allocation information. Get a snapshot of the number of shards allocated - to each data node and their disk space. IMPORTANT: CAT APIs are only intended - for human consumption using the command line or Kibana console. They are not - intended for use by applications. + .. raw:: html + +

Get shard allocation information.

+

Get a snapshot of the number of shards allocated to each data node and their disk space.

+

IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.

+ ``_ @@ -237,12 +241,14 @@ def component_templates( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get component templates. Get information about component templates in a cluster. - Component templates are building blocks for constructing index templates that - specify index mappings, settings, and aliases. IMPORTANT: CAT APIs are only intended - for human consumption using the command line or Kibana console. They are not - intended for use by applications. For application consumption, use the get component - template API. + .. raw:: html + +

Get component templates.

+

Get information about component templates in a cluster. + Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.

+

IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. + They are not intended for use by applications. For application consumption, use the get component template API.

+ ``_ @@ -319,12 +325,14 @@ def count( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get a document count. Get quick access to a document count for a data stream, - an index, or an entire cluster. The document count only includes live documents, - not deleted documents which have not yet been removed by the merge process. IMPORTANT: - CAT APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. For application consumption, - use the count API. + .. raw:: html + +

Get a document count.

+

Get quick access to a document count for a data stream, an index, or an entire cluster. + The document count only includes live documents, not deleted documents which have not yet been removed by the merge process.

+

IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. + They are not intended for use by applications. For application consumption, use the count API.
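A quick sketch; format="json" requests structured output instead of the default text table (the index name is a placeholder):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    # Live-document count for one index or data stream.
    resp = client.cat.count(index="my-index", format="json")
    print(resp[0]["count"])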

+ ``_ @@ -396,11 +404,13 @@ def fielddata( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get field data cache information. Get the amount of heap memory currently used - by the field data cache on every data node in the cluster. IMPORTANT: cat APIs - are only intended for human consumption using the command line or Kibana console. - They are not intended for use by applications. For application consumption, use - the nodes stats API. + .. raw:: html + +

Get field data cache information.

+

Get the amount of heap memory currently used by the field data cache on every data node in the cluster.

+

IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. + They are not intended for use by applications. For application consumption, use the nodes stats API.

+ ``_ @@ -474,17 +484,19 @@ def health( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get the cluster health status. IMPORTANT: CAT APIs are only intended for human - consumption using the command line or Kibana console. They are not intended for - use by applications. For application consumption, use the cluster health API. - This API is often used to check malfunctioning clusters. To help you track cluster - health alongside log files and alerting systems, the API returns timestamps in - two formats: `HH:MM:SS`, which is human-readable but includes no date information; - `Unix epoch time`, which is machine-sortable and includes date information. The - latter format is useful for cluster recoveries that take multiple days. You can - use the cat health API to verify cluster health across multiple nodes. You also - can use the API to track the recovery of a large cluster over a longer period - of time. + .. raw:: html + +

Get the cluster health status.

+

IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. + They are not intended for use by applications. For application consumption, use the cluster health API. + This API is often used to check malfunctioning clusters. + To help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats: + HH:MM:SS, which is human-readable but includes no date information; + Unix epoch time, which is machine-sortable and includes date information. + The latter format is useful for cluster recoveries that take multiple days. + You can use the cat health API to verify cluster health across multiple nodes. + You also can use the API to track the recovery of a large cluster over a longer period of time.
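For example, read from the Python client (the output is the human-oriented text table described above):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    # v=True adds column headers; ts=False drops the timestamp columns.
    print(client.cat.health(v=True, ts=False))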

+ ``_ @@ -538,7 +550,11 @@ def health( @_rewrite_parameters() def help(self) -> TextApiResponse: """ - Get CAT help. Get help for the CAT APIs. + .. raw:: html + +

Get CAT help.

+

Get help for the CAT APIs.

+ ``_ """ @@ -589,16 +605,23 @@ def indices( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get index information. Get high-level information about indices in a cluster, - including backing indices for data streams. Use this request to get the following - information for each index in a cluster: - shard count - document count - deleted - document count - primary store size - total store size of all shards, including - shard replicas These metrics are retrieved directly from Lucene, which Elasticsearch - uses internally to power indexing and search. As a result, all document counts - include hidden nested documents. To get an accurate count of Elasticsearch documents, - use the cat count or count APIs. CAT APIs are only intended for human consumption - using the command line or Kibana console. They are not intended for use by applications. - For application consumption, use an index endpoint. + .. raw:: html + +

Get index information.

+

Get high-level information about indices in a cluster, including backing indices for data streams.

+

Use this request to get the following information for each index in a cluster:

+
  • shard count
  • document count
  • deleted document count
  • primary store size
  • total store size of all shards, including shard replicas
+

These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents. + To get an accurate count of Elasticsearch documents, use the cat count or count APIs.

+

CAT APIs are only intended for human consumption using the command line or Kibana console. + They are not intended for use by applications. For application consumption, use an index endpoint.
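A sketch of pulling just those columns for a set of indices (the index pattern is a placeholder; column names follow the cat indices documentation):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    print(
        client.cat.indices(
            index="my-*",
            h="index,docs.count,docs.deleted,pri.store.size,store.size",
            s="store.size:desc",  # sort by total store size, largest first
            v=True,
        )
    )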

+ ``_ @@ -691,10 +714,12 @@ def master( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get master node information. Get information about the master node, including - the ID, bound IP address, and name. IMPORTANT: cat APIs are only intended for - human consumption using the command line or Kibana console. They are not intended - for use by applications. For application consumption, use the nodes info API. + .. raw:: html + +

Get master node information.

+

Get information about the master node, including the ID, bound IP address, and name.

+

IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.

+ ``_ @@ -865,11 +890,14 @@ def ml_data_frame_analytics( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get data frame analytics jobs. Get configuration and usage information about - data frame analytics jobs. IMPORTANT: CAT APIs are only intended for human consumption - using the Kibana console or command line. They are not intended for use by applications. - For application consumption, use the get data frame analytics jobs statistics - API. + .. raw:: html + +

Get data frame analytics jobs.

+

Get configuration and usage information about data frame analytics jobs.

+

IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + console or command line. They are not intended for use by applications. For + application consumption, use the get data frame analytics jobs statistics API.

+ ``_ @@ -1027,13 +1055,17 @@ def ml_datafeeds( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get datafeeds. Get configuration and usage information about datafeeds. This - API returns a maximum of 10,000 datafeeds. If the Elasticsearch security features - are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` - cluster privileges to use this API. IMPORTANT: CAT APIs are only intended for - human consumption using the Kibana console or command line. They are not intended - for use by applications. For application consumption, use the get datafeed statistics - API. + .. raw:: html + +

Get datafeeds.

+

Get configuration and usage information about datafeeds. + This API returns a maximum of 10,000 datafeeds. + If the Elasticsearch security features are enabled, you must have monitor_ml, monitor, manage_ml, or manage + cluster privileges to use this API.

+

IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + console or command line. They are not intended for use by applications. For + application consumption, use the get datafeed statistics API.

+ ``_ @@ -1389,13 +1421,17 @@ def ml_jobs( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get anomaly detection jobs. Get configuration and usage information for anomaly - detection jobs. This API returns a maximum of 10,000 jobs. If the Elasticsearch - security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, - or `manage` cluster privileges to use this API. IMPORTANT: CAT APIs are only - intended for human consumption using the Kibana console or command line. They - are not intended for use by applications. For application consumption, use the - get anomaly detection job statistics API. + .. raw:: html + +

Get anomaly detection jobs.

+

Get configuration and usage information for anomaly detection jobs. + This API returns a maximum of 10,000 jobs. + If the Elasticsearch security features are enabled, you must have monitor_ml, + monitor, manage_ml, or manage cluster privileges to use this API.

+

IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + console or command line. They are not intended for use by applications. For + application consumption, use the get anomaly detection job statistics API.

+ ``_ @@ -1573,10 +1609,14 @@ def ml_trained_models( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get trained models. Get configuration and usage information about inference trained - models. IMPORTANT: CAT APIs are only intended for human consumption using the - Kibana console or command line. They are not intended for use by applications. - For application consumption, use the get trained models statistics API. + .. raw:: html + +

Get trained models.

+

Get configuration and usage information about inference trained models.

+

IMPORTANT: CAT APIs are only intended for human consumption using the Kibana + console or command line. They are not intended for use by applications. For + application consumption, use the get trained models statistics API.

+ ``_ @@ -1664,10 +1704,12 @@ def nodeattrs( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get node attribute information. Get information about custom node attributes. - IMPORTANT: cat APIs are only intended for human consumption using the command - line or Kibana console. They are not intended for use by applications. For application - consumption, use the nodes info API. + .. raw:: html + +

Get node attribute information.

+

Get information about custom node attributes. + IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.

+ ``_ @@ -1745,10 +1787,12 @@ def nodes( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get node information. Get information about the nodes in a cluster. IMPORTANT: - cat APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. For application consumption, - use the nodes info API. + .. raw:: html + +

Get node information.

+

Get information about the nodes in a cluster. + IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.

+ ``_ @@ -1830,10 +1874,12 @@ def pending_tasks( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get pending task information. Get information about cluster-level changes that - have not yet taken effect. IMPORTANT: cat APIs are only intended for human consumption - using the command line or Kibana console. They are not intended for use by applications. - For application consumption, use the pending cluster tasks API. + .. raw:: html + +

Get pending task information.

+

Get information about cluster-level changes that have not yet taken effect. + IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API.

+ ``_ @@ -1908,10 +1954,12 @@ def plugins( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get plugin information. Get a list of plugins running on each node of a cluster. - IMPORTANT: cat APIs are only intended for human consumption using the command - line or Kibana console. They are not intended for use by applications. For application - consumption, use the nodes info API. + .. raw:: html + +

Get plugin information.

+

Get a list of plugins running on each node of a cluster. + IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.

+ ``_ @@ -1992,14 +2040,14 @@ def recovery( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get shard recovery information. Get information about ongoing and completed shard - recoveries. Shard recovery is the process of initializing a shard copy, such - as restoring a primary shard from a snapshot or syncing a replica shard from - a primary shard. When a shard recovery completes, the recovered shard is available - for search and indexing. For data streams, the API returns information about - the stream’s backing indices. IMPORTANT: cat APIs are only intended for human - consumption using the command line or Kibana console. They are not intended for - use by applications. For application consumption, use the index recovery API. + .. raw:: html + +

Get shard recovery information.

+

Get information about ongoing and completed shard recoveries. + Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. + For data streams, the API returns information about the stream’s backing indices. + IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API.

+ ``_ @@ -2082,10 +2130,12 @@ def repositories( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get snapshot repository information. Get a list of snapshot repositories for - a cluster. IMPORTANT: cat APIs are only intended for human consumption using - the command line or Kibana console. They are not intended for use by applications. - For application consumption, use the get snapshot repository API. + .. raw:: html + +

Get snapshot repository information.

+

Get a list of snapshot repositories for a cluster. + IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API.

+ ``_ @@ -2160,11 +2210,13 @@ def segments( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get segment information. Get low-level information about the Lucene segments - in index shards. For data streams, the API returns information about the backing - indices. IMPORTANT: cat APIs are only intended for human consumption using the - command line or Kibana console. They are not intended for use by applications. - For application consumption, use the index segments API. + .. raw:: html + +

Get segment information.

+

Get low-level information about the Lucene segments in index shards. + For data streams, the API returns information about the backing indices. + IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API.

+ ``_ @@ -2252,10 +2304,13 @@ def shards( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get shard information. Get information about the shards in a cluster. For data - streams, the API returns information about the backing indices. IMPORTANT: cat - APIs are only intended for human consumption using the command line or Kibana - console. They are not intended for use by applications. + .. raw:: html + +

Get shard information.

+

Get information about the shards in a cluster. + For data streams, the API returns information about the backing indices. + IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.

+ ``_ @@ -2338,11 +2393,13 @@ def snapshots( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get snapshot information. Get information about the snapshots stored in one or - more repositories. A snapshot is a backup of an index or running Elasticsearch - cluster. IMPORTANT: cat APIs are only intended for human consumption using the - command line or Kibana console. They are not intended for use by applications. - For application consumption, use the get snapshot API. + .. raw:: html + +

Get snapshot information.

+

Get information about the snapshots stored in one or more repositories. + A snapshot is a backup of an index or running Elasticsearch cluster. + IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API.

+ ``_ @@ -2430,10 +2487,12 @@ def tasks( wait_for_completion: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get task information. Get information about tasks currently running in the cluster. - IMPORTANT: cat APIs are only intended for human consumption using the command - line or Kibana console. They are not intended for use by applications. For application - consumption, use the task management API. + .. raw:: html + +

Get task information.

+

Get information about tasks currently running in the cluster. + IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API.

+ ``_ @@ -2521,11 +2580,13 @@ def templates( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get index template information. Get information about the index templates in - a cluster. You can use index templates to apply index settings and field mappings - to new indices at creation. IMPORTANT: cat APIs are only intended for human consumption - using the command line or Kibana console. They are not intended for use by applications. - For application consumption, use the get index template API. + .. raw:: html + +

Get index template information.

+

Get information about the index templates in a cluster. + You can use index templates to apply index settings and field mappings to new indices at creation. + IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API.

+ ``_ @@ -2607,11 +2668,13 @@ def thread_pool( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get thread pool statistics. Get thread pool statistics for each node in a cluster. - Returned information includes all built-in thread pools and custom thread pools. - IMPORTANT: cat APIs are only intended for human consumption using the command - line or Kibana console. They are not intended for use by applications. For application - consumption, use the nodes info API. + .. raw:: html + +

Get thread pool statistics.

+

Get thread pool statistics for each node in a cluster. + Returned information includes all built-in thread pools and custom thread pools. + IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.

+ ``_ @@ -2861,10 +2924,14 @@ def transforms( v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ - Get transform information. Get configuration and usage information about transforms. - CAT APIs are only intended for human consumption using the Kibana console or - command line. They are not intended for use by applications. For application - consumption, use the get transform statistics API. + .. raw:: html + +

Get transform information.

+

Get configuration and usage information about transforms.

+

CAT APIs are only intended for human consumption using the Kibana + console or command line. They are not intended for use by applications. For + application consumption, use the get transform statistics API.

+ ``_ diff --git a/elasticsearch/_sync/client/ccr.py b/elasticsearch/_sync/client/ccr.py index fdd79e2c9..5f0ce695f 100644 --- a/elasticsearch/_sync/client/ccr.py +++ b/elasticsearch/_sync/client/ccr.py @@ -37,8 +37,11 @@ def delete_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete auto-follow patterns. Delete a collection of cross-cluster replication - auto-follow patterns. + .. raw:: html + +

Delete auto-follow patterns. + Delete a collection of cross-cluster replication auto-follow patterns.

+ ``_ @@ -117,10 +120,12 @@ def follow( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a follower. Create a cross-cluster replication follower index that follows - a specific leader index. When the API returns, the follower index exists and - cross-cluster replication starts replicating operations from the leader index - to the follower index. + .. raw:: html + +

Create a follower. + Create a cross-cluster replication follower index that follows a specific leader index. + When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index.
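A minimal sketch, assuming a remote cluster alias named "remote" is already configured; the index names are placeholders:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    # Create follower-index locally, replicating leader-index from the remote cluster.
    client.ccr.follow(
        index="follower-index",
        leader_index="leader-index",
        remote_cluster="remote",
        wait_for_active_shards="1",
    )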

+ ``_ @@ -244,10 +249,12 @@ def follow_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get follower information. Get information about all cross-cluster replication - follower indices. For example, the results include follower index names, leader - index names, replication options, and whether the follower indices are active - or paused. + .. raw:: html + +

Get follower information. + Get information about all cross-cluster replication follower indices. + For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused.

+ ``_ @@ -292,9 +299,12 @@ def follow_stats( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get follower stats. Get cross-cluster replication follower stats. The API returns - shard-level stats about the "following tasks" associated with each shard for - the specified indices. + .. raw:: html + +

Get follower stats. + Get cross-cluster replication follower stats. + The API returns shard-level stats about the "following tasks" associated with each shard for the specified indices.

+ ``_ @@ -352,23 +362,19 @@ def forget_follower( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Forget a follower. Remove the cross-cluster replication follower retention leases - from the leader. A following index takes out retention leases on its leader index. - These leases are used to increase the likelihood that the shards of the leader - index retain the history of operations that the shards of the following index - need to run replication. When a follower index is converted to a regular index - by the unfollow API (either by directly calling the API or by index lifecycle - management tasks), these leases are removed. However, removal of the leases can - fail, for example when the remote cluster containing the leader index is unavailable. - While the leases will eventually expire on their own, their extended existence - can cause the leader index to hold more history than necessary and prevent index - lifecycle management from performing some operations on the leader index. This - API exists to enable manually removing the leases when the unfollow API is unable - to do so. NOTE: This API does not stop replication by a following index. If you - use this API with a follower index that is still actively following, the following - index will add back retention leases on the leader. The only purpose of this - API is to handle the case of failure to remove the following retention leases - after the unfollow API is invoked. + .. raw:: html + +

Forget a follower. + Remove the cross-cluster replication follower retention leases from the leader.

+

A following index takes out retention leases on its leader index. + These leases are used to increase the likelihood that the shards of the leader index retain the history of operations that the shards of the following index need to run replication. + When a follower index is converted to a regular index by the unfollow API (either by directly calling the API or by index lifecycle management tasks), these leases are removed. + However, removal of the leases can fail, for example when the remote cluster containing the leader index is unavailable. + While the leases will eventually expire on their own, their extended existence can cause the leader index to hold more history than necessary and prevent index lifecycle management from performing some operations on the leader index. + This API exists to enable manually removing the leases when the unfollow API is unable to do so.

+

NOTE: This API does not stop replication by a following index. If you use this API with a follower index that is still actively following, the following index will add back retention leases on the leader. + The only purpose of this API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked.

+ ``_ @@ -429,7 +435,11 @@ def get_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get auto-follow patterns. Get cross-cluster replication auto-follow patterns. + .. raw:: html + +

Get auto-follow patterns. + Get cross-cluster replication auto-follow patterns.

+ ``_ @@ -477,14 +487,16 @@ def pause_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Pause an auto-follow pattern. Pause a cross-cluster replication auto-follow pattern. - When the API returns, the auto-follow pattern is inactive. New indices that are - created on the remote cluster and match the auto-follow patterns are ignored. - You can resume auto-following with the resume auto-follow pattern API. When it - resumes, the auto-follow pattern is active again and automatically configures - follower indices for newly created indices on the remote cluster that match its - patterns. Remote indices that were created while the pattern was paused will - also be followed, unless they have been deleted or closed in the interim. + .. raw:: html + +

Pause an auto-follow pattern. + Pause a cross-cluster replication auto-follow pattern. + When the API returns, the auto-follow pattern is inactive. + New indices that are created on the remote cluster and match the auto-follow patterns are ignored.

+

You can resume auto-following with the resume auto-follow pattern API. + When it resumes, the auto-follow pattern is active again and automatically configures follower indices for newly created indices on the remote cluster that match its patterns. + Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim.

+ ``_ @@ -529,10 +541,14 @@ def pause_follow( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Pause a follower. Pause a cross-cluster replication follower index. The follower - index will not fetch any additional operations from the leader index. You can - resume following with the resume follower API. You can pause and resume a follower - index to change the configuration of the following task. + .. raw:: html + +

Pause a follower. + Pause a cross-cluster replication follower index. + The follower index will not fetch any additional operations from the leader index. + You can resume following with the resume follower API. + You can pause and resume a follower index to change the configuration of the following task.

+ ``_ @@ -611,14 +627,15 @@ def put_auto_follow_pattern( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update auto-follow patterns. Create a collection of cross-cluster replication - auto-follow patterns for a remote cluster. Newly created indices on the remote - cluster that match any of the patterns are automatically configured as follower - indices. Indices on the remote cluster that were created before the auto-follow - pattern was created will not be auto-followed even if they match the pattern. - This API can also be used to update auto-follow patterns. NOTE: Follower indices - that were configured automatically before updating an auto-follow pattern will - remain unchanged even if they do not match against the new patterns. + .. raw:: html + +

Create or update auto-follow patterns. + Create a collection of cross-cluster replication auto-follow patterns for a remote cluster. + Newly created indices on the remote cluster that match any of the patterns are automatically configured as follower indices. + Indices on the remote cluster that were created before the auto-follow pattern was created will not be auto-followed even if they match the pattern.

+

This API can also be used to update auto-follow patterns. + NOTE: Follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns.
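For instance, a hedged sketch with invented names; any new index matching logs-* on the remote cluster would then be followed automatically:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    client.ccr.put_auto_follow_pattern(
        name="logs-pattern",                          # hypothetical pattern name
        remote_cluster="remote",                      # hypothetical cluster alias
        leader_index_patterns=["logs-*"],
        follow_index_pattern="{{leader_index}}-copy",
    )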

+ ``_ @@ -746,11 +763,13 @@ def resume_auto_follow_pattern( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resume an auto-follow pattern. Resume a cross-cluster replication auto-follow - pattern that was paused. The auto-follow pattern will resume configuring following - indices for newly created indices that match its patterns on the remote cluster. - Remote indices created while the pattern was paused will also be followed unless - they have been deleted or closed in the interim. + .. raw:: html + +

Resume an auto-follow pattern. + Resume a cross-cluster replication auto-follow pattern that was paused. + The auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster. + Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim.

+ ``_ @@ -819,11 +838,14 @@ def resume_follow( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Resume a follower. Resume a cross-cluster replication follower index that was - paused. The follower index could have been paused with the pause follower API. - Alternatively it could be paused due to replication that cannot be retried due - to failures during following tasks. When this API returns, the follower index - will resume fetching operations from the leader index. + .. raw:: html + +

Resume a follower. + Resume a cross-cluster replication follower index that was paused. + The follower index could have been paused with the pause follower API. + Alternatively it could be paused due to replication that cannot be retried due to failures during following tasks. + When this API returns, the follower index will resume fetching operations from the leader index.

+ ``_ @@ -910,8 +932,11 @@ def stats( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get cross-cluster replication stats. This API returns stats about auto-following - and the same shard-level stats as the get follower stats API. + .. raw:: html + +

Get cross-cluster replication stats. + This API returns stats about auto-following and the same shard-level stats as the get follower stats API.

+ ``_ @@ -956,13 +981,14 @@ def unfollow( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Unfollow an index. Convert a cross-cluster replication follower index to a regular - index. The API stops the following task associated with a follower index and - removes index metadata and settings associated with cross-cluster replication. - The follower index must be paused and closed before you call the unfollow API. - NOTE: Currently cross-cluster replication does not support converting an existing - regular index to a follower index. Converting a follower index to a regular index - is an irreversible operation. + .. raw:: html + +

Unfollow an index. + Convert a cross-cluster replication follower index to a regular index. + The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. + The follower index must be paused and closed before you call the unfollow API.

+

NOTE: Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation.

+ ``_ diff --git a/elasticsearch/_sync/client/cluster.py b/elasticsearch/_sync/client/cluster.py index 636fae5d1..0d7fb74db 100644 --- a/elasticsearch/_sync/client/cluster.py +++ b/elasticsearch/_sync/client/cluster.py @@ -45,13 +45,14 @@ def allocation_explain( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Explain the shard allocations. Get explanations for shard allocations in the - cluster. For unassigned shards, it provides an explanation for why the shard - is unassigned. For assigned shards, it provides an explanation for why the shard - is remaining on its current node and has not moved or rebalanced to another node. - This API can be very useful when attempting to diagnose why a shard is unassigned - or why a shard continues to remain on its current node when you might expect - otherwise. + .. raw:: html + +

Explain the shard allocations. + Get explanations for shard allocations in the cluster. + For unassigned shards, it provides an explanation for why the shard is unassigned. + For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node. + This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise.
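A small sketch for the assigned-shard case; the index name is a placeholder, and calling the API with no arguments instead picks an arbitrary unassigned shard:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    resp = client.cluster.allocation_explain(
        index="my-index",  # hypothetical index
        shard=0,
        primary=True,
    )
    print(resp["current_state"])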

+ ``_ @@ -123,8 +124,11 @@ def delete_component_template( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete component templates. Component templates are building blocks for constructing - index templates that specify index mappings, settings, and aliases. + .. raw:: html + +

Delete component templates. + Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.

+ ``_ @@ -175,8 +179,11 @@ def delete_voting_config_exclusions( wait_for_removal: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear cluster voting config exclusions. Remove master-eligible nodes from the - voting configuration exclusion list. + .. raw:: html + +

Clear cluster voting config exclusions. + Remove master-eligible nodes from the voting configuration exclusion list.

+ ``_ @@ -226,8 +233,11 @@ def exists_component_template( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Check component templates. Returns information about whether a particular component - template exists. + .. raw:: html + +

Check component templates. + Returns information about whether a particular component template exists.

+ ``_ @@ -282,7 +292,11 @@ def get_component_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get component templates. Get information about component templates. + .. raw:: html + +

Get component templates. + Get information about component templates.

+ ``_ @@ -345,8 +359,11 @@ def get_settings( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get cluster-wide settings. By default, it returns only settings that have been - explicitly defined. + .. raw:: html + +

Get cluster-wide settings. + By default, it returns only settings that have been explicitly defined.

+ ``_ @@ -428,16 +445,17 @@ def health( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the cluster health status. You can also use the API to get the health status - of only specified data streams and indices. For data streams, the API retrieves - the health status of the stream’s backing indices. The cluster health status - is: green, yellow or red. On the shard level, a red status indicates that the - specific shard is not allocated in the cluster. Yellow means that the primary - shard is allocated but replicas are not. Green means that all shards are allocated. - The index level status is controlled by the worst shard status. One of the main - benefits of the API is the ability to wait until the cluster reaches a certain - high watermark health level. The cluster status is controlled by the worst index - status. + .. raw:: html + +

Get the cluster health status. + You can also use the API to get the health status of only specified data streams and indices. + For data streams, the API retrieves the health status of the stream’s backing indices.

+

The cluster health status is: green, yellow or red. + On the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated. + The index level status is controlled by the worst shard status.

+

One of the main benefits of the API is the ability to wait until the cluster reaches a certain high watermark health level. + The cluster status is controlled by the worst index status.
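For example, waiting for that watermark from the Python client (the status and timeout values are illustrative):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    # Block for up to 30 seconds until the cluster is at least yellow.
    resp = client.cluster.health(wait_for_status="yellow", timeout="30s")
    print(resp["status"], resp["timed_out"])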

+ ``_ @@ -541,7 +559,11 @@ def info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get cluster info. Returns basic information about the cluster. + .. raw:: html + +

Get cluster info. + Returns basic information about the cluster.

+ ``_ @@ -583,14 +605,14 @@ def pending_tasks( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the pending cluster tasks. Get information about cluster-level changes (such - as create index, update mapping, allocate or fail shard) that have not yet taken - effect. NOTE: This API returns a list of any pending updates to the cluster state. - These are distinct from the tasks reported by the task management API which include - periodic tasks and tasks initiated by the user, such as node stats, search queries, - or create index requests. However, if a user-initiated task such as a create - index command causes a cluster state update, the activity of this task might - be reported by both task api and pending cluster tasks API. + .. raw:: html + +

Get the pending cluster tasks. + Get information about cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet taken effect.

+

NOTE: This API returns a list of any pending updates to the cluster state. + These are distinct from the tasks reported by the task management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. + However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API.

+ ``_ @@ -639,33 +661,24 @@ def post_voting_config_exclusions( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update voting configuration exclusions. Update the cluster voting config exclusions - by node IDs or node names. By default, if there are more than three master-eligible - nodes in the cluster and you remove fewer than half of the master-eligible nodes - in the cluster at once, the voting configuration automatically shrinks. If you - want to shrink the voting configuration to contain fewer than three nodes or - to remove half or more of the master-eligible nodes in the cluster at once, use - this API to remove departing nodes from the voting configuration manually. The - API adds an entry for each specified node to the cluster’s voting configuration - exclusions list. It then waits until the cluster has reconfigured its voting - configuration to exclude the specified nodes. Clusters should have no voting - configuration exclusions in normal operation. Once the excluded nodes have stopped, - clear the voting configuration exclusions with `DELETE /_cluster/voting_config_exclusions`. - This API waits for the nodes to be fully removed from the cluster before it returns. - If your cluster has voting configuration exclusions for nodes that you no longer - intend to remove, use `DELETE /_cluster/voting_config_exclusions?wait_for_removal=false` - to clear the voting configuration exclusions without waiting for the nodes to - leave the cluster. A response to `POST /_cluster/voting_config_exclusions` with - an HTTP status code of 200 OK guarantees that the node has been removed from - the voting configuration and will not be reinstated until the voting configuration - exclusions are cleared by calling `DELETE /_cluster/voting_config_exclusions`. - If the call to `POST /_cluster/voting_config_exclusions` fails or returns a response - with an HTTP status code other than 200 OK then the node may not have been removed - from the voting configuration. In that case, you may safely retry the call. NOTE: - Voting exclusions are required only when you remove at least half of the master-eligible - nodes from a cluster in a short time period. They are not required when removing - master-ineligible nodes or when removing fewer than half of the master-eligible - nodes. + .. raw:: html + +

Update voting configuration exclusions. + Update the cluster voting config exclusions by node IDs or node names. + By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at once, the voting configuration automatically shrinks. + If you want to shrink the voting configuration to contain fewer than three nodes or to remove half or more of the master-eligible nodes in the cluster at once, use this API to remove departing nodes from the voting configuration manually. + The API adds an entry for each specified node to the cluster’s voting configuration exclusions list. + It then waits until the cluster has reconfigured its voting configuration to exclude the specified nodes.

+

Clusters should have no voting configuration exclusions in normal operation. + Once the excluded nodes have stopped, clear the voting configuration exclusions with DELETE /_cluster/voting_config_exclusions. + This API waits for the nodes to be fully removed from the cluster before it returns. + If your cluster has voting configuration exclusions for nodes that you no longer intend to remove, use DELETE /_cluster/voting_config_exclusions?wait_for_removal=false to clear the voting configuration exclusions without waiting for the nodes to leave the cluster.

+

A response to POST /_cluster/voting_config_exclusions with an HTTP status code of 200 OK guarantees that the node has been removed from the voting configuration and will not be reinstated until the voting configuration exclusions are cleared by calling DELETE /_cluster/voting_config_exclusions. + If the call to POST /_cluster/voting_config_exclusions fails or returns a response with an HTTP status code other than 200 OK then the node may not have been removed from the voting configuration. + In that case, you may safely retry the call.

+

NOTE: Voting exclusions are required only when you remove at least half of the master-eligible nodes from a cluster in a short time period. + They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes.
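A sketch of the add-then-clear sequence described above; the node name is a placeholder:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    # Exclude a departing master-eligible node from the voting configuration.
    client.cluster.post_voting_config_exclusions(node_names="node-1")

    # ...shut the node down, then clear the exclusion list...
    client.cluster.delete_voting_config_exclusions(wait_for_removal=True)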

+ ``_ @@ -730,21 +743,23 @@ def put_component_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a component template. Component templates are building blocks - for constructing index templates that specify index mappings, settings, and aliases. - An index template can be composed of multiple component templates. To use a component - template, specify it in an index template’s `composed_of` list. Component templates - are only applied to new data streams and indices as part of a matching index - template. Settings and mappings specified directly in the index template or the - create index request override any settings or mappings specified in a component - template. Component templates are only used during index creation. For data streams, - this includes data stream creation and the creation of a stream’s backing indices. - Changes to component templates do not affect existing indices, including a stream’s - backing indices. You can use C-style `/* *\\/` block comments in component templates. - You can include comments anywhere in the request body except before the opening - curly bracket. **Applying component templates** You cannot directly apply a component - template to a data stream or index. To be applied, a component template must - be included in an index template's `composed_of` list. + .. raw:: html + +

Create or update a component template. + Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.

+

An index template can be composed of multiple component templates. + To use a component template, specify it in an index template’s composed_of list. + Component templates are only applied to new data streams and indices as part of a matching index template.

+

Settings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template.

+

Component templates are only used during index creation. + For data streams, this includes data stream creation and the creation of a stream’s backing indices. + Changes to component templates do not affect existing indices, including a stream’s backing indices.

+

You can use C-style /* *\\/ block comments in component templates. + You can include comments anywhere in the request body except before the opening curly bracket.

+

Applying component templates

+

You cannot directly apply a component template to a data stream or index. + To be applied, a component template must be included in an index template's composed_of list.
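A sketch of that two-step flow with invented names: the component template only takes effect once an index template lists it in composed_of:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")

    client.cluster.put_component_template(
        name="my-settings-component",
        template={"settings": {"number_of_shards": 1}},
    )
    client.indices.put_index_template(
        name="my-template",
        index_patterns=["my-data-*"],
        composed_of=["my-settings-component"],
    )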

+ ``_ @@ -833,26 +848,23 @@ def put_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the cluster settings. Configure and update dynamic settings on a running - cluster. You can also configure dynamic settings locally on an unstarted or shut - down node in `elasticsearch.yml`. Updates made with this API can be persistent, - which apply across cluster restarts, or transient, which reset after a cluster - restart. You can also reset transient or persistent settings by assigning them - a null value. If you configure the same setting using multiple methods, Elasticsearch - applies the settings in following order of precedence: 1) Transient setting; - 2) Persistent setting; 3) `elasticsearch.yml` setting; 4) Default setting value. - For example, you can apply a transient setting to override a persistent setting - or `elasticsearch.yml` setting. However, a change to an `elasticsearch.yml` setting - will not override a defined transient or persistent setting. TIP: In Elastic - Cloud, use the user settings feature to configure all cluster settings. This - method automatically rejects unsafe settings that could break your cluster. If - you run Elasticsearch on your own hardware, use this API to configure dynamic - cluster settings. Only use `elasticsearch.yml` for static cluster settings and - node settings. The API doesn’t require a restart and ensures a setting’s value - is the same on all nodes. WARNING: Transient cluster settings are no longer recommended. - Use persistent cluster settings instead. If a cluster becomes unstable, transient - settings can clear unexpectedly, resulting in a potentially undesired cluster - configuration. + .. raw:: html + +

Update the cluster settings.
Configure and update dynamic settings on a running cluster.
You can also configure dynamic settings locally on an unstarted or shut down node in elasticsearch.yml.

Updates made with this API can be persistent, which apply across cluster restarts, or transient, which reset after a cluster restart.
You can also reset transient or persistent settings by assigning them a null value.

If you configure the same setting using multiple methods, Elasticsearch applies the settings in the following order of precedence: 1) Transient setting; 2) Persistent setting; 3) elasticsearch.yml setting; 4) Default setting value.
For example, you can apply a transient setting to override a persistent setting or elasticsearch.yml setting.
However, a change to an elasticsearch.yml setting will not override a defined transient or persistent setting.

TIP: In Elastic Cloud, use the user settings feature to configure all cluster settings. This method automatically rejects unsafe settings that could break your cluster.
If you run Elasticsearch on your own hardware, use this API to configure dynamic cluster settings.
Only use elasticsearch.yml for static cluster settings and node settings.
The API doesn’t require a restart and ensures a setting’s value is the same on all nodes.

WARNING: Transient cluster settings are no longer recommended. Use persistent cluster settings instead.
If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration.

+ ``_ @@ -906,9 +918,12 @@ def remote_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get remote cluster information. Get all of the configured remote cluster information. - This API returns connection and endpoint information keyed by the configured - remote cluster alias. + .. raw:: html + +

Get remote cluster information.
Get all of the configured remote cluster information.
This API returns connection and endpoint information keyed by the configured remote cluster alias.

+ ``_ """ @@ -953,25 +968,19 @@ def reroute( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Reroute the cluster. Manually change the allocation of individual shards in the - cluster. For example, a shard can be moved from one node to another explicitly, - an allocation can be canceled, and an unassigned shard can be explicitly allocated - to a specific node. It is important to note that after processing any reroute - commands Elasticsearch will perform rebalancing as normal (respecting the values - of settings such as `cluster.routing.rebalance.enable`) in order to remain in - a balanced state. For example, if the requested allocation includes moving a - shard from node1 to node2 then this may cause a shard to be moved from node2 - back to node1 to even things out. The cluster can be set to disable allocations - using the `cluster.routing.allocation.enable` setting. If allocations are disabled - then the only allocations that will be performed are explicit ones given using - the reroute command, and consequent allocations due to rebalancing. The cluster - will attempt to allocate a shard a maximum of `index.allocation.max_retries` - times in a row (defaults to `5`), before giving up and leaving the shard unallocated. - This scenario can be caused by structural problems such as having an analyzer - which refers to a stopwords file which doesn’t exist on all nodes. Once the problem - has been corrected, allocation can be manually retried by calling the reroute - API with the `?retry_failed` URI query parameter, which will attempt a single - retry round for these shards. + .. raw:: html + +

Reroute the cluster.
Manually change the allocation of individual shards in the cluster.
For example, a shard can be moved from one node to another explicitly, an allocation can be canceled, and an unassigned shard can be explicitly allocated to a specific node.

It is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as cluster.routing.rebalance.enable) in order to remain in a balanced state.
For example, if the requested allocation includes moving a shard from node1 to node2 then this may cause a shard to be moved from node2 back to node1 to even things out.

The cluster can be set to disable allocations using the cluster.routing.allocation.enable setting.
If allocations are disabled then the only allocations that will be performed are explicit ones given using the reroute command, and consequent allocations due to rebalancing.

The cluster will attempt to allocate a shard a maximum of index.allocation.max_retries times in a row (defaults to 5), before giving up and leaving the shard unallocated.
This scenario can be caused by structural problems such as having an analyzer which refers to a stopwords file which doesn’t exist on all nodes.

Once the problem has been corrected, allocation can be manually retried by calling the reroute API with the ?retry_failed URI query parameter, which will attempt a single retry round for these shards.
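For example (index and node names are placeholders), a manual move command can be submitted through the Python client, and dry_run=True validates the command without applying it:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Validate a manual shard move without applying it.
    client.cluster.reroute(
        dry_run=True,
        commands=[
            {
                "move": {
                    "index": "my-index",   # placeholder index
                    "shard": 0,
                    "from_node": "node1",  # placeholder node names
                    "to_node": "node2",
                }
            }
        ],
    )

    # Retry shards that exhausted index.allocation.max_retries.
    client.cluster.reroute(retry_failed=True)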

+ ``_ @@ -1060,26 +1069,23 @@ def state( wait_for_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the cluster state. Get comprehensive information about the state of the cluster. - The cluster state is an internal data structure which keeps track of a variety - of information needed by every node, including the identity and attributes of - the other nodes in the cluster; cluster-wide settings; index metadata, including - the mapping and settings for each index; the location and status of every shard - copy in the cluster. The elected master node ensures that every node in the cluster - has a copy of the same cluster state. This API lets you retrieve a representation - of this internal state for debugging or diagnostic purposes. You may need to - consult the Elasticsearch source code to determine the precise meaning of the - response. By default the API will route requests to the elected master node since - this node is the authoritative source of cluster states. You can also retrieve - the cluster state held on the node handling the API request by adding the `?local=true` - query parameter. Elasticsearch may need to expend significant effort to compute - a response to this API in larger clusters, and the response may comprise a very - large quantity of data. If you use this API repeatedly, your cluster may become - unstable. WARNING: The response is a representation of an internal data structure. - Its format is not subject to the same compatibility guarantees as other more - stable APIs and may change from version to version. Do not query this API using - external monitoring tools. Instead, obtain the information you require using - other more stable cluster APIs. + .. raw:: html + +

Get the cluster state.
Get comprehensive information about the state of the cluster.

The cluster state is an internal data structure which keeps track of a variety of information needed by every node, including the identity and attributes of the other nodes in the cluster; cluster-wide settings; index metadata, including the mapping and settings for each index; the location and status of every shard copy in the cluster.

The elected master node ensures that every node in the cluster has a copy of the same cluster state.
This API lets you retrieve a representation of this internal state for debugging or diagnostic purposes.
You may need to consult the Elasticsearch source code to determine the precise meaning of the response.

By default the API will route requests to the elected master node since this node is the authoritative source of cluster states.
You can also retrieve the cluster state held on the node handling the API request by adding the ?local=true query parameter.

Elasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data.
If you use this API repeatedly, your cluster may become unstable.

WARNING: The response is a representation of an internal data structure.
Its format is not subject to the same compatibility guarantees as other more stable APIs and may change from version to version.
Do not query this API using external monitoring tools.
Instead, obtain the information you require using other more stable cluster APIs.

+ ``_ @@ -1163,9 +1169,11 @@ def stats( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get cluster statistics. Get basic index metrics (shard numbers, store size, memory - usage) and information about the current nodes that form the cluster (number, - roles, os, jvm versions, memory usage, cpu and installed plugins). + .. raw:: html + +

Get cluster statistics.
Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins).
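As a quick sketch, a couple of those metrics can be read from the response with the Python client (the field names below reflect the usual response shape, which may vary slightly by version):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    stats = client.cluster.stats()
    print(stats["indices"]["count"])         # number of indices
    print(stats["nodes"]["count"]["total"])  # number of nodes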

+ ``_ diff --git a/elasticsearch/_sync/client/connector.py b/elasticsearch/_sync/client/connector.py index 7b334ab01..76c7d8735 100644 --- a/elasticsearch/_sync/client/connector.py +++ b/elasticsearch/_sync/client/connector.py @@ -43,8 +43,11 @@ def check_in( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Check in a connector. Update the `last_seen` field in the connector and set it - to the current timestamp. + .. raw:: html + +

Check in a connector.

+

Update the last_seen field in the connector and set it to the current timestamp.

+ ``_ @@ -86,10 +89,14 @@ def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a connector. Removes a connector and associated sync jobs. This is a destructive - action that is not recoverable. NOTE: This action doesn’t delete any API keys, - ingest pipelines, or data indices associated with the connector. These need to - be removed manually. + .. raw:: html + +

Delete a connector.

+

Removes a connector and associated sync jobs.
This is a destructive action that is not recoverable.
NOTE: This action doesn’t delete any API keys, ingest pipelines, or data indices associated with the connector.
These need to be removed manually.

+ ``_ @@ -134,7 +141,11 @@ def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a connector. Get the details about a connector. + .. raw:: html + +

Get a connector.

+

Get the details about a connector.

+ ``_ @@ -229,8 +240,12 @@ def last_sync( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the connector last sync stats. Update the fields related to the last sync - of a connector. This action is used for analytics and monitoring. + .. raw:: html + +

Update the connector last sync stats.

+

Update the fields related to the last sync of a connector.
This action is used for analytics and monitoring.

+ ``_ @@ -325,7 +340,11 @@ def list( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Get all connectors. Get information about all connectors. + .. raw:: html + +

Get all connectors.

+

Get information about all connectors.

+ ``_ @@ -400,11 +419,13 @@ def post( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a connector. Connectors are Elasticsearch integrations that bring content - from third-party data sources, which can be deployed on Elastic Cloud or hosted - on your own infrastructure. Elastic managed connectors (Native connectors) are - a managed service on Elastic Cloud. Self-managed connectors (Connector clients) - are self-managed on your infrastructure. + .. raw:: html + +

Create a connector.

+

Connectors are Elasticsearch integrations that bring content from third-party data sources, which can be deployed on Elastic Cloud or hosted on your own infrastructure.
Elastic managed connectors (Native connectors) are a managed service on Elastic Cloud.
Self-managed connectors (Connector clients) are self-managed on your infrastructure.
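A rough sketch of creating a connector with the Python client; the index name, display name, and service type are illustrative only, and valid service types should be checked against the connectors documentation:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    response = client.connector.post(
        index_name="content-my-docs",       # index the connector writes to (example)
        name="My documents connector",      # display name (example)
        service_type="sharepoint_online",   # example service type
    )
    print(response["id"])  # identifier of the newly created connector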

+ ``_ @@ -483,7 +504,10 @@ def put( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a connector. + .. raw:: html + +

Create or update a connector.

+ ``_ @@ -553,10 +577,12 @@ def sync_job_cancel( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Cancel a connector sync job. Cancel a connector sync job, which sets the status - to cancelling and updates `cancellation_requested_at` to the current time. The - connector service is then responsible for setting the status of connector sync - jobs to cancelled. + .. raw:: html + +

Cancel a connector sync job.

+

Cancel a connector sync job, which sets the status to cancelling and updates cancellation_requested_at to the current time.
The connector service is then responsible for setting the status of connector sync jobs to cancelled.

+ ``_ @@ -601,11 +627,13 @@ def sync_job_check_in( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Check in a connector sync job. Check in a connector sync job and set the `last_seen` - field to the current time before updating it in the internal index. To sync data - using self-managed connectors, you need to deploy the Elastic connector service - on your own infrastructure. This service runs automatically on Elastic Cloud - for Elastic managed connectors. + .. raw:: html + +

Check in a connector sync job.
Check in a connector sync job and set the last_seen field to the current time before updating it in the internal index.

To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure.
This service runs automatically on Elastic Cloud for Elastic managed connectors.

+ ``_ @@ -656,14 +684,16 @@ def sync_job_claim( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Claim a connector sync job. This action updates the job status to `in_progress` - and sets the `last_seen` and `started_at` timestamps to the current time. Additionally, - it can set the `sync_cursor` property for the sync job. This API is not intended - for direct connector management by users. It supports the implementation of services - that utilize the connector protocol to communicate with Elasticsearch. To sync - data using self-managed connectors, you need to deploy the Elastic connector - service on your own infrastructure. This service runs automatically on Elastic - Cloud for Elastic managed connectors. + .. raw:: html + +

Claim a connector sync job.
This action updates the job status to in_progress and sets the last_seen and started_at timestamps to the current time.
Additionally, it can set the sync_cursor property for the sync job.

This API is not intended for direct connector management by users.
It supports the implementation of services that utilize the connector protocol to communicate with Elasticsearch.

To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure.
This service runs automatically on Elastic Cloud for Elastic managed connectors.

+ ``_ @@ -720,8 +750,12 @@ def sync_job_delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a connector sync job. Remove a connector sync job and its associated data. - This is a destructive action that is not recoverable. + .. raw:: html + +

Delete a connector sync job.

+

Remove a connector sync job and its associated data.
This is a destructive action that is not recoverable.

+ ``_ @@ -769,10 +803,13 @@ def sync_job_error( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Set a connector sync job error. Set the `error` field for a connector sync job - and set its `status` to `error`. To sync data using self-managed connectors, - you need to deploy the Elastic connector service on your own infrastructure. - This service runs automatically on Elastic Cloud for Elastic managed connectors. + .. raw:: html + +

Set a connector sync job error.
Set the error field for a connector sync job and set its status to error.

To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure.
This service runs automatically on Elastic Cloud for Elastic managed connectors.

+ ``_ @@ -823,7 +860,10 @@ def sync_job_get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a connector sync job. + .. raw:: html + +

Get a connector sync job.

+ ``_ @@ -892,8 +932,11 @@ def sync_job_list( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Get all connector sync jobs. Get information about all stored connector sync - jobs listed by their creation date in ascending order. + .. raw:: html + +

Get all connector sync jobs.

+

Get information about all stored connector sync jobs listed by their creation date in ascending order.

+ ``_ @@ -955,8 +998,11 @@ def sync_job_post( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a connector sync job. Create a connector sync job document in the internal - index and initialize its counters and timestamps with default values. + .. raw:: html + +

Create a connector sync job.

+

Create a connector sync job document in the internal index and initialize its counters and timestamps with default values.

+ ``_ @@ -1024,12 +1070,15 @@ def sync_job_update_stats( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Set the connector sync job stats. Stats include: `deleted_document_count`, `indexed_document_count`, - `indexed_document_volume`, and `total_document_count`. You can also update `last_seen`. - This API is mainly used by the connector service for updating sync job information. - To sync data using self-managed connectors, you need to deploy the Elastic connector - service on your own infrastructure. This service runs automatically on Elastic - Cloud for Elastic managed connectors. + .. raw:: html + +

Set the connector sync job stats.
Stats include: deleted_document_count, indexed_document_count, indexed_document_volume, and total_document_count.
You can also update last_seen.
This API is mainly used by the connector service for updating sync job information.

To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure.
This service runs automatically on Elastic Cloud for Elastic managed connectors.

+ ``_ @@ -1108,8 +1157,11 @@ def update_active_filtering( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Activate the connector draft filter. Activates the valid draft filtering for - a connector. + .. raw:: html + +

Activate the connector draft filter.

+

Activates the valid draft filtering for a connector.

+ ``_ @@ -1155,11 +1207,14 @@ def update_api_key_id( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the connector API key ID. Update the `api_key_id` and `api_key_secret_id` - fields of a connector. You can specify the ID of the API key used for authorization - and the ID of the connector secret where the API key is stored. The connector - secret ID is required only for Elastic managed (native) connectors. Self-managed - connectors (connector clients) do not use this field. + .. raw:: html + +

Update the connector API key ID.

+

Update the api_key_id and api_key_secret_id fields of a connector.
You can specify the ID of the API key used for authorization and the ID of the connector secret where the API key is stored.
The connector secret ID is required only for Elastic managed (native) connectors.
Self-managed connectors (connector clients) do not use this field.

+ ``_ @@ -1214,8 +1269,11 @@ def update_configuration( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the connector configuration. Update the configuration field in the connector - document. + .. raw:: html + +

Update the connector configuration.

+

Update the configuration field in the connector document.

+ ``_ @@ -1269,10 +1327,13 @@ def update_error( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the connector error field. Set the error field for the connector. If the - error provided in the request body is non-null, the connector’s status is updated - to error. Otherwise, if the error is reset to null, the connector status is updated - to connected. + .. raw:: html + +

Update the connector error field.

+

Set the error field for the connector.
If the error provided in the request body is non-null, the connector’s status is updated to error.
Otherwise, if the error is reset to null, the connector status is updated to connected.

+ ``_ @@ -1325,14 +1386,22 @@ def update_features( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the connector features. Update the connector features in the connector - document. This API can be used to control the following aspects of a connector: - * document-level security * incremental syncs * advanced sync rules * basic sync - rules Normally, the running connector service automatically manages these features. - However, you can use this API to override the default behavior. To sync data - using self-managed connectors, you need to deploy the Elastic connector service - on your own infrastructure. This service runs automatically on Elastic Cloud - for Elastic managed connectors. + .. raw:: html + +

Update the connector features.
Update the connector features in the connector document.
This API can be used to control the following aspects of a connector:

* document-level security
* incremental syncs
* advanced sync rules
* basic sync rules

Normally, the running connector service automatically manages these features.
However, you can use this API to override the default behavior.

To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure.
This service runs automatically on Elastic Cloud for Elastic managed connectors.
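A hedged sketch of toggling a couple of those aspects with the Python client; the keys used inside features (document_level_security, incremental_sync) are assumptions and should be verified against the connector feature schema for your version:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    client.connector.update_features(
        connector_id="my-connector",  # placeholder connector ID
        features={
            # Assumed feature keys; verify against the connector feature schema.
            "document_level_security": {"enabled": True},
            "incremental_sync": {"enabled": True},
        },
    )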

+ ``_ @@ -1387,10 +1456,13 @@ def update_filtering( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the connector filtering. Update the draft filtering configuration of a - connector and marks the draft validation state as edited. The filtering draft - is activated once validated by the running Elastic connector service. The filtering - property is used to configure sync rules (both basic and advanced) for a connector. + .. raw:: html + +

Update the connector filtering.

+

Update the draft filtering configuration of a connector and mark the draft validation state as edited.
The filtering draft is activated once validated by the running Elastic connector service.
The filtering property is used to configure sync rules (both basic and advanced) for a connector.

+ ``_ @@ -1447,8 +1519,11 @@ def update_filtering_validation( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the connector draft filtering validation. Update the draft filtering validation - info for a connector. + .. raw:: html + +

Update the connector draft filtering validation.

+

Update the draft filtering validation info for a connector.

+ ``_ @@ -1501,8 +1576,11 @@ def update_index_name( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the connector index name. Update the `index_name` field of a connector, - specifying the index where the data ingested by the connector is stored. + .. raw:: html + +

Update the connector index name.

+

Update the index_name field of a connector, specifying the index where the data ingested by the connector is stored.

+ ``_ @@ -1556,7 +1634,10 @@ def update_name( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the connector name and description. + .. raw:: html + +

Update the connector name and description.

+ ``_ @@ -1610,7 +1691,10 @@ def update_native( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the connector is_native flag. + .. raw:: html + +

Update the connector is_native flag.

+ ``_ @@ -1663,8 +1747,11 @@ def update_pipeline( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the connector pipeline. When you create a new connector, the configuration - of an ingest pipeline is populated with default settings. + .. raw:: html + +

Update the connector pipeline.

+

When you create a new connector, the configuration of an ingest pipeline is populated with default settings.

+ ``_ @@ -1717,7 +1804,10 @@ def update_scheduling( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the connector scheduling. + .. raw:: html + +

Update the connector scheduling.

+ ``_ @@ -1770,7 +1860,10 @@ def update_service_type( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the connector service type. + .. raw:: html + +

Update the connector service type.

+ ``_ @@ -1830,7 +1923,10 @@ def update_status( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the connector status. + .. raw:: html + +

Update the connector status.

+ ``_ diff --git a/elasticsearch/_sync/client/dangling_indices.py b/elasticsearch/_sync/client/dangling_indices.py index 9e0ab3870..28f228ca6 100644 --- a/elasticsearch/_sync/client/dangling_indices.py +++ b/elasticsearch/_sync/client/dangling_indices.py @@ -39,10 +39,12 @@ def delete_dangling_index( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a dangling index. If Elasticsearch encounters index data that is absent - from the current cluster state, those indices are considered to be dangling. - For example, this can happen if you delete more than `cluster.indices.tombstones.size` - indices while an Elasticsearch node is offline. + .. raw:: html + +

Delete a dangling index.
If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling.
For example, this can happen if you delete more than cluster.indices.tombstones.size indices while an Elasticsearch node is offline.

+ ``_ @@ -98,10 +100,12 @@ def import_dangling_index( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Import a dangling index. If Elasticsearch encounters index data that is absent - from the current cluster state, those indices are considered to be dangling. - For example, this can happen if you delete more than `cluster.indices.tombstones.size` - indices while an Elasticsearch node is offline. + .. raw:: html + +

Import a dangling index.

+

If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling.
For example, this can happen if you delete more than cluster.indices.tombstones.size indices while an Elasticsearch node is offline.

+ ``_ @@ -156,11 +160,13 @@ def list_dangling_indices( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the dangling indices. If Elasticsearch encounters index data that is absent - from the current cluster state, those indices are considered to be dangling. - For example, this can happen if you delete more than `cluster.indices.tombstones.size` - indices while an Elasticsearch node is offline. Use this API to list dangling - indices, which you can then import or delete. + .. raw:: html + +

Get the dangling indices.

+

If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling.
For example, this can happen if you delete more than cluster.indices.tombstones.size indices while an Elasticsearch node is offline.

+

Use this API to list dangling indices, which you can then import or delete.
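For example, listing dangling indices and then importing one by its UUID might look like this with the Python client (the UUID shown is a placeholder taken from the listing; both import and delete require accept_data_loss=True):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    dangling = client.dangling_indices.list_dangling_indices()
    for entry in dangling["dangling_indices"]:
        print(entry["index_name"], entry["index_uuid"])

    # Import (or alternatively delete) a dangling index by its UUID.
    client.dangling_indices.import_dangling_index(
        index_uuid="zmM4e0JtBkeUjiHD-MihPQ",  # placeholder UUID from the listing
        accept_data_loss=True,
    )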

+ ``_ """ diff --git a/elasticsearch/_sync/client/enrich.py b/elasticsearch/_sync/client/enrich.py index 8a9755d89..90fc153b8 100644 --- a/elasticsearch/_sync/client/enrich.py +++ b/elasticsearch/_sync/client/enrich.py @@ -37,7 +37,11 @@ def delete_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete an enrich policy. Deletes an existing enrich policy and its enrich index. + .. raw:: html + +

Delete an enrich policy.
Deletes an existing enrich policy and its enrich index.

+ ``_ @@ -82,7 +86,11 @@ def execute_policy( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Run an enrich policy. Create the enrich index for an existing enrich policy. + .. raw:: html + +

Run an enrich policy.
Create the enrich index for an existing enrich policy.

+ ``_ @@ -130,7 +138,11 @@ def get_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get an enrich policy. Returns information about an enrich policy. + .. raw:: html + +

Get an enrich policy.
Returns information about an enrich policy.

+ ``_ @@ -184,7 +196,11 @@ def put_policy( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create an enrich policy. Creates an enrich policy. + .. raw:: html + +

Create an enrich policy.
Creates an enrich policy.
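As a sketch (the policy name, source index, and field names are invented), a match-type enrich policy can be created and then executed to build its enrich index:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    client.enrich.put_policy(
        name="users-policy",  # example policy name
        match={
            "indices": "users",       # source index (example)
            "match_field": "email",   # field to match on
            "enrich_fields": ["first_name", "last_name"],
        },
    )

    # Build the enrich index for the policy (see the execute policy API above).
    client.enrich.execute_policy(name="users-policy")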

+ ``_ @@ -241,8 +257,11 @@ def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get enrich stats. Returns enrich coordinator statistics and information about - enrich policies that are currently executing. + .. raw:: html + +

Get enrich stats.
Returns enrich coordinator statistics and information about enrich policies that are currently executing.

+ ``_ diff --git a/elasticsearch/_sync/client/eql.py b/elasticsearch/_sync/client/eql.py index 1d01501a6..558e9bad5 100644 --- a/elasticsearch/_sync/client/eql.py +++ b/elasticsearch/_sync/client/eql.py @@ -36,8 +36,12 @@ def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete an async EQL search. Delete an async EQL search or a stored synchronous - EQL search. The API also deletes results for the search. + .. raw:: html + +

Delete an async EQL search.
Delete an async EQL search or a stored synchronous EQL search.
The API also deletes results for the search.

+ ``_ @@ -83,8 +87,11 @@ def get( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Get async EQL search results. Get the current status and available results for - an async EQL search or a stored synchronous EQL search. + .. raw:: html + +

Get async EQL search results.
Get the current status and available results for an async EQL search or a stored synchronous EQL search.

+ ``_ @@ -134,8 +141,11 @@ def get_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the async EQL status. Get the current status for an async EQL search or a - stored synchronous EQL search without returning results. + .. raw:: html + +

Get the async EQL status.
Get the current status for an async EQL search or a stored synchronous EQL search without returning results.

+ ``_ @@ -229,9 +239,12 @@ def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get EQL search results. Returns search results for an Event Query Language (EQL) - query. EQL assumes each document in a data stream or index corresponds to an - event. + .. raw:: html + +

Get EQL search results.
Returns search results for an Event Query Language (EQL) query.
EQL assumes each document in a data stream or index corresponds to an event.
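A minimal sketch of an EQL search with the Python client (the data stream name and query text are examples only):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    response = client.eql.search(
        index="my-data-stream",  # example data stream or index
        query='process where process.name == "regsvr32.exe"',
    )
    for event in response["hits"]["events"]:
        print(event["_source"])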

+ ``_ diff --git a/elasticsearch/_sync/client/esql.py b/elasticsearch/_sync/client/esql.py index 8a087c6ba..85e129c5f 100644 --- a/elasticsearch/_sync/client/esql.py +++ b/elasticsearch/_sync/client/esql.py @@ -73,10 +73,12 @@ def async_query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Run an async ES|QL query. Asynchronously run an ES|QL (Elasticsearch query language) - query, monitor its progress, and retrieve results when they become available. - The API accepts the same parameters and request body as the synchronous query - API, along with additional async related properties. + .. raw:: html + +

Run an async ES|QL query.
Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its progress, and retrieve results when they become available.

+

The API accepts the same parameters and request body as the synchronous query API, along with additional async related properties.

+ ``_ @@ -183,11 +185,17 @@ def async_query_delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete an async ES|QL query. If the query is still running, it is cancelled. - Otherwise, the stored results are deleted. If the Elasticsearch security features - are enabled, only the following users can use this API to delete a query: * The - authenticated user that submitted the original query request * Users with the - `cancel_task` cluster privilege + .. raw:: html + +

Delete an async ES|QL query.
If the query is still running, it is cancelled.
Otherwise, the stored results are deleted.

If the Elasticsearch security features are enabled, only the following users can use this API to delete a query:

* The authenticated user that submitted the original query request
* Users with the cancel_task cluster privilege
+ ``_ @@ -235,10 +243,12 @@ def async_query_get( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Get async ES|QL query results. Get the current status and available results or - stored results for an ES|QL asynchronous query. If the Elasticsearch security - features are enabled, only the user who first submitted the ES|QL query can retrieve - the results using this API. + .. raw:: html + +

Get async ES|QL query results.
Get the current status and available results or stored results for an ES|QL asynchronous query.
If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can retrieve the results using this API.

+ ``_ @@ -331,8 +341,11 @@ def query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Run an ES|QL query. Get search results for an ES|QL (Elasticsearch query language) - query. + .. raw:: html + +

Run an ES|QL query.
Get search results for an ES|QL (Elasticsearch query language) query.
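For instance, a synchronous ES|QL query issued through the Python client (the index pattern and query text are placeholders; the default JSON response carries columns and rows):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    response = client.esql.query(
        query="FROM my-index | STATS count = COUNT(*) BY host.name | LIMIT 10",
    )
    print(response["columns"])  # column names and types
    print(response["values"])   # row data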

+ ``_ diff --git a/elasticsearch/_sync/client/features.py b/elasticsearch/_sync/client/features.py index 6bc6c1c66..66f19522b 100644 --- a/elasticsearch/_sync/client/features.py +++ b/elasticsearch/_sync/client/features.py @@ -36,17 +36,17 @@ def get_features( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the features. Get a list of features that can be included in snapshots using - the `feature_states` field when creating a snapshot. You can use this API to - determine which feature states to include when taking a snapshot. By default, - all feature states are included in a snapshot if that snapshot includes the global - state, or none if it does not. A feature state includes one or more system indices - necessary for a given feature to function. In order to ensure data integrity, - all system indices that comprise a feature state are snapshotted and restored - together. The features listed by this API are a combination of built-in features - and features defined by plugins. In order for a feature state to be listed in - this API and recognized as a valid feature state by the create snapshot API, - the plugin that defines that feature must be installed on the master node. + .. raw:: html + +

Get the features.
Get a list of features that can be included in snapshots using the feature_states field when creating a snapshot.
You can use this API to determine which feature states to include when taking a snapshot.
By default, all feature states are included in a snapshot if that snapshot includes the global state, or none if it does not.

A feature state includes one or more system indices necessary for a given feature to function.
In order to ensure data integrity, all system indices that comprise a feature state are snapshotted and restored together.

The features listed by this API are a combination of built-in features and features defined by plugins.
In order for a feature state to be listed in this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node.

+ ``_ @@ -87,20 +87,20 @@ def reset_features( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Reset the features. Clear all of the state information stored in system indices - by Elasticsearch features, including the security and machine learning indices. - WARNING: Intended for development and testing use only. Do not reset features - on a production cluster. Return a cluster to the same state as a new installation - by resetting the feature state for all Elasticsearch features. This deletes all - state information stored in system indices. The response code is HTTP 200 if - the state is successfully reset for all features. It is HTTP 500 if the reset - operation failed for any feature. Note that select features might provide a way - to reset particular system indices. Using this API resets all features, both - those that are built-in and implemented as plugins. To list the features that - will be affected, use the get features API. IMPORTANT: The features installed - on the node you submit this request to are the features that will be reset. Run - on the master node if you have any doubts about which plugins are installed on - individual nodes. + .. raw:: html + +

Reset the features.
Clear all of the state information stored in system indices by Elasticsearch features, including the security and machine learning indices.

WARNING: Intended for development and testing use only. Do not reset features on a production cluster.

Return a cluster to the same state as a new installation by resetting the feature state for all Elasticsearch features.
This deletes all state information stored in system indices.

The response code is HTTP 200 if the state is successfully reset for all features.
It is HTTP 500 if the reset operation failed for any feature.

Note that select features might provide a way to reset particular system indices.
Using this API resets all features, both those that are built-in and implemented as plugins.

To list the features that will be affected, use the get features API.

IMPORTANT: The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes.

+ ``_ diff --git a/elasticsearch/_sync/client/fleet.py b/elasticsearch/_sync/client/fleet.py index 39d30f376..f7bce669d 100644 --- a/elasticsearch/_sync/client/fleet.py +++ b/elasticsearch/_sync/client/fleet.py @@ -46,8 +46,10 @@ def global_checkpoints( wait_for_index: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Returns the current global checkpoints for an index. This API is design for internal - use by the fleet server project. + .. raw:: html + +

Returns the current global checkpoints for an index. This API is designed for internal use by the Fleet server project.

+ ``_ @@ -132,10 +134,12 @@ def msearch( wait_for_checkpoints: t.Optional[t.Sequence[int]] = None, ) -> ObjectApiResponse[t.Any]: """ - Executes several [fleet searches](https://www.elastic.co/guide/en/elasticsearch/reference/current/fleet-search.html) - with a single API request. The API follows the same structure as the [multi search](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html) - API. However, similar to the fleet search API, it supports the wait_for_checkpoints - parameter. + .. raw:: html + +

Executes several fleet searches with a single API request.
The API follows the same structure as the multi search API. However, similar to the fleet search API, it supports the wait_for_checkpoints parameter.

+ :param searches: :param index: A single target to search. If the target is an index alias, it @@ -378,9 +382,11 @@ def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - The purpose of the fleet search api is to provide a search api where the search - will only be executed after provided checkpoint has been processed and is visible - for searches inside of Elasticsearch. + .. raw:: html + +

The purpose of the fleet search API is to provide a search API where the search will only be executed after the provided checkpoint has been processed and is visible for searches inside of Elasticsearch.

+ :param index: A single target to search. If the target is an index alias, it must resolve to a single index. diff --git a/elasticsearch/_sync/client/graph.py b/elasticsearch/_sync/client/graph.py index b8253cfc1..127b6172c 100644 --- a/elasticsearch/_sync/client/graph.py +++ b/elasticsearch/_sync/client/graph.py @@ -45,14 +45,15 @@ def explore( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Explore graph analytics. Extract and summarize information about the documents - and terms in an Elasticsearch data stream or index. The easiest way to understand - the behavior of this API is to use the Graph UI to explore connections. An initial - request to the `_explore` API contains a seed query that identifies the documents - of interest and specifies the fields that define the vertices and connections - you want to include in the graph. Subsequent requests enable you to spider out - from one more vertices of interest. You can exclude vertices that have already - been returned. + .. raw:: html + +

Explore graph analytics.
Extract and summarize information about the documents and terms in an Elasticsearch data stream or index.
The easiest way to understand the behavior of this API is to use the Graph UI to explore connections.
An initial request to the _explore API contains a seed query that identifies the documents of interest and specifies the fields that define the vertices and connections you want to include in the graph.
Subsequent requests enable you to spider out from one or more vertices of interest.
You can exclude vertices that have already been returned.

+ ``_ diff --git a/elasticsearch/_sync/client/ilm.py b/elasticsearch/_sync/client/ilm.py index 2b133ea2c..f42c24b26 100644 --- a/elasticsearch/_sync/client/ilm.py +++ b/elasticsearch/_sync/client/ilm.py @@ -38,9 +38,11 @@ def delete_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a lifecycle policy. You cannot delete policies that are currently in use. - If the policy is being used to manage any indices, the request fails and returns - an error. + .. raw:: html + +

Delete a lifecycle policy.
You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error.

+ ``_ @@ -92,11 +94,13 @@ def explain_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Explain the lifecycle state. Get the current lifecycle status for one or more - indices. For data streams, the API retrieves the current lifecycle status for - the stream's backing indices. The response indicates when the index entered each - lifecycle state, provides the definition of the running phase, and information - about any failures. + .. raw:: html + +

Explain the lifecycle state.
Get the current lifecycle status for one or more indices.
For data streams, the API retrieves the current lifecycle status for the stream's backing indices.

+

The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and information about any failures.

+ ``_ @@ -154,7 +158,10 @@ def get_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get lifecycle policies. + .. raw:: html + +

Get lifecycle policies.

+ ``_ @@ -205,7 +212,11 @@ def get_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the ILM status. Get the current index lifecycle management status. + .. raw:: html + +

Get the ILM status.
Get the current index lifecycle management status.

+ ``_ """ @@ -246,18 +257,22 @@ def migrate_to_data_tiers( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Migrate to data tiers routing. Switch the indices, ILM policies, and legacy, - composable, and component templates from using custom node attributes and attribute-based - allocation filters to using data tiers. Optionally, delete one legacy index template. - Using node roles enables ILM to automatically move the indices between data tiers. - Migrating away from custom node attributes routing can be manually performed. - This API provides an automated way of performing three out of the four manual - steps listed in the migration guide: 1. Stop setting the custom hot attribute - on new indices. 1. Remove custom allocation settings from existing ILM policies. - 1. Replace custom allocation settings from existing indices with the corresponding - tier preference. ILM must be stopped before performing the migration. Use the - stop ILM and get ILM status APIs to wait until the reported operation mode is - `STOPPED`. + .. raw:: html + +

Migrate to data tiers routing.
Switch the indices, ILM policies, and legacy, composable, and component templates from using custom node attributes and attribute-based allocation filters to using data tiers.
Optionally, delete one legacy index template.
Using node roles enables ILM to automatically move the indices between data tiers.

Migrating away from custom node attributes routing can be manually performed.
This API provides an automated way of performing three out of the four manual steps listed in the migration guide:

1. Stop setting the custom hot attribute on new indices.
2. Remove custom allocation settings from existing ILM policies.
3. Replace custom allocation settings from existing indices with the corresponding tier preference.

ILM must be stopped before performing the migration.
Use the stop ILM and get ILM status APIs to wait until the reported operation mode is STOPPED.

+ ``_ @@ -317,21 +332,20 @@ def move_to_step( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Move to a lifecycle step. Manually move an index into a specific step in the - lifecycle policy and run that step. WARNING: This operation can result in the - loss of data. Manually moving an index into a specific step runs that step even - if it has already been performed. This is a potentially destructive action and - this should be considered an expert level API. You must specify both the current - step and the step to be executed in the body of the request. The request will - fail if the current step does not match the step currently running for the index - This is to prevent the index from being moved from an unexpected step into the - next step. When specifying the target (`next_step`) to which the index will be - moved, either the name or both the action and name fields are optional. If only - the phase is specified, the index will move to the first step of the first action - in the target phase. If the phase and action are specified, the index will move - to the first step of the specified action in the specified phase. Only actions - specified in the ILM policy are considered valid. An index cannot move to a step - that is not part of its policy. + .. raw:: html + +

Move to a lifecycle step.
Manually move an index into a specific step in the lifecycle policy and run that step.

WARNING: This operation can result in the loss of data. Manually moving an index into a specific step runs that step even if it has already been performed. This is a potentially destructive action and this should be considered an expert level API.

You must specify both the current step and the step to be executed in the body of the request.
The request will fail if the current step does not match the step currently running for the index.
This is to prevent the index from being moved from an unexpected step into the next step.

When specifying the target (next_step) to which the index will be moved, either the name or both the action and name fields are optional.
If only the phase is specified, the index will move to the first step of the first action in the target phase.
If the phase and action are specified, the index will move to the first step of the specified action in the specified phase.
Only actions specified in the ILM policy are considered valid.
An index cannot move to a step that is not part of its policy.
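A hedged sketch of that request body with the Python client; the index name is a placeholder and the step values shown (a completed new phase moving into the forcemerge action of the warm phase) are illustrative and must match the steps defined by your own policy:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    client.ilm.move_to_step(
        index="my-index",  # placeholder index managed by an ILM policy
        current_step={"phase": "new", "action": "complete", "name": "complete"},
        next_step={"phase": "warm", "action": "forcemerge", "name": "forcemerge"},
    )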

+ ``_ @@ -394,9 +408,12 @@ def put_lifecycle( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a lifecycle policy. If the specified policy exists, it is replaced - and the policy version is incremented. NOTE: Only the latest version of the policy - is stored, you cannot revert to previous versions. + .. raw:: html + +

Create or update a lifecycle policy.
If the specified policy exists, it is replaced and the policy version is incremented.

+

NOTE: Only the latest version of the policy is stored, you cannot revert to previous versions.
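A sketch of creating a simple policy with the Python client; the policy name and phase configuration are examples rather than recommendations:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    client.ilm.put_lifecycle(
        name="my-policy",  # example policy name
        policy={
            "phases": {
                "hot": {
                    "actions": {"rollover": {"max_primary_shard_size": "50gb"}}
                },
                "delete": {
                    "min_age": "30d",
                    "actions": {"delete": {}},
                },
            }
        },
    )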

+ ``_ @@ -455,8 +472,12 @@ def remove_policy( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Remove policies from an index. Remove the assigned lifecycle policies from an - index or a data stream's backing indices. It also stops managing the indices. + .. raw:: html + +

Remove policies from an index.
Remove the assigned lifecycle policies from an index or a data stream's backing indices.
It also stops managing the indices.

+ ``_ @@ -496,10 +517,13 @@ def retry( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Retry a policy. Retry running the lifecycle policy for an index that is in the - ERROR step. The API sets the policy back to the step where the error occurred - and runs the step. Use the explain lifecycle state API to determine whether an - index is in the ERROR step. + .. raw:: html + +

Retry a policy.
Retry running the lifecycle policy for an index that is in the ERROR step.
The API sets the policy back to the step where the error occurred and runs the step.
Use the explain lifecycle state API to determine whether an index is in the ERROR step.

+ ``_ @@ -541,9 +565,13 @@ def start( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Start the ILM plugin. Start the index lifecycle management plugin if it is currently - stopped. ILM is started automatically when the cluster is formed. Restarting - ILM is necessary only when it has been stopped using the stop ILM API. + .. raw:: html + +

Start the ILM plugin.
Start the index lifecycle management plugin if it is currently stopped.
ILM is started automatically when the cluster is formed.
Restarting ILM is necessary only when it has been stopped using the stop ILM API.

+ ``_ @@ -590,12 +618,14 @@ def stop( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Stop the ILM plugin. Halt all lifecycle management operations and stop the index - lifecycle management plugin. This is useful when you are performing maintenance - on the cluster and need to prevent ILM from performing any actions on your indices. - The API returns as soon as the stop request has been acknowledged, but the plugin - might continue to run until in-progress operations complete and the plugin can - be safely stopped. Use the get ILM status API to check whether ILM is running. + .. raw:: html + +

Stop the ILM plugin.
Halt all lifecycle management operations and stop the index lifecycle management plugin.
This is useful when you are performing maintenance on the cluster and need to prevent ILM from performing any actions on your indices.

The API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped.
Use the get ILM status API to check whether ILM is running.

+ ``_ diff --git a/elasticsearch/_sync/client/indices.py b/elasticsearch/_sync/client/indices.py index a700cb9f2..39e513ea1 100644 --- a/elasticsearch/_sync/client/indices.py +++ b/elasticsearch/_sync/client/indices.py @@ -55,8 +55,11 @@ def add_block( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Add an index block. Limits the operations allowed on an index by blocking specific - operation types. + .. raw:: html + +

Add an index block.
Limits the operations allowed on an index by blocking specific operation types.

+ ``_ @@ -143,12 +146,15 @@ def analyze( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get tokens from text analysis. The analyze API performs analysis on a text string - and returns the resulting tokens. Generating excessive amount of tokens may cause - a node to run out of memory. The `index.analyze.max_token_count` setting enables - you to limit the number of tokens that can be produced. If more than this limit - of tokens gets generated, an error occurs. The `_analyze` endpoint without a - specified index will always use `10000` as its limit. + .. raw:: html + +

Get tokens from text analysis.
The analyze API performs analysis on a text string and returns the resulting tokens.

Generating an excessive amount of tokens may cause a node to run out of memory.
The index.analyze.max_token_count setting enables you to limit the number of tokens that can be produced.
If more than this limit of tokens gets generated, an error occurs.
The _analyze endpoint without a specified index will always use 10000 as its limit.
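For example, running the built-in standard analyzer over a short sample string with the Python client (the analyzer and text are arbitrary):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    response = client.indices.analyze(
        analyzer="standard",
        text="The quick brown fox",
    )
    print([token["token"] for token in response["tokens"]])
    # ['the', 'quick', 'brown', 'fox']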

+ ``_ @@ -249,11 +255,15 @@ def clear_cache( request: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear the cache. Clear the cache of one or more indices. For data streams, the - API clears the caches of the stream's backing indices. By default, the clear - cache API clears all caches. To clear only specific caches, use the `fielddata`, - `query`, or `request` parameters. To clear the cache only of specific fields, - use the `fields` parameter. + .. raw:: html + +

Clear the cache.
Clear the cache of one or more indices.
For data streams, the API clears the caches of the stream's backing indices.

By default, the clear cache API clears all caches.
To clear only specific caches, use the fielddata, query, or request parameters.
To clear the cache only of specific fields, use the fields parameter.

+ ``_ @@ -338,44 +348,44 @@ def clone( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clone an index. Clone an existing index into a new index. Each original primary - shard is cloned into a new primary shard in the new index. IMPORTANT: Elasticsearch - does not apply index templates to the resulting index. The API also does not - copy index metadata from the original index. Index metadata includes aliases, - index lifecycle management phase definitions, and cross-cluster replication (CCR) - follower information. For example, if you clone a CCR follower index, the resulting - clone will not be a follower index. The clone API copies most index settings - from the source index to the resulting index, with the exception of `index.number_of_replicas` - and `index.auto_expand_replicas`. To set the number of replicas in the resulting - index, configure these settings in the clone request. Cloning works as follows: - * First, it creates a new target index with the same definition as the source - index. * Then it hard-links segments from the source index into the target index. - If the file system does not support hard-linking, all segments are copied into - the new index, which is a much more time consuming process. * Finally, it recovers - the target index as though it were a closed index which had just been re-opened. - IMPORTANT: Indices can only be cloned if they meet the following requirements: - * The index must be marked as read-only and have a cluster health status of green. - * The target index must not exist. * The source index must have the same number - of primary shards as the target index. * The node handling the clone process - must have sufficient free disk space to accommodate a second copy of the existing - index. The current write index on a data stream cannot be cloned. In order to - clone the current write index, the data stream must first be rolled over so that - a new write index is created and then the previous write index can be cloned. - NOTE: Mappings cannot be specified in the `_clone` request. The mappings of the - source index will be used for the target index. **Monitor the cloning process** - The cloning process can be monitored with the cat recovery API or the cluster - health API can be used to wait until all primary shards have been allocated by - setting the `wait_for_status` parameter to `yellow`. The `_clone` API returns - as soon as the target index has been added to the cluster state, before any shards - have been allocated. At this point, all shards are in the state unassigned. If, - for any reason, the target index can't be allocated, its primary shard will remain - unassigned until it can be allocated on that node. Once the primary shard is - allocated, it moves to state initializing, and the clone process begins. When - the clone operation completes, the shard will become active. At that point, Elasticsearch - will try to allocate any replicas and may decide to relocate the primary shard - to another node. **Wait for active shards** Because the clone operation creates - a new index to clone the shards to, the wait for active shards setting on index - creation applies to the clone index action as well. + .. raw:: html + +

Clone an index. + Clone an existing index into a new index. + Each original primary shard is cloned into a new primary shard in the new index.

+

IMPORTANT: Elasticsearch does not apply index templates to the resulting index. + The API also does not copy index metadata from the original index. + Index metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information. + For example, if you clone a CCR follower index, the resulting clone will not be a follower index.

+

The clone API copies most index settings from the source index to the resulting index, with the exception of index.number_of_replicas and index.auto_expand_replicas. + To set the number of replicas in the resulting index, configure these settings in the clone request.

+

Cloning works as follows:

+
  • First, it creates a new target index with the same definition as the source index.
  • Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time consuming process.
  • Finally, it recovers the target index as though it were a closed index which had just been re-opened.

IMPORTANT: Indices can only be cloned if they meet the following requirements:

+
  • The index must be marked as read-only and have a cluster health status of green.
  • The target index must not exist.
  • The source index must have the same number of primary shards as the target index.
  • The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index.

The current write index on a data stream cannot be cloned. + In order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned.

+

NOTE: Mappings cannot be specified in the _clone request. The mappings of the source index will be used for the target index.

+

Monitor the cloning process

+

The cloning process can be monitored with the cat recovery API, or the cluster health API can be used to wait until all primary shards have been allocated by setting the wait_for_status parameter to yellow.

+

The _clone API returns as soon as the target index has been added to the cluster state, before any shards have been allocated. + At this point, all shards are in the state unassigned. + If, for any reason, the target index can't be allocated, its primary shard will remain unassigned until it can be allocated on that node.

+

Once the primary shard is allocated, it moves to state initializing, and the clone process begins. + When the clone operation completes, the shard will become active. + At that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node.

+

Wait for active shards

+

Because the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well.

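A minimal sketch of the clone flow with the Python client, reusing the hypothetical client from the earlier sketch; the index names are made up, and the write block mirrors the read-only requirement listed above.

```python
# Mark the source index read-only so it satisfies the clone requirements.
client.indices.add_block(index="my-index", block="write")

# Clone it into a new target index (which must not already exist).
client.indices.clone(index="my-index", target="my-index-clone")
```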
+ ``_ @@ -463,24 +473,23 @@ def close( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Close an index. A closed index is blocked for read or write operations and does - not allow all operations that opened indices allow. It is not possible to index - documents or to search for documents in a closed index. Closed indices do not - have to maintain internal data structures for indexing or searching documents, - which results in a smaller overhead on the cluster. When opening or closing an - index, the master node is responsible for restarting the index shards to reflect - the new state of the index. The shards will then go through the normal recovery - process. The data of opened and closed indices is automatically replicated by - the cluster to ensure that enough shard copies are safely kept around at all - times. You can open and close multiple indices. An error is thrown if the request - explicitly refers to a missing index. This behaviour can be turned off using - the `ignore_unavailable=true` parameter. By default, you must explicitly name - the indices you are opening or closing. To open or close indices with `_all`, - `*`, or other wildcard expressions, change the` action.destructive_requires_name` - setting to `false`. This setting can also be changed with the cluster update - settings API. Closed indices consume a significant amount of disk-space which - can cause problems in managed environments. Closing indices can be turned off - with the cluster settings API by setting `cluster.indices.close.enable` to `false`. + .. raw:: html + +

Close an index. + A closed index is blocked for read or write operations and does not allow all operations that opened indices allow. + It is not possible to index documents or to search for documents in a closed index. + Closed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster.

+

When opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index. + The shards will then go through the normal recovery process. + The data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times.

+

You can open and close multiple indices. + An error is thrown if the request explicitly refers to a missing index. + This behaviour can be turned off using the ignore_unavailable=true parameter.

+

By default, you must explicitly name the indices you are opening or closing. + To open or close indices with _all, *, or other wildcard expressions, change the action.destructive_requires_name setting to false. This setting can also be changed with the cluster update settings API.

+

Closed indices consume a significant amount of disk-space which can cause problems in managed environments. + Closing indices can be turned off with the cluster settings API by setting cluster.indices.close.enable to false.

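For illustration, a one-line sketch using the hypothetical client defined earlier:

```python
# Close the index; it will no longer accept read or write operations.
client.indices.close(index="my-index")
```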
+ ``_ @@ -561,26 +570,27 @@ def create( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create an index. You can use the create index API to add a new index to an Elasticsearch - cluster. When creating an index, you can specify the following: * Settings for - the index. * Mappings for fields in the index. * Index aliases **Wait for active - shards** By default, index creation will only return a response to the client - when the primary copies of each shard have been started, or the request times - out. The index creation response will indicate what happened. For example, `acknowledged` - indicates whether the index was successfully created in the cluster, `while shards_acknowledged` - indicates whether the requisite number of shard copies were started for each - shard in the index before timing out. Note that it is still possible for either - `acknowledged` or `shards_acknowledged` to be `false`, but for the index creation - to be successful. These values simply indicate whether the operation completed - before the timeout. If `acknowledged` is false, the request timed out before - the cluster state was updated with the newly created index, but it probably will - be created sometime soon. If `shards_acknowledged` is false, then the request - timed out before the requisite number of shards were started (by default just - the primaries), even if the cluster state was successfully updated to reflect - the newly created index (that is to say, `acknowledged` is `true`). You can change - the default of only waiting for the primary shards to start through the index - setting `index.write.wait_for_active_shards`. Note that changing this setting - will also affect the `wait_for_active_shards` value on all subsequent write operations. + .. raw:: html + +

Create an index. + You can use the create index API to add a new index to an Elasticsearch cluster. + When creating an index, you can specify the following:

+
  • Settings for the index.
  • Mappings for fields in the index.
  • Index aliases

Wait for active shards

+

By default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out. + The index creation response will indicate what happened. + For example, acknowledged indicates whether the index was successfully created in the cluster, while shards_acknowledged indicates whether the requisite number of shard copies were started for each shard in the index before timing out. + Note that it is still possible for either acknowledged or shards_acknowledged to be false, but for the index creation to be successful. + These values simply indicate whether the operation completed before the timeout. + If acknowledged is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon. + If shards_acknowledged is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, acknowledged is true).

+

You can change the default of only waiting for the primary shards to start through the index setting index.write.wait_for_active_shards. + Note that changing this setting will also affect the wait_for_active_shards value on all subsequent write operations.

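A sketch of a create call covering the three kinds of configuration listed above (settings, mappings, aliases); the names and values are illustrative, not from this patch.

```python
client.indices.create(
    index="my-index",
    settings={"number_of_shards": 1, "number_of_replicas": 1},
    mappings={"properties": {"created_at": {"type": "date"}}},
    aliases={"my-alias": {}},
    wait_for_active_shards="all",  # optionally wait for replicas as well
)
```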
+ ``_ @@ -653,8 +663,12 @@ def create_data_stream( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a data stream. Creates a data stream. You must have a matching index template - with data stream enabled. + .. raw:: html + +

Create a data stream. + Creates a data stream. + You must have a matching index template with data stream enabled.

+ ``_ @@ -715,7 +729,11 @@ def data_streams_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get data stream stats. Retrieves statistics for one or more data streams. + .. raw:: html + +

Get data stream stats. + Retrieves statistics for one or more data streams.

+ ``_ @@ -776,11 +794,15 @@ def delete( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete indices. Deleting an index deletes its documents, shards, and metadata. - It does not delete related Kibana components, such as data views, visualizations, - or dashboards. You cannot delete the current write index of a data stream. To - delete the index, you must roll over the data stream so a new write index is - created. You can then use the delete index API to delete the previous write index. + .. raw:: html + +

Delete indices. + Deleting an index deletes its documents, shards, and metadata. + It does not delete related Kibana components, such as data views, visualizations, or dashboards.

+

You cannot delete the current write index of a data stream. + To delete the index, you must roll over the data stream so a new write index is created. + You can then use the delete index API to delete the previous write index.

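Illustrative only, using the hypothetical client from above:

```python
# Deletes documents, shards, and metadata, but not Kibana objects such as data views.
client.indices.delete(index="my-old-index")
```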
+ ``_ @@ -850,7 +872,11 @@ def delete_alias( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete an alias. Removes a data stream or index from an alias. + .. raw:: html + +

Delete an alias. + Removes a data stream or index from an alias.

+ ``_ @@ -914,8 +940,11 @@ def delete_data_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete data stream lifecycles. Removes the data stream lifecycle from a data - stream, rendering it not managed by the data stream lifecycle. + .. raw:: html + +

Delete data stream lifecycles. + Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle.

+ ``_ @@ -975,7 +1004,11 @@ def delete_data_stream( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete data streams. Deletes one or more data streams and their backing indices. + .. raw:: html + +

Delete data streams. + Deletes one or more data streams and their backing indices.

+ ``_ @@ -1027,10 +1060,13 @@ def delete_index_template( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete an index template. The provided may contain multiple - template names separated by a comma. If multiple template names are specified - then there is no wildcard support and the provided names should match completely - with existing templates. + .. raw:: html + +

Delete an index template. + The provided <index-template> may contain multiple template names separated by a comma. If multiple template + names are specified, then there is no wildcard support and the provided names should match completely with + existing templates.

+ ``_ @@ -1082,7 +1118,10 @@ def delete_template( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a legacy index template. + .. raw:: html + +

Delete a legacy index template.

+ ``_ @@ -1145,16 +1184,16 @@ def disk_usage( run_expensive_tasks: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Analyze the index disk usage. Analyze the disk usage of each field of an index - or data stream. This API might not support indices created in previous Elasticsearch - versions. The result of a small index can be inaccurate as some parts of an index - might not be analyzed by the API. NOTE: The total size of fields of the analyzed - shards of the index in the response is usually smaller than the index `store_size` - value because some small metadata files are ignored and some parts of data files - might not be scanned by the API. Since stored fields are stored together in a - compressed format, the sizes of stored fields are also estimates and can be inaccurate. - The stored size of the `_id` field is likely underestimated while the `_source` - field is overestimated. + .. raw:: html + +

Analyze the index disk usage. + Analyze the disk usage of each field of an index or data stream. + This API might not support indices created in previous Elasticsearch versions. + The result of a small index can be inaccurate as some parts of an index might not be analyzed by the API.

+

NOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index store_size value because some small metadata files are ignored and some parts of data files might not be scanned by the API. + Since stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate. + The stored size of the _id field is likely underestimated while the _source field is overestimated.

+ ``_ @@ -1228,14 +1267,16 @@ def downsample( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Downsample an index. Aggregate a time series (TSDS) index and store pre-computed - statistical summaries (`min`, `max`, `sum`, `value_count` and `avg`) for each - metric field grouped by a configured time interval. For example, a TSDS index - that contains metrics sampled every 10 seconds can be downsampled to an hourly - index. All documents within an hour interval are summarized and stored as a single - document in the downsample index. NOTE: Only indices in a time series data stream - are supported. Neither field nor document level security can be defined on the - source index. The source index must be read only (`index.blocks.write: true`). + .. raw:: html + +

Downsample an index. + Aggregate a time series (TSDS) index and store pre-computed statistical summaries (min, max, sum, value_count and avg) for each metric field grouped by a configured time interval. + For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index. + All documents within an hour interval are summarized and stored as a single document in the downsample index.

+

NOTE: Only indices in a time series data stream are supported. + Neither field nor document level security can be defined on the source index. + The source index must be read only (index.blocks.write: true).

+ ``_ @@ -1303,7 +1344,11 @@ def exists( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Check indices. Check if one or more indices, index aliases, or data streams exist. + .. raw:: html + +

Check indices. + Check if one or more indices, index aliases, or data streams exist.

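A small sketch; the HEAD-style response is truthy when the target exists (hypothetical names again):

```python
if client.indices.exists(index="my-index"):
    print("my-index (or a matching alias/data stream) exists")
```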
+ ``_ @@ -1381,7 +1426,11 @@ def exists_alias( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Check aliases. Checks if one or more data stream or index aliases exist. + .. raw:: html + +

Check aliases. + Checks if one or more data stream or index aliases exist.

+ ``_ @@ -1451,7 +1500,11 @@ def exists_index_template( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Check index templates. Check whether index templates exist. + .. raw:: html + +

Check index templates. + Check whether index templates exist.

+ ``_ @@ -1500,11 +1553,13 @@ def exists_template( pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ - Check existence of index templates. Get information about whether index templates - exist. Index templates define settings, mappings, and aliases that can be applied - automatically to new indices. IMPORTANT: This documentation is about legacy index - templates, which are deprecated and will be replaced by the composable templates - introduced in Elasticsearch 7.8. + .. raw:: html + +

Check existence of index templates. + Get information about whether index templates exist. + Index templates define settings, mappings, and aliases that can be applied automatically to new indices.

+

IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.

+ ``_ @@ -1558,10 +1613,11 @@ def explain_data_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the status for a data stream lifecycle. Get information about an index or - data stream's current data stream lifecycle status, such as time since index - creation, time since rollover, the lifecycle configuration managing the index, - or any errors encountered during lifecycle execution. + .. raw:: html + +

Get the status for a data stream lifecycle. + Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution.

+ ``_ @@ -1623,13 +1679,15 @@ def field_usage_stats( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Get field usage stats. Get field usage information for each shard and field of - an index. Field usage statistics are automatically captured when queries are - running on a cluster. A shard-level search request that accesses a given field, - even if multiple times during that request, is counted as a single use. The response - body reports the per-shard usage count of the data structures that back the fields - in the index. A given request will increment each count by a maximum value of - 1, even if the request accesses the same field multiple times. + .. raw:: html + +

Get field usage stats. + Get field usage information for each shard and field of an index. + Field usage statistics are automatically captured when queries are running on a cluster. + A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use.

+

The response body reports the per-shard usage count of the data structures that back the fields in the index. + A given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times.

+ ``_ @@ -1708,22 +1766,18 @@ def flush( wait_if_ongoing: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Flush data streams or indices. Flushing a data stream or index is the process - of making sure that any data that is currently only stored in the transaction - log is also permanently stored in the Lucene index. When restarting, Elasticsearch - replays any unflushed operations from the transaction log into the Lucene index - to bring it back into the state that it was in before the restart. Elasticsearch - automatically triggers flushes as needed, using heuristics that trade off the - size of the unflushed transaction log against the cost of performing each flush. - After each operation has been flushed it is permanently stored in the Lucene - index. This may mean that there is no need to maintain an additional copy of - it in the transaction log. The transaction log is made up of multiple files, - called generations, and Elasticsearch will delete any generation files when they - are no longer needed, freeing up disk space. It is also possible to trigger a - flush on one or more indices using the flush API, although it is rare for users - to need to call this API directly. If you call the flush API after indexing some - documents then a successful response indicates that Elasticsearch has flushed - all the documents that were indexed before the flush API was called. + .. raw:: html + +

Flush data streams or indices. + Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index. + When restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart. + Elasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush.

+

After each operation has been flushed it is permanently stored in the Lucene index. + This may mean that there is no need to maintain an additional copy of it in the transaction log. + The transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space.

+

It is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly. + If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called.

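For illustration, an explicit flush with the hypothetical client; as noted above, calling this API directly is rarely necessary.

```python
# Wait for any ongoing flush to finish rather than failing if one is already running.
client.indices.flush(index="my-index", wait_if_ongoing=True)
```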
+ ``_ @@ -1806,49 +1860,49 @@ def forcemerge( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Force a merge. Perform the force merge operation on the shards of one or more - indices. For data streams, the API forces a merge on the shards of the stream's - backing indices. Merging reduces the number of segments in each shard by merging - some of them together and also frees up the space used by deleted documents. - Merging normally happens automatically, but sometimes it is useful to trigger - a merge manually. WARNING: We recommend force merging only a read-only index - (meaning the index is no longer receiving writes). When documents are updated - or deleted, the old version is not immediately removed but instead soft-deleted - and marked with a "tombstone". These soft-deleted documents are automatically - cleaned up during regular segment merges. But force merge can cause very large - (greater than 5 GB) segments to be produced, which are not eligible for regular - merges. So the number of soft-deleted documents can then grow rapidly, resulting - in higher disk usage and worse search performance. If you regularly force merge - an index receiving writes, this can also make snapshots more expensive, since - the new documents can't be backed up incrementally. **Blocks during a force merge** - Calls to this API block until the merge is complete (unless request contains - `wait_for_completion=false`). If the client connection is lost before completion - then the force merge process will continue in the background. Any new requests - to force merge the same indices will also block until the ongoing force merge - is complete. **Running force merge asynchronously** If the request contains `wait_for_completion=false`, - Elasticsearch performs some preflight checks, launches the request, and returns - a task you can use to get the status of the task. However, you can not cancel - this task as the force merge task is not cancelable. Elasticsearch creates a - record of this task as a document at `_tasks/`. When you are done with - a task, you should delete the task document so Elasticsearch can reclaim the - space. **Force merging multiple indices** You can force merge multiple indices - with a single request by targeting: * One or more data streams that contain multiple - backing indices * Multiple indices * One or more aliases * All data streams and - indices in a cluster Each targeted shard is force-merged separately using the - force_merge threadpool. By default each node only has a single `force_merge` - thread which means that the shards on that node are force-merged one at a time. - If you expand the `force_merge` threadpool on a node then it will force merge - its shards in parallel Force merge makes the storage for the shard being merged - temporarily increase, as it may require free space up to triple its size in case - `max_num_segments parameter` is set to `1`, to rewrite all segments into a new - one. **Data streams and time-based indices** Force-merging is useful for managing - a data stream's older backing indices and other time-based indices, particularly - after a rollover. In these cases, each index only receives indexing traffic for - a certain period of time. Once an index receive no more writes, its shards can - be force-merged to a single segment. This can be a good idea because single-segment - shards can sometimes use simpler and more efficient data structures to perform - searches. 
For example: ``` POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1 - ``` + .. raw:: html + +

Force a merge. + Perform the force merge operation on the shards of one or more indices. + For data streams, the API forces a merge on the shards of the stream's backing indices.

+

Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents. + Merging normally happens automatically, but sometimes it is useful to trigger a merge manually.

+

WARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes). + When documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a "tombstone". + These soft-deleted documents are automatically cleaned up during regular segment merges. + But force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges. + So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. + If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally.

+

Blocks during a force merge

+

Calls to this API block until the merge is complete (unless request contains wait_for_completion=false). + If the client connection is lost before completion then the force merge process will continue in the background. + Any new requests to force merge the same indices will also block until the ongoing force merge is complete.

+

Running force merge asynchronously

+

If the request contains wait_for_completion=false, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task. + However, you can not cancel this task as the force merge task is not cancelable. + Elasticsearch creates a record of this task as a document at _tasks/<task_id>. + When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space.

+

Force merging multiple indices

+

You can force merge multiple indices with a single request by targeting:

+
  • One or more data streams that contain multiple backing indices
  • Multiple indices
  • One or more aliases
  • All data streams and indices in a cluster

Each targeted shard is force-merged separately using the force_merge threadpool. + By default, each node only has a single force_merge thread, which means that the shards on that node are force-merged one at a time. + If you expand the force_merge threadpool on a node, then it will force merge its shards in parallel.

+

Force merge makes the storage for the shard being merged temporarily increase, as it may require free space of up to triple its size if the max_num_segments parameter is set to 1, in order to rewrite all segments into a new one.

+

Data streams and time-based indices

+

Force-merging is useful for managing a data stream's older backing indices and other time-based indices, particularly after a rollover. + In these cases, each index only receives indexing traffic for a certain period of time. + Once an index receives no more writes, its shards can be force-merged to a single segment. + This can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches. + For example:

+
POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1
+          
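The same request expressed through the Python client, assuming the hypothetical client from the earlier sketch:

```python
client.indices.forcemerge(
    index=".ds-my-data-stream-2099.03.07-000001",
    max_num_segments=1,
)
```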
+ ``_ @@ -1941,8 +1995,12 @@ def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index information. Get information about one or more indices. For data streams, - the API returns information about the stream’s backing indices. + .. raw:: html + +

Get index information. + Get information about one or more indices. For data streams, the API returns information about the + stream’s backing indices.

+ ``_ @@ -2031,7 +2089,11 @@ def get_alias( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get aliases. Retrieves information for one or more data stream or index aliases. + .. raw:: html + +

Get aliases. + Retrieves information for one or more data stream or index aliases.

+ ``_ @@ -2113,8 +2175,11 @@ def get_data_lifecycle( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get data stream lifecycles. Retrieves the data stream lifecycle configuration - of one or more data streams. + .. raw:: html + +

Get data stream lifecycles. + Retrieves the data stream lifecycle configuration of one or more data streams.

+ ``_ @@ -2168,8 +2233,11 @@ def get_data_lifecycle_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get data stream lifecycle stats. Get statistics about the data streams that are - managed by a data stream lifecycle. + .. raw:: html + +

Get data stream lifecycle stats. + Get statistics about the data streams that are managed by a data stream lifecycle.

+ ``_ """ @@ -2216,7 +2284,11 @@ def get_data_stream( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get data streams. Retrieves information about one or more data streams. + .. raw:: html + +

Get data streams. + Retrieves information about one or more data streams.

+ ``_ @@ -2291,10 +2363,13 @@ def get_field_mapping( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get mapping definitions. Retrieves mapping definitions for one or more fields. - For data streams, the API retrieves field mappings for the stream’s backing indices. - This API is useful if you don't need a complete mapping or if an index mapping - contains a large number of fields. + .. raw:: html + +

Get mapping definitions. + Retrieves mapping definitions for one or more fields. + For data streams, the API retrieves field mappings for the stream’s backing indices.

+

This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields.

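A sketch of fetching the mapping of a single field; user.id and my-index are hypothetical:

```python
mapping = client.indices.get_field_mapping(fields="user.id", index="my-index")
```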
+ ``_ @@ -2371,7 +2446,11 @@ def get_index_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index templates. Get information about one or more index templates. + .. raw:: html + +

Get index templates. + Get information about one or more index templates.

+ ``_ @@ -2444,8 +2523,11 @@ def get_mapping( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get mapping definitions. For data streams, the API retrieves mappings for the - stream’s backing indices. + .. raw:: html + +

Get mapping definitions. + For data streams, the API retrieves mappings for the stream’s backing indices.

+ ``_ @@ -2529,8 +2611,12 @@ def get_settings( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index settings. Get setting information for one or more indices. For data - streams, it returns setting information for the stream's backing indices. + .. raw:: html + +

Get index settings. + Get setting information for one or more indices. + For data streams, it returns setting information for the stream's backing indices.

+ ``_ @@ -2617,9 +2703,12 @@ def get_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index templates. Get information about one or more index templates. IMPORTANT: - This documentation is about legacy index templates, which are deprecated and - will be replaced by the composable templates introduced in Elasticsearch 7.8. + .. raw:: html + +

Get index templates. + Get information about one or more index templates.

+

IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.

+ ``_ @@ -2678,14 +2767,20 @@ def migrate_to_data_stream( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Convert an index alias to a data stream. Converts an index alias to a data stream. - You must have a matching index template that is data stream enabled. The alias - must meet the following criteria: The alias must have a write index; All indices - for the alias must have a `@timestamp` field mapping of a `date` or `date_nanos` - field type; The alias must not have any filters; The alias must not use custom - routing. If successful, the request removes the alias and creates a data stream - with the same name. The indices for the alias become hidden backing indices for - the stream. The write index for the alias becomes the write index for the stream. + .. raw:: html + +

Convert an index alias to a data stream. + Converts an index alias to a data stream. + You must have a matching index template that is data stream enabled. + The alias must meet the following criteria: + The alias must have a write index; + All indices for the alias must have a @timestamp field mapping of a date or date_nanos field type; + The alias must not have any filters; + The alias must not use custom routing. + If successful, the request removes the alias and creates a data stream with the same name. + The indices for the alias become hidden backing indices for the stream. + The write index for the alias becomes the write index for the stream.

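Illustrative only; my-logs-alias is a hypothetical alias assumed to meet the criteria above:

```python
client.indices.migrate_to_data_stream(name="my-logs-alias")
```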
+ ``_ @@ -2737,8 +2832,11 @@ def modify_data_stream( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update data streams. Performs one or more data stream modification actions in - a single atomic operation. + .. raw:: html + +

Update data streams. + Performs one or more data stream modification actions in a single atomic operation.

+ ``_ @@ -2798,27 +2896,26 @@ def open( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Open a closed index. For data streams, the API opens any closed backing indices. - A closed index is blocked for read/write operations and does not allow all operations - that opened indices allow. It is not possible to index documents or to search - for documents in a closed index. This allows closed indices to not have to maintain - internal data structures for indexing or searching documents, resulting in a - smaller overhead on the cluster. When opening or closing an index, the master - is responsible for restarting the index shards to reflect the new state of the - index. The shards will then go through the normal recovery process. The data - of opened or closed indices is automatically replicated by the cluster to ensure - that enough shard copies are safely kept around at all times. You can open and - close multiple indices. An error is thrown if the request explicitly refers to - a missing index. This behavior can be turned off by using the `ignore_unavailable=true` - parameter. By default, you must explicitly name the indices you are opening or - closing. To open or close indices with `_all`, `*`, or other wildcard expressions, - change the `action.destructive_requires_name` setting to `false`. This setting - can also be changed with the cluster update settings API. Closed indices consume - a significant amount of disk-space which can cause problems in managed environments. - Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` - to `false`. Because opening or closing an index allocates its shards, the `wait_for_active_shards` - setting on index creation applies to the `_open` and `_close` index actions as - well. + .. raw:: html + +

Open a closed index. + For data streams, the API opens any closed backing indices.

+

A closed index is blocked for read/write operations and does not allow all operations that opened indices allow. + It is not possible to index documents or to search for documents in a closed index. + This allows closed indices to not have to maintain internal data structures for indexing or searching documents, resulting in a smaller overhead on the cluster.

+

When opening or closing an index, the master is responsible for restarting the index shards to reflect the new state of the index. + The shards will then go through the normal recovery process. + The data of opened or closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times.

+

You can open and close multiple indices. + An error is thrown if the request explicitly refers to a missing index. + This behavior can be turned off by using the ignore_unavailable=true parameter.

+

By default, you must explicitly name the indices you are opening or closing. + To open or close indices with _all, *, or other wildcard expressions, change the action.destructive_requires_name setting to false. + This setting can also be changed with the cluster update settings API.

+

Closed indices consume a significant amount of disk-space which can cause problems in managed environments. + Closing indices can be turned off with the cluster settings API by setting cluster.indices.close.enable to false.

+

Because opening or closing an index allocates its shards, the wait_for_active_shards setting on index creation applies to the _open and _close index actions as well.

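For illustration, reopening the hypothetical index closed in the earlier sketch:

```python
client.indices.open(index="my-index")
```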
+ ``_ @@ -2893,18 +2990,18 @@ def promote_data_stream( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Promote a data stream. Promote a data stream from a replicated data stream managed - by cross-cluster replication (CCR) to a regular data stream. With CCR auto following, - a data stream from a remote cluster can be replicated to the local cluster. These - data streams can't be rolled over in the local cluster. These replicated data - streams roll over only if the upstream data stream rolls over. In the event that - the remote cluster is no longer available, the data stream in the local cluster - can be promoted to a regular data stream, which allows these data streams to - be rolled over in the local cluster. NOTE: When promoting a data stream, ensure - the local cluster has a data stream enabled index template that matches the data - stream. If this is missing, the data stream will not be able to roll over until - a matching index template is created. This will affect the lifecycle management - of the data stream and interfere with the data stream size and retention. + .. raw:: html + +

Promote a data stream. + Promote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream.

+

With CCR auto following, a data stream from a remote cluster can be replicated to the local cluster. + These data streams can't be rolled over in the local cluster. + These replicated data streams roll over only if the upstream data stream rolls over. + In the event that the remote cluster is no longer available, the data stream in the local cluster can be promoted to a regular data stream, which allows these data streams to be rolled over in the local cluster.

+

NOTE: When promoting a data stream, ensure the local cluster has a data stream enabled index template that matches the data stream. + If this is missing, the data stream will not be able to roll over until a matching index template is created. + This will affect the lifecycle management of the data stream and interfere with the data stream size and retention.

+ ``_ @@ -2966,7 +3063,11 @@ def put_alias( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update an alias. Adds a data stream or index to an alias. + .. raw:: html + +

Create or update an alias. + Adds a data stream or index to an alias.

+ ``_ @@ -3067,8 +3168,11 @@ def put_data_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update data stream lifecycles. Update the data stream lifecycle of the specified - data streams. + .. raw:: html + +

Update data stream lifecycles. + Update the data stream lifecycle of the specified data streams.

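A sketch with the hypothetical client and data stream name; the retention value is an example, and the data_retention keyword reflects the 8.x client signature as I understand it:

```python
# Keep data in the stream's backing indices for at least seven days.
client.indices.put_data_lifecycle(name="my-data-stream", data_retention="7d")
```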
+ ``_ @@ -3160,34 +3264,30 @@ def put_index_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update an index template. Index templates define settings, mappings, - and aliases that can be applied automatically to new indices. Elasticsearch applies - templates to new indices based on an wildcard pattern that matches the index - name. Index templates are applied during data stream or index creation. For data - streams, these settings and mappings are applied when the stream's backing indices - are created. Settings and mappings specified in a create index API request override - any settings or mappings specified in an index template. Changes to index templates - do not affect existing indices, including the existing backing indices of a data - stream. You can use C-style `/* *\\/` block comments in index templates. You - can include comments anywhere in the request body, except before the opening - curly bracket. **Multiple matching templates** If multiple index templates match - the name of a new index or data stream, the template with the highest priority - is used. Multiple templates with overlapping index patterns at the same priority - are not allowed and an error will be thrown when attempting to create a template - matching an existing index template at identical priorities. **Composing aliases, - mappings, and settings** When multiple component templates are specified in the - `composed_of` field for an index template, they are merged in the order specified, - meaning that later component templates override earlier component templates. - Any mappings, settings, or aliases from the parent index template are merged - in next. Finally, any configuration on the index request itself is merged. Mapping - definitions are merged recursively, which means that later mapping components - can introduce new field mappings and update the mapping configuration. If a field - mapping is already contained in an earlier component, its definition will be - completely overwritten by the later one. This recursive merging strategy applies - not only to field mappings, but also root options like `dynamic_templates` and - `meta`. If an earlier component contains a `dynamic_templates` block, then by - default new `dynamic_templates` entries are appended onto the end. If an entry - already exists with the same key, then it is overwritten by the new definition. + .. raw:: html + +

Create or update an index template. + Index templates define settings, mappings, and aliases that can be applied automatically to new indices.

+

Elasticsearch applies templates to new indices based on a wildcard pattern that matches the index name. + Index templates are applied during data stream or index creation. + For data streams, these settings and mappings are applied when the stream's backing indices are created. + Settings and mappings specified in a create index API request override any settings or mappings specified in an index template. + Changes to index templates do not affect existing indices, including the existing backing indices of a data stream.

+

You can use C-style /* *\\/ block comments in index templates. + You can include comments anywhere in the request body, except before the opening curly bracket.

+

Multiple matching templates

+

If multiple index templates match the name of a new index or data stream, the template with the highest priority is used.

+

Multiple templates with overlapping index patterns at the same priority are not allowed and an error will be thrown when attempting to create a template matching an existing index template at identical priorities.

+

Composing aliases, mappings, and settings

+

When multiple component templates are specified in the composed_of field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates. + Any mappings, settings, or aliases from the parent index template are merged in next. + Finally, any configuration on the index request itself is merged. + Mapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration. + If a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one. + This recursive merging strategy applies not only to field mappings, but also root options like dynamic_templates and meta. + If an earlier component contains a dynamic_templates block, then by default new dynamic_templates entries are appended onto the end. + If an entry already exists with the same key, then it is overwritten by the new definition.

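A minimal sketch of a composable template; the pattern, priority, and settings are illustrative only:

```python
client.indices.put_index_template(
    name="my-template",
    index_patterns=["my-index-*"],
    priority=100,
    template={"settings": {"number_of_shards": 1}},
)
```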
+ ``_ @@ -3351,27 +3451,29 @@ def put_mapping( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update field mappings. Add new fields to an existing data stream or index. You - can also use this API to change the search settings of existing fields and add - new properties to existing object fields. For data streams, these changes are - applied to all backing indices by default. **Add multi-fields to an existing - field** Multi-fields let you index the same field in different ways. You can - use this API to update the fields mapping parameter and enable multi-fields for - an existing field. WARNING: If an index (or data stream) contains documents when - you add a multi-field, those documents will not have values for the new multi-field. - You can populate the new multi-field with the update by query API. **Change supported - mapping parameters for an existing field** The documentation for each mapping - parameter indicates whether you can update it for an existing field using this - API. For example, you can use the update mapping API to update the `ignore_above` - parameter. **Change the mapping of an existing field** Except for supported mapping - parameters, you can't change the mapping or field type of an existing field. - Changing an existing field could invalidate data that's already indexed. If you - need to change the mapping of a field in a data stream's backing indices, refer - to documentation about modifying data streams. If you need to change the mapping - of a field in other indices, create a new index with the correct mapping and - reindex your data into that index. **Rename a field** Renaming a field would - invalidate data already indexed under the old field name. Instead, add an alias - field to create an alternate field name. + .. raw:: html + +

Update field mappings. + Add new fields to an existing data stream or index. + You can also use this API to change the search settings of existing fields and add new properties to existing object fields. + For data streams, these changes are applied to all backing indices by default.

+

Add multi-fields to an existing field

+

Multi-fields let you index the same field in different ways. + You can use this API to update the fields mapping parameter and enable multi-fields for an existing field. + WARNING: If an index (or data stream) contains documents when you add a multi-field, those documents will not have values for the new multi-field. + You can populate the new multi-field with the update by query API.

+

Change supported mapping parameters for an existing field

+

The documentation for each mapping parameter indicates whether you can update it for an existing field using this API. + For example, you can use the update mapping API to update the ignore_above parameter.

+

Change the mapping of an existing field

+

Except for supported mapping parameters, you can't change the mapping or field type of an existing field. + Changing an existing field could invalidate data that's already indexed.

+

If you need to change the mapping of a field in a data stream's backing indices, refer to documentation about modifying data streams. + If you need to change the mapping of a field in other indices, create a new index with the correct mapping and reindex your data into that index.

+

Rename a field

+

Renaming a field would invalidate data already indexed under the old field name. + Instead, add an alias field to create an alternate field name.

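A sketch that adds a keyword multi-field to an existing text field and sets ignore_above, as discussed above; the field names are hypothetical:

```python
# Existing documents will not have values for the new multi-field until they are
# reindexed or updated, for example via the update-by-query API.
client.indices.put_mapping(
    index="my-index",
    properties={
        "city": {
            "type": "text",
            "fields": {"raw": {"type": "keyword", "ignore_above": 256}},
        }
    },
)
```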
+ ``_ @@ -3500,21 +3602,23 @@ def put_settings( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update index settings. Changes dynamic index settings in real time. For data - streams, index setting changes are applied to all backing indices by default. - To revert a setting to the default value, use a null value. The list of per-index - settings that can be updated dynamically on live indices can be found in index - module documentation. To preserve existing settings from being updated, set the - `preserve_existing` parameter to `true`. NOTE: You can only define new analyzers - on closed indices. To add an analyzer, you must close the index, define the analyzer, - and reopen the index. You cannot close the write index of a data stream. To update - the analyzer for a data stream's write index and future backing indices, update - the analyzer in the index template used by the stream. Then roll over the data - stream to apply the new analyzer to the stream's write index and future backing - indices. This affects searches and any new data added to the stream after the - rollover. However, it does not affect the data stream's backing indices or their - existing data. To change the analyzer for existing backing indices, you must - create a new data stream and reindex your data into it. + .. raw:: html + +

Update index settings. + Changes dynamic index settings in real time. + For data streams, index setting changes are applied to all backing indices by default.

+

To revert a setting to the default value, use a null value. + The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation. + To preserve existing settings from being updated, set the preserve_existing parameter to true.

+

NOTE: You can only define new analyzers on closed indices. + To add an analyzer, you must close the index, define the analyzer, and reopen the index. + You cannot close the write index of a data stream. + To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream. + Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices. + This affects searches and any new data added to the stream after the rollover. + However, it does not affect the data stream's backing indices or their existing data. + To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.

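Illustrative only, with the hypothetical client; refresh_interval is one of the dynamic settings that can be changed this way:

```python
client.indices.put_settings(
    index="my-index",
    settings={"index": {"refresh_interval": "30s"}},
)
```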
+ ``_ @@ -3618,24 +3722,24 @@ def put_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update an index template. Index templates define settings, mappings, - and aliases that can be applied automatically to new indices. Elasticsearch applies - templates to new indices based on an index pattern that matches the index name. - IMPORTANT: This documentation is about legacy index templates, which are deprecated - and will be replaced by the composable templates introduced in Elasticsearch - 7.8. Composable templates always take precedence over legacy templates. If no - composable template matches a new index, matching legacy templates are applied - according to their order. Index templates are only applied during index creation. - Changes to index templates do not affect existing indices. Settings and mappings - specified in create index API requests override any settings or mappings specified - in an index template. You can use C-style `/* *\\/` block comments in index templates. - You can include comments anywhere in the request body, except before the opening - curly bracket. **Indices matching multiple templates** Multiple index templates - can potentially match an index, in this case, both the settings and mappings - are merged into the final configuration of the index. The order of the merging - can be controlled using the order parameter, with lower order being applied first, - and higher orders overriding them. NOTE: Multiple matching templates with the - same order value will result in a non-deterministic merging order. + .. raw:: html + +

Create or update an index template. + Index templates define settings, mappings, and aliases that can be applied automatically to new indices. + Elasticsearch applies templates to new indices based on an index pattern that matches the index name.

+

IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.

+

Composable templates always take precedence over legacy templates. + If no composable template matches a new index, matching legacy templates are applied according to their order.

+

Index templates are only applied during index creation. + Changes to index templates do not affect existing indices. + Settings and mappings specified in create index API requests override any settings or mappings specified in an index template.

+

You can use C-style /* *\\/ block comments in index templates. + You can include comments anywhere in the request body, except before the opening curly bracket.

+

Indices matching multiple templates

+

Multiple index templates can potentially match an index, in this case, both the settings and mappings are merged into the final configuration of the index. + The order of the merging can be controlled using the order parameter, with lower order being applied first, and higher orders overriding them. + NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order.

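A sketch of a legacy template (deprecated, as noted above); the names and values are made up:

```python
client.indices.put_template(
    name="legacy-logs-template",
    index_patterns=["legacy-logs-*"],
    order=0,
    settings={"number_of_shards": 1},
)
```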
+ ``_ @@ -3716,27 +3820,28 @@ def recovery( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index recovery information. Get information about ongoing and completed shard - recoveries for one or more indices. For data streams, the API returns information - for the stream's backing indices. All recoveries, whether ongoing or complete, - are kept in the cluster state and may be reported on at any time. Shard recovery - is the process of initializing a shard copy, such as restoring a primary shard - from a snapshot or creating a replica shard from a primary shard. When a shard - recovery completes, the recovered shard is available for search and indexing. - Recovery automatically occurs during the following processes: * When creating - an index for the first time. * When a node rejoins the cluster and starts up - any missing primary shard copies using the data that it holds in its data path. - * Creation of new replica shard copies from the primary. * Relocation of a shard - copy to a different node in the same cluster. * A snapshot restore operation. - * A clone, shrink, or split operation. You can determine the cause of a shard - recovery using the recovery or cat recovery APIs. The index recovery API reports - information about completed recoveries only for shard copies that currently exist - in the cluster. It only reports the last recovery for each shard copy and does - not report historical information about earlier recoveries, nor does it report - information about the recoveries of shard copies that no longer exist. This means - that if a shard copy completes a recovery and then Elasticsearch relocates it - onto a different node then the information about the original recovery will not - be shown in the recovery API. + .. raw:: html + +

Get index recovery information. + Get information about ongoing and completed shard recoveries for one or more indices. + For data streams, the API returns information for the stream's backing indices.

+

All recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time.

+

Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. + When a shard recovery completes, the recovered shard is available for search and indexing.

+

Recovery automatically occurs during the following processes:

+
    +
  • When creating an index for the first time.
  • When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path.
  • Creation of new replica shard copies from the primary.
  • Relocation of a shard copy to a different node in the same cluster.
  • A snapshot restore operation.
  • A clone, shrink, or split operation.
+

You can determine the cause of a shard recovery using the recovery or cat recovery APIs.

+

The index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster. + It only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist. + This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API.
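
A hedged sketch of reading the recovery report with the Python client (index name is a placeholder, client as in the earlier sketch; the response layout follows the description above):

    resp = client.indices.recovery(index="my-index")
    for index, info in resp.body.items():
        for shard in info["shards"]:
            # Each entry describes the last recovery of an existing shard copy.
            print(index, shard["id"], shard["stage"])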

+ ``_ @@ -3798,19 +3903,19 @@ def refresh( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Refresh an index. A refresh makes recent operations performed on one or more - indices available for search. For data streams, the API runs the refresh operation - on the stream’s backing indices. By default, Elasticsearch periodically refreshes - indices every second, but only on indices that have received one search request - or more in the last 30 seconds. You can change this default interval with the - `index.refresh_interval` setting. Refresh requests are synchronous and do not - return a response until the refresh operation completes. Refreshes are resource-intensive. - To ensure good cluster performance, it's recommended to wait for Elasticsearch's - periodic refresh rather than performing an explicit refresh when possible. If - your application workflow indexes documents and then runs a search to retrieve - the indexed document, it's recommended to use the index API's `refresh=wait_for` - query parameter option. This option ensures the indexing operation waits for - a periodic refresh before running the search. + .. raw:: html + +

Refresh an index. + A refresh makes recent operations performed on one or more indices available for search. + For data streams, the API runs the refresh operation on the stream’s backing indices.

+

By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds. + You can change this default interval with the index.refresh_interval setting.

+

Refresh requests are synchronous and do not return a response until the refresh operation completes.

+

Refreshes are resource-intensive. + To ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.

+

If your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's refresh=wait_for query parameter option. + This option ensures the indexing operation waits for a periodic refresh before running the search.
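
For example, with the Python client (index name and document are placeholders, client as in the earlier sketch):

    # Explicit refresh; use sparingly, since refreshes are resource-intensive.
    client.indices.refresh(index="my-index")

    # Preferred for index-then-search workflows: wait for a periodic refresh.
    client.index(index="my-index", document={"title": "hello"}, refresh="wait_for")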

+ ``_ @@ -3880,21 +3985,20 @@ def reload_search_analyzers( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Reload search analyzers. Reload an index's search analyzers and their resources. - For data streams, the API reloads search analyzers and resources for the stream's - backing indices. IMPORTANT: After reloading the search analyzers you should clear - the request cache to make sure it doesn't contain responses derived from the - previous versions of the analyzer. You can use the reload search analyzers API - to pick up changes to synonym files used in the `synonym_graph` or `synonym` - token filter of a search analyzer. To be eligible, the token filter must have - an `updateable` flag of `true` and only be used in search analyzers. NOTE: This - API does not perform a reload for each shard of an index. Instead, it performs - a reload for each node containing index shards. As a result, the total shard - count returned by the API can differ from the number of index shards. Because - reloading affects every node with an index shard, it is important to update the - synonym file on every data node in the cluster--including nodes that don't contain - a shard replica--before using this API. This ensures the synonym file is updated - everywhere in the cluster in case shards are relocated in the future. + .. raw:: html + +

Reload search analyzers. + Reload an index's search analyzers and their resources. + For data streams, the API reloads search analyzers and resources for the stream's backing indices.

+

IMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer.

+

You can use the reload search analyzers API to pick up changes to synonym files used in the synonym_graph or synonym token filter of a search analyzer. + To be eligible, the token filter must have an updateable flag of true and only be used in search analyzers.

+

NOTE: This API does not perform a reload for each shard of an index. + Instead, it performs a reload for each node containing index shards. + As a result, the total shard count returned by the API can differ from the number of index shards. + Because reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster--including nodes that don't contain a shard replica--before using this API. + This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future.
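
A minimal sketch, assuming a search analyzer on the placeholder index uses an updateable synonym filter and the synonym file was already updated on every data node:

    # Reload the search analyzers for the index.
    client.indices.reload_search_analyzers(index="my-index")

    # Then clear the request cache so stale responses are not served.
    client.indices.clear_cache(index="my-index", request=True)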

+ ``_ @@ -3958,38 +4062,33 @@ def resolve_cluster( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resolve the cluster. Resolve the specified index expressions to return information - about each cluster, including the local cluster, if included. Multiple patterns - and remote clusters are supported. This endpoint is useful before doing a cross-cluster - search in order to determine which remote clusters should be included in a search. - You use the same index expression with this endpoint as you would for cross-cluster - search. Index and cluster exclusions are also supported with this endpoint. For - each cluster in the index expression, information is returned about: * Whether - the querying ("local") cluster is currently connected to each remote cluster - in the index expression scope. * Whether each remote cluster is configured with - `skip_unavailable` as `true` or `false`. * Whether there are any indices, aliases, - or data streams on that cluster that match the index expression. * Whether the - search is likely to have errors returned when you do the cross-cluster search - (including any authorization errors if you do not have permission to query the - index). * Cluster version information, including the Elasticsearch server version. - For example, `GET /_resolve/cluster/my-index-*,cluster*:my-index-*` returns information - about the local cluster and all remotely configured clusters that start with - the alias `cluster*`. Each cluster returns information about whether it has any - indices, aliases or data streams that match `my-index-*`. **Advantages of using - this endpoint before a cross-cluster search** You may want to exclude a cluster - or index from a search when: * A remote cluster is not currently connected and - is configured with `skip_unavailable=false`. Running a cross-cluster search under - those conditions will cause the entire search to fail. * A cluster has no matching - indices, aliases or data streams for the index expression (or your user does - not have permissions to search them). For example, suppose your index expression - is `logs*,remote1:logs*` and the remote1 cluster has no indices, aliases or data - streams that match `logs*`. In that case, that cluster will return no results - from that cluster if you include it in a cross-cluster search. * The index expression - (combined with any query parameters you specify) will likely cause an exception - to be thrown when you do the search. In these cases, the "error" field in the - `_resolve/cluster` response will be present. (This is also where security/permission - errors will be shown.) * A remote cluster is an older version that does not support - the feature you want to use in your search. + .. raw:: html + +

Resolve the cluster. + Resolve the specified index expressions to return information about each cluster, including the local cluster, if included. + Multiple patterns and remote clusters are supported.

+

This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search.

+

You use the same index expression with this endpoint as you would for cross-cluster search. + Index and cluster exclusions are also supported with this endpoint.

+

For each cluster in the index expression, information is returned about:

+
    +
  • Whether the querying ("local") cluster is currently connected to each remote cluster in the index expression scope.
  • Whether each remote cluster is configured with skip_unavailable as true or false.
  • Whether there are any indices, aliases, or data streams on that cluster that match the index expression.
  • Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).
  • Cluster version information, including the Elasticsearch server version.
+

For example, GET /_resolve/cluster/my-index-*,cluster*:my-index-* returns information about the local cluster and all remotely configured clusters that start with the alias cluster*. + Each cluster returns information about whether it has any indices, aliases or data streams that match my-index-*.

+

Advantages of using this endpoint before a cross-cluster search

+

You may want to exclude a cluster or index from a search when:

+
    +
  • A remote cluster is not currently connected and is configured with skip_unavailable=false. Running a cross-cluster search under those conditions will cause the entire search to fail.
  • A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is logs*,remote1:logs* and the remote1 cluster has no indices, aliases or data streams that match logs*. In that case, that cluster returns no results if you include it in a cross-cluster search.
  • The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the _resolve/cluster response will be present. (This is also where security/permission errors will be shown.)
  • A remote cluster is an older version that does not support the feature you want to use in your search.
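
A hedged sketch of checking clusters before a cross-cluster search with the Python client (the index expression is a placeholder; the field names follow the response fields described above):

    resp = client.indices.resolve_cluster(name="my-index-*,cluster*:my-index-*")
    for cluster, info in resp.body.items():
        # "error" is only present when the search would likely fail for this cluster.
        print(cluster, info.get("connected"), info.get("matching_indices"), info.get("error"))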
+ ``_ @@ -4062,8 +4161,12 @@ def resolve_index( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Resolve indices. Resolve the names and/or index patterns for indices, aliases, - and data streams. Multiple patterns and remote clusters are supported. + .. raw:: html + +

Resolve indices. + Resolve the names and/or index patterns for indices, aliases, and data streams. + Multiple patterns and remote clusters are supported.

+ ``_ @@ -4136,33 +4239,35 @@ def rollover( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Roll over to a new index. TIP: It is recommended to use the index lifecycle rollover - action to automate rollovers. The rollover API creates a new index for a data - stream or index alias. The API behavior depends on the rollover target. **Roll - over a data stream** If you roll over a data stream, the API creates a new write - index for the stream. The stream's previous write index becomes a regular backing - index. A rollover also increments the data stream's generation. **Roll over an - index alias with a write index** TIP: Prior to Elasticsearch 7.9, you'd typically - use an index alias with a write index to manage time series data. Data streams - replace this functionality, require less maintenance, and automatically integrate - with data tiers. If an index alias points to multiple indices, one of the indices - must be a write index. The rollover API creates a new write index for the alias - with `is_write_index` set to `true`. The API also `sets is_write_index` to `false` - for the previous write index. **Roll over an index alias with one index** If - you roll over an index alias that points to only one index, the API creates a - new index for the alias and removes the original index from the alias. NOTE: - A rollover creates a new index and is subject to the `wait_for_active_shards` - setting. **Increment index names for an alias** When you roll over an index alias, - you can specify a name for the new index. If you don't specify a name and the - current index ends with `-` and a number, such as `my-index-000001` or `my-index-3`, - the new index name increments that number. For example, if you roll over an alias - with a current index of `my-index-000001`, the rollover creates a new index named - `my-index-000002`. This number is always six characters and zero-padded, regardless - of the previous index's name. If you use an index alias for time series data, - you can use date math in the index name to track the rollover date. For example, - you can create an alias that points to an index named ``. - If you create the index on May 6, 2099, the index's name is `my-index-2099.05.06-000001`. - If you roll over the alias on May 7, 2099, the new index's name is `my-index-2099.05.07-000002`. + .. raw:: html + +

Roll over to a new index. + TIP: It is recommended to use the index lifecycle rollover action to automate rollovers.

+

The rollover API creates a new index for a data stream or index alias. + The API behavior depends on the rollover target.

+

Roll over a data stream

+

If you roll over a data stream, the API creates a new write index for the stream. + The stream's previous write index becomes a regular backing index. + A rollover also increments the data stream's generation.

+

Roll over an index alias with a write index

+

TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data. + Data streams replace this functionality, require less maintenance, and automatically integrate with data tiers.

+

If an index alias points to multiple indices, one of the indices must be a write index. + The rollover API creates a new write index for the alias with is_write_index set to true. + The API also sets is_write_index to false for the previous write index.

+

Roll over an index alias with one index

+

If you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias.

+

NOTE: A rollover creates a new index and is subject to the wait_for_active_shards setting.

+

Increment index names for an alias

+

When you roll over an index alias, you can specify a name for the new index. + If you don't specify a name and the current index ends with - and a number, such as my-index-000001 or my-index-3, the new index name increments that number. + For example, if you roll over an alias with a current index of my-index-000001, the rollover creates a new index named my-index-000002. + This number is always six characters and zero-padded, regardless of the previous index's name.

+

If you use an index alias for time series data, you can use date math in the index name to track the rollover date. + For example, you can create an alias that points to an index named <my-index-{now/d}-000001>. + If you create the index on May 6, 2099, the index's name is my-index-2099.05.06-000001. + If you roll over the alias on May 7, 2099, the new index's name is my-index-2099.05.07-000002.
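
A minimal sketch with the Python client (alias name and rollover conditions are placeholders):

    # Roll over the write index behind an alias or data stream when any condition is met.
    client.indices.rollover(
        alias="my-alias",
        conditions={"max_age": "7d", "max_docs": 10_000_000},
    )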

+ ``_ @@ -4267,9 +4372,12 @@ def segments( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index segments. Get low-level information about the Lucene segments in index - shards. For data streams, the API returns information about the stream's backing - indices. + .. raw:: html + +

Get index segments. + Get low-level information about the Lucene segments in index shards. + For data streams, the API returns information about the stream's backing indices.

+ ``_ @@ -4348,14 +4456,20 @@ def shard_stores( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index shard stores. Get store information about replica shards in one or - more indices. For data streams, the API retrieves store information for the stream's - backing indices. The index shard stores API returns the following information: - * The node on which each replica shard exists. * The allocation ID for each replica - shard. * A unique ID for each replica shard. * Any errors encountered while opening - the shard index or from an earlier failure. By default, the API returns store - information only for primary shards that are unassigned or have one or more unassigned - replica shards. + .. raw:: html + +

Get index shard stores. + Get store information about replica shards in one or more indices. + For data streams, the API retrieves store information for the stream's backing indices.

+

The index shard stores API returns the following information:

+
    +
  • The node on which each replica shard exists.
  • The allocation ID for each replica shard.
  • A unique ID for each replica shard.
  • Any errors encountered while opening the shard index or from an earlier failure.
+

By default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards.
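
A hedged sketch with the Python client (index name is a placeholder; status="all" is assumed to widen the report beyond unassigned copies, per the description above):

    resp = client.indices.shard_stores(index="my-index", status="all")
    # Store information is grouped per index and per shard number.
    print(resp["indices"]["my-index"]["shards"])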

+ ``_ @@ -4426,39 +4540,38 @@ def shrink( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Shrink an index. Shrink an index into a new index with fewer primary shards. - Before you can shrink an index: * The index must be read-only. * A copy of every - shard in the index must reside on the same node. * The index must have a green - health status. To make shard allocation easier, we recommend you also remove - the index's replica shards. You can later re-add replica shards as part of the - shrink operation. The requested number of primary shards in the target index - must be a factor of the number of shards in the source index. For example an - index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an - index with 15 primary shards can be shrunk into 5, 3 or 1. If the number of shards - in the index is a prime number it can only be shrunk into a single primary shard - Before shrinking, a (primary or replica) copy of every shard in the index must - be present on the same node. The current write index on a data stream cannot - be shrunk. In order to shrink the current write index, the data stream must first - be rolled over so that a new write index is created and then the previous write - index can be shrunk. A shrink operation: * Creates a new target index with the - same definition as the source index, but with a smaller number of primary shards. - * Hard-links segments from the source index into the target index. If the file - system does not support hard-linking, then all segments are copied into the new - index, which is a much more time consuming process. Also if using multiple data - paths, shards on different data paths require a full copy of segment files if - they are not on the same disk since hardlinks do not work across disks. * Recovers - the target index as though it were a closed index which had just been re-opened. - Recovers shards to the `.routing.allocation.initial_recovery._id` index setting. - IMPORTANT: Indices can only be shrunk if they satisfy the following requirements: - * The target index must not exist. * The source index must have more primary - shards than the target index. * The number of primary shards in the target index - must be a factor of the number of primary shards in the source index. The source - index must have more primary shards than the target index. * The index must not - contain more than 2,147,483,519 documents in total across all shards that will - be shrunk into a single shard on the target index as this is the maximum number - of docs that can fit into a single shard. * The node handling the shrink process - must have sufficient free disk space to accommodate a second copy of the existing - index. + .. raw:: html + +

Shrink an index. + Shrink an index into a new index with fewer primary shards.

+

Before you can shrink an index:

+
    +
  • The index must be read-only.
  • A copy of every shard in the index must reside on the same node.
  • The index must have a green health status.
+

To make shard allocation easier, we recommend you also remove the index's replica shards. + You can later re-add replica shards as part of the shrink operation.

+

The requested number of primary shards in the target index must be a factor of the number of shards in the source index. For example, an index with 8 primary shards can be shrunk into 4, 2, or 1 primary shards, and an index with 15 primary shards can be shrunk into 5, 3, or 1. If the number of shards in the index is a prime number, it can only be shrunk into a single primary shard. Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node.

+

The current write index on a data stream cannot be shrunk. In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk.

+

A shrink operation:

+
    +
  • Creates a new target index with the same definition as the source index, but with a smaller number of primary shards.
  • Hard-links segments from the source index into the target index. If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time consuming process. Also if using multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk since hardlinks do not work across disks.
  • Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the .routing.allocation.initial_recovery._id index setting.
+

IMPORTANT: Indices can only be shrunk if they satisfy the following requirements:

+
    +
  • The target index must not exist.
  • The source index must have more primary shards than the target index.
  • The number of primary shards in the target index must be a factor of the number of primary shards in the source index. The source index must have more primary shards than the target index.
  • The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index as this is the maximum number of docs that can fit into a single shard.
  • The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index.
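
A sketch of the preparation steps and the shrink call with the Python client; the index names, node name, and shard counts are placeholders:

    # 1. Make the source index read-only and co-locate a copy of every shard.
    client.indices.add_block(index="my-index", block="write")
    client.indices.put_settings(
        index="my-index",
        settings={
            "index.routing.allocation.require._name": "shrink-node",
            "index.number_of_replicas": 0,
        },
    )

    # 2. Shrink into a target with fewer primary shards (a factor of the source's).
    client.indices.shrink(
        index="my-index",
        target="my-index-shrunk",
        settings={
            "index.number_of_shards": 1,
            # Clear the allocation requirement copied from the source index.
            "index.routing.allocation.require._name": None,
        },
    )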
+ ``_ @@ -4533,8 +4646,11 @@ def simulate_index_template( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Simulate an index. Get the index configuration that would be applied to the specified - index from an existing index template. + .. raw:: html + +

Simulate an index. + Get the index configuration that would be applied to the specified index from an existing index template.

+ ``_ @@ -4611,8 +4727,11 @@ def simulate_template( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Simulate an index template. Get the index configuration that would be applied - by a particular index template. + .. raw:: html + +

Simulate an index template. + Get the index configuration that would be applied by a particular index template.

+ ``_ @@ -4743,31 +4862,44 @@ def split( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Split an index. Split an index into a new index with more primary shards. * Before - you can split an index: * The index must be read-only. * The cluster health status - must be green. You can do make an index read-only with the following request - using the add index block API: ``` PUT /my_source_index/_block/write ``` The - current write index on a data stream cannot be split. In order to split the current - write index, the data stream must first be rolled over so that a new write index - is created and then the previous write index can be split. The number of times - the index can be split (and the number of shards that each original shard can - be split into) is determined by the `index.number_of_routing_shards` setting. - The number of routing shards specifies the hashing space that is used internally - to distribute documents across shards with consistent hashing. For instance, - a 5 shard index with `number_of_routing_shards` set to 30 (5 x 2 x 3) could be - split by a factor of 2 or 3. A split operation: * Creates a new target index - with the same definition as the source index, but with a larger number of primary - shards. * Hard-links segments from the source index into the target index. If - the file system doesn't support hard-linking, all segments are copied into the - new index, which is a much more time consuming process. * Hashes all documents - again, after low level files are created, to delete documents that belong to - a different shard. * Recovers the target index as though it were a closed index - which had just been re-opened. IMPORTANT: Indices can only be split if they satisfy - the following requirements: * The target index must not exist. * The source index - must have fewer primary shards than the target index. * The number of primary - shards in the target index must be a multiple of the number of primary shards - in the source index. * The node handling the split process must have sufficient - free disk space to accommodate a second copy of the existing index. + .. raw:: html + +

Split an index. + Split an index into a new index with more primary shards.

+
    +
Before you can split an index:

  • The index must be read-only.
  • The cluster health status must be green.

You can make an index read-only with the following request using the add index block API:

+
PUT /my_source_index/_block/write
+          
+

The current write index on a data stream cannot be split. + In order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split.

+

The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the index.number_of_routing_shards setting. + The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing. + For instance, a 5 shard index with number_of_routing_shards set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.

+

A split operation:

+
    +
  • Creates a new target index with the same definition as the source index, but with a larger number of primary shards.
  • Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process.
  • Hashes all documents again, after low level files are created, to delete documents that belong to a different shard.
  • Recovers the target index as though it were a closed index which had just been re-opened.
+

IMPORTANT: Indices can only be split if they satisfy the following requirements:

+
    +
  • The target index must not exist.
  • The source index must have fewer primary shards than the target index.
  • The number of primary shards in the target index must be a multiple of the number of primary shards in the source index.
  • The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.
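
A minimal sketch of the full sequence with the Python client; the index names and shard count are placeholders:

    # 1. Block writes on the source index.
    client.indices.add_block(index="my_source_index", block="write")

    # 2. Split into a target whose primary shard count is a multiple of the source's.
    client.indices.split(
        index="my_source_index",
        target="my_target_index",
        settings={"index.number_of_shards": 4},
    )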
+ ``_ @@ -4859,14 +4991,17 @@ def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get index statistics. For data streams, the API retrieves statistics for the - stream's backing indices. By default, the returned statistics are index-level - with `primaries` and `total` aggregations. `primaries` are the values for only - the primary shards. `total` are the accumulated values for both primary and replica - shards. To get shard-level statistics, set the `level` parameter to `shards`. - NOTE: When moving to another node, the shard-level statistics for a shard are - cleared. Although the shard is no longer part of the node, that node retains - any node-level statistics to which the shard contributed. + .. raw:: html + +

Get index statistics. + For data streams, the API retrieves statistics for the stream's backing indices.

+

By default, the returned statistics are index-level with primaries and total aggregations. + primaries are the values for only the primary shards. + total are the accumulated values for both primary and replica shards.

+

To get shard-level statistics, set the level parameter to shards.

+

NOTE: When moving to another node, the shard-level statistics for a shard are cleared. + Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.
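
A hedged sketch with the Python client (index name is a placeholder; the response layout follows the primaries/total structure described above):

    resp = client.indices.stats(index="my-index", level="shards")
    totals = resp["indices"]["my-index"]["total"]
    print(totals["docs"]["count"], totals["store"]["size_in_bytes"])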

+ ``_ @@ -4969,8 +5104,11 @@ def unfreeze( wait_for_active_shards: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Unfreeze an index. When a frozen index is unfrozen, the index goes through the - normal recovery process and becomes writeable again. + .. raw:: html + +

Unfreeze an index. + When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again.

+ ``_ @@ -5044,7 +5182,11 @@ def update_aliases( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update an alias. Adds a data stream or index to an alias. + .. raw:: html + +

Create or update an alias. + Adds a data stream or index to an alias.
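
For example, atomically swapping an alias between two indices with the Python client (names are placeholders):

    client.indices.update_aliases(
        actions=[
            {"remove": {"index": "my-index-000001", "alias": "my-alias"}},
            {"add": {"index": "my-index-000002", "alias": "my-alias"}},
        ]
    )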

+ ``_ @@ -5119,7 +5261,11 @@ def validate_query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Validate a query. Validates a query without running it. + .. raw:: html + +

Validate a query. + Validates a query without running it.
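
A minimal sketch with the Python client (index, field, and query text are placeholders):

    resp = client.indices.validate_query(
        index="my-index",
        query={"match": {"message": "this is a test"}},
        explain=True,  # include an explanation when the query is invalid
    )
    print(resp["valid"])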

+ ``_ diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py index 553789086..5430e7283 100644 --- a/elasticsearch/_sync/client/inference.py +++ b/elasticsearch/_sync/client/inference.py @@ -44,7 +44,10 @@ def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete an inference endpoint + .. raw:: html + +

Delete an inference endpoint

+ ``_ @@ -109,7 +112,10 @@ def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get an inference endpoint + .. raw:: html + +

Get an inference endpoint

+ ``_ @@ -172,7 +178,10 @@ def inference( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Perform inference on the service + .. raw:: html + +

Perform inference on the service

+ ``_ @@ -255,21 +264,18 @@ def put( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Create an inference endpoint. When you create an inference endpoint, the associated - machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before - using it. To verify the deployment status, use the get trained model statistics - API. Look for `"state": "fully_allocated"` in the response and ensure that the - `"allocation_count"` matches the `"target_allocation_count"`. Avoid creating - multiple endpoints for the same model unless required, as each endpoint consumes - significant resources. IMPORTANT: The inference APIs enable you to use certain - services, such as built-in machine learning models (ELSER, E5), models uploaded - through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google - Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models - uploaded through Eland, the inference APIs offer an alternative way to use and - manage trained models. However, if you do not plan to use the inference APIs - to use these models or if you want to use non-NLP models, use the machine learning - trained model APIs. + .. raw:: html + +

Create an inference endpoint. + When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. + After creating the endpoint, wait for the model deployment to complete before using it. + To verify the deployment status, use the get trained model statistics API. + Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". + Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

+

IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. + For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. + However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
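
A hedged sketch of creating an ELSER endpoint with the Python client; the endpoint id is a placeholder, and it is assumed here that the endpoint definition is passed through the inference_config body parameter:

    client.inference.put(
        task_type="sparse_embedding",
        inference_id="my-elser-endpoint",  # placeholder id
        inference_config={
            "service": "elser",
            "service_settings": {"num_allocations": 1, "num_threads": 1},
        },
    )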

+ ``_ @@ -339,16 +345,14 @@ def update( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Update an inference endpoint. Modify `task_settings`, secrets (within `service_settings`), - or `num_allocations` for an inference endpoint, depending on the specific endpoint - service and `task_type`. IMPORTANT: The inference APIs enable you to use certain - services, such as built-in machine learning models (ELSER, E5), models uploaded - through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, - Watsonx.ai, or Hugging Face. For built-in models and models uploaded through - Eland, the inference APIs offer an alternative way to use and manage trained - models. However, if you do not plan to use the inference APIs to use these models - or if you want to use non-NLP models, use the machine learning trained model - APIs. + .. raw:: html + +

Update an inference endpoint.

+

Modify task_settings, secrets (within service_settings), or num_allocations for an inference endpoint, depending on the specific endpoint service and task_type.

+

IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. + For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. + However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.

+ ``_ diff --git a/elasticsearch/_sync/client/ingest.py b/elasticsearch/_sync/client/ingest.py index fb8f7a35f..0a909bc6b 100644 --- a/elasticsearch/_sync/client/ingest.py +++ b/elasticsearch/_sync/client/ingest.py @@ -38,8 +38,11 @@ def delete_geoip_database( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete GeoIP database configurations. Delete one or more IP geolocation database - configurations. + .. raw:: html + +

Delete GeoIP database configurations. + Delete one or more IP geolocation database configurations.

+ ``_ @@ -90,7 +93,10 @@ def delete_ip_location_database( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete IP geolocation database configurations. + .. raw:: html + +

Delete IP geolocation database configurations.

+ ``_ @@ -143,7 +149,11 @@ def delete_pipeline( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete pipelines. Delete one or more ingest pipelines. + .. raw:: html + +

Delete pipelines. + Delete one or more ingest pipelines.

+ ``_ @@ -192,8 +202,11 @@ def geo_ip_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get GeoIP statistics. Get download statistics for GeoIP2 databases that are used - with the GeoIP processor. + .. raw:: html + +

Get GeoIP statistics. + Get download statistics for GeoIP2 databases that are used with the GeoIP processor.

+ ``_ """ @@ -229,8 +242,11 @@ def get_geoip_database( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get GeoIP database configurations. Get information about one or more IP geolocation - database configurations. + .. raw:: html + +

Get GeoIP database configurations. + Get information about one or more IP geolocation database configurations.

+ ``_ @@ -276,7 +292,10 @@ def get_ip_location_database( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get IP geolocation database configurations. + .. raw:: html + +

Get IP geolocation database configurations.

+ ``_ @@ -329,8 +348,12 @@ def get_pipeline( summary: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get pipelines. Get information about one or more ingest pipelines. This API returns - a local reference of the pipeline. + .. raw:: html + +

Get pipelines. + Get information about one or more ingest pipelines. + This API returns a local reference of the pipeline.

+ ``_ @@ -381,10 +404,13 @@ def processor_grok( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Run a grok processor. Extract structured fields out of a single text field within - a document. You must choose which field to extract matched fields from, as well - as the grok pattern you expect will match. A grok pattern is like a regular expression - that supports aliased expressions that can be reused. + .. raw:: html + +

Run a grok processor. + Extract structured fields out of a single text field within a document. + You must choose which field to extract matched fields from, as well as the grok pattern you expect will match. + A grok pattern is like a regular expression that supports aliased expressions that can be reused.

+ ``_ """ @@ -427,8 +453,11 @@ def put_geoip_database( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a GeoIP database configuration. Refer to the create or update - IP geolocation database configuration API. + .. raw:: html + +

Create or update a GeoIP database configuration. + Refer to the create or update IP geolocation database configuration API.

+ ``_ @@ -500,7 +529,10 @@ def put_ip_location_database( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update an IP geolocation database configuration. + .. raw:: html + +

Create or update an IP geolocation database configuration.

+ ``_ @@ -582,7 +614,11 @@ def put_pipeline( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a pipeline. Changes made using this API take effect immediately. + .. raw:: html + +

Create or update a pipeline. + Changes made using this API take effect immediately.
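
A minimal sketch with the Python client (pipeline id and processor are placeholders):

    client.ingest.put_pipeline(
        id="my-pipeline",
        description="Tag incoming documents",
        processors=[{"set": {"field": "env", "value": "production"}}],
    )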

+ ``_ @@ -674,9 +710,12 @@ def simulate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Simulate a pipeline. Run an ingest pipeline against a set of provided documents. - You can either specify an existing pipeline to use with the provided documents - or supply a pipeline definition in the body of the request. + .. raw:: html + +

Simulate a pipeline. + Run an ingest pipeline against a set of provided documents. + You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.
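
For example, simulating an inline pipeline definition against a sample document with the Python client (field names and values are placeholders):

    resp = client.ingest.simulate(
        pipeline={"processors": [{"lowercase": {"field": "name"}}]},
        docs=[{"_source": {"name": "ELASTICSEARCH"}}],
    )
    # Each entry in "docs" holds the document as it would leave the pipeline.
    print(resp["docs"][0]["doc"]["_source"]["name"])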

+ ``_ diff --git a/elasticsearch/_sync/client/license.py b/elasticsearch/_sync/client/license.py index 03462e864..dac9f6b88 100644 --- a/elasticsearch/_sync/client/license.py +++ b/elasticsearch/_sync/client/license.py @@ -37,9 +37,12 @@ def delete( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete the license. When the license expires, your subscription level reverts - to Basic. If the operator privileges feature is enabled, only operator users - can use this API. + .. raw:: html + +

Delete the license. + When the license expires, your subscription level reverts to Basic.

+

If the operator privileges feature is enabled, only operator users can use this API.

+ ``_ @@ -84,11 +87,13 @@ def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get license information. Get information about your Elastic license including - its type, its status, when it was issued, and when it expires. NOTE: If the master - node is generating a new cluster state, the get license API may return a `404 - Not Found` response. If you receive an unexpected 404 response after cluster - startup, wait a short period and retry the request. + .. raw:: html + +

Get license information. + Get information about your Elastic license including its type, its status, when it was issued, and when it expires.

+

NOTE: If the master node is generating a new cluster state, the get license API may return a 404 Not Found response. + If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request.

+ ``_ @@ -134,7 +139,10 @@ def get_basic_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the basic license status. + .. raw:: html + +

Get the basic license status.

+ ``_ """ @@ -169,7 +177,10 @@ def get_trial_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the trial status. + .. raw:: html + +

Get the trial status.

+ ``_ """ @@ -212,14 +223,16 @@ def post( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update the license. You can update your license at runtime without shutting down - your nodes. License updates take effect immediately. If the license you are installing - does not support all of the features that were available with your previous license, - however, you are notified in the response. You must then re-submit the API request - with the acknowledge parameter set to true. NOTE: If Elasticsearch security features - are enabled and you are installing a gold or higher license, you must enable - TLS on the transport networking layer before you install the license. If the - operator privileges feature is enabled, only operator users can use this API. + .. raw:: html + +

Update the license. + You can update your license at runtime without shutting down your nodes. + License updates take effect immediately. + If the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response. + You must then re-submit the API request with the acknowledge parameter set to true.

+

NOTE: If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license. + If the operator privileges feature is enabled, only operator users can use this API.

+ ``_ @@ -282,13 +295,15 @@ def post_start_basic( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Start a basic license. Start an indefinite basic license, which gives access - to all the basic features. NOTE: In order to start a basic license, you must - not currently have a basic license. If the basic license does not support all - of the features that are available with your current license, however, you are - notified in the response. You must then re-submit the API request with the `acknowledge` - parameter set to `true`. To check the status of your basic license, use the get - basic license API. + .. raw:: html + +

Start a basic license. + Start an indefinite basic license, which gives access to all the basic features.

+

NOTE: In order to start a basic license, you must not currently have a basic license.

+

If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. + You must then re-submit the API request with the acknowledge parameter set to true.

+

To check the status of your basic license, use the get basic license API.

+ ``_ @@ -338,12 +353,14 @@ def post_start_trial( type_query_string: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Start a trial. Start a 30-day trial, which gives access to all subscription features. - NOTE: You are allowed to start a trial only if your cluster has not already activated - a trial for the current major product version. For example, if you have already - activated a trial for v8.0, you cannot start a new trial until v9.0. You can, - however, request an extended trial at https://www.elastic.co/trialextension. - To check the status of your trial, use the get trial status API. + .. raw:: html + +

Start a trial. + Start a 30-day trial, which gives access to all subscription features.

+

NOTE: You are allowed to start a trial only if your cluster has not already activated a trial for the current major product version. + For example, if you have already activated a trial for v8.0, you cannot start a new trial until v9.0. You can, however, request an extended trial at https://www.elastic.co/trialextension.

+

To check the status of your trial, use the get trial status API.
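
A minimal sketch with the Python client:

    # Start the 30-day trial; acknowledge=True confirms the license change.
    client.license.post_start_trial(acknowledge=True)

    # Check the trial status afterwards.
    print(client.license.get_trial_status())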

+ ``_ diff --git a/elasticsearch/_sync/client/logstash.py b/elasticsearch/_sync/client/logstash.py index 7bd02551f..e329058a3 100644 --- a/elasticsearch/_sync/client/logstash.py +++ b/elasticsearch/_sync/client/logstash.py @@ -36,9 +36,12 @@ def delete_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a Logstash pipeline. Delete a pipeline that is used for Logstash Central - Management. If the request succeeds, you receive an empty response with an appropriate - status code. + .. raw:: html + +

Delete a Logstash pipeline. + Delete a pipeline that is used for Logstash Central Management. + If the request succeeds, you receive an empty response with an appropriate status code.

+ ``_ @@ -78,7 +81,11 @@ def get_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get Logstash pipelines. Get pipelines that are used for Logstash Central Management. + .. raw:: html + +

Get Logstash pipelines. + Get pipelines that are used for Logstash Central Management.

+ ``_ @@ -125,8 +132,12 @@ def put_pipeline( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a Logstash pipeline. Create a pipeline that is used for Logstash - Central Management. If the specified pipeline exists, it is replaced. + .. raw:: html + +

Create or update a Logstash pipeline.

+

Create a pipeline that is used for Logstash Central Management. + If the specified pipeline exists, it is replaced.

+ ``_ diff --git a/elasticsearch/_sync/client/migration.py b/elasticsearch/_sync/client/migration.py index a1d3160c5..a9476162c 100644 --- a/elasticsearch/_sync/client/migration.py +++ b/elasticsearch/_sync/client/migration.py @@ -36,10 +36,13 @@ def deprecations( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get deprecation information. Get information about different cluster, node, and - index level settings that use deprecated features that will be removed or changed - in the next major version. TIP: This APIs is designed for indirect use by the - Upgrade Assistant. You are strongly recommended to use the Upgrade Assistant. + .. raw:: html + +

Get deprecation information. + Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.

+

TIP: This API is designed for indirect use by the Upgrade Assistant. You are strongly recommended to use the Upgrade Assistant.

+ ``_ @@ -82,11 +85,14 @@ def get_feature_upgrade_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get feature migration information. Version upgrades sometimes require changes - to how features store configuration information and data in system indices. Check - which features need to be migrated and the status of any migrations that are - in progress. TIP: This API is designed for indirect use by the Upgrade Assistant. - You are strongly recommended to use the Upgrade Assistant. + .. raw:: html + +

Get feature migration information. + Version upgrades sometimes require changes to how features store configuration information and data in system indices. + Check which features need to be migrated and the status of any migrations that are in progress.

+

TIP: This API is designed for indirect use by the Upgrade Assistant. + You are strongly recommended to use the Upgrade Assistant.

+ ``_ """ @@ -121,11 +127,14 @@ def post_feature_upgrade( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Start the feature migration. Version upgrades sometimes require changes to how - features store configuration information and data in system indices. This API - starts the automatic migration process. Some functionality might be temporarily - unavailable during the migration process. TIP: The API is designed for indirect - use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant. + .. raw:: html + +

Start the feature migration. + Version upgrades sometimes require changes to how features store configuration information and data in system indices. + This API starts the automatic migration process.

+

Some functionality might be temporarily unavailable during the migration process.

+

TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.

+ ``_ """ diff --git a/elasticsearch/_sync/client/ml.py b/elasticsearch/_sync/client/ml.py index 6013c1dc7..f10b79e2e 100644 --- a/elasticsearch/_sync/client/ml.py +++ b/elasticsearch/_sync/client/ml.py @@ -36,11 +36,14 @@ def clear_trained_model_deployment_cache( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear trained model deployment cache. Cache will be cleared on all nodes where - the trained model is assigned. A trained model deployment may have an inference - cache enabled. As requests are handled by each allocated node, their responses - may be cached on that individual node. Calling this API clears the caches without - restarting the deployment. + .. raw:: html + +

Clear trained model deployment cache. + Cache will be cleared on all nodes where the trained model is assigned. + A trained model deployment may have an inference cache enabled. + As requests are handled by each allocated node, their responses may be cached on that individual node. + Calling this API clears the caches without restarting the deployment.

+ ``_ @@ -88,19 +91,14 @@ def close_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Close anomaly detection jobs. A job can be opened and closed multiple times throughout - its lifecycle. A closed job cannot receive data or perform analysis operations, - but you can still explore and navigate results. When you close a job, it runs - housekeeping tasks such as pruning the model history, flushing buffers, calculating - final results and persisting the model snapshots. Depending upon the size of - the job, it could take several minutes to close and the equivalent time to re-open. - After it is closed, the job has a minimal overhead on the cluster except for - maintaining its meta data. Therefore it is a best practice to close jobs that - are no longer required to process data. If you close an anomaly detection job - whose datafeed is running, the request first tries to stop the datafeed. This - behavior is equivalent to calling stop datafeed API with the same timeout and - force parameters as the close job request. When a datafeed that has a specified - end date stops, it automatically closes its associated job. + .. raw:: html + +

Close anomaly detection jobs. + A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. + When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its meta data. Therefore it is a best practice to close jobs that are no longer required to process data. + If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request. + When a datafeed that has a specified end date stops, it automatically closes its associated job.
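
A minimal sketch with the Python client (job id and timeout are placeholders):

    # Close the job; a running datafeed, if any, is stopped first.
    client.ml.close_job(job_id="my-anomaly-job", timeout="30m")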

+ ``_ @@ -161,8 +159,11 @@ def delete_calendar( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a calendar. Removes all scheduled events from a calendar, then deletes - it. + .. raw:: html + +

Delete a calendar. + Removes all scheduled events from a calendar, then deletes it.

+ ``_ @@ -203,7 +204,10 @@ def delete_calendar_event( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete events from a calendar. + .. raw:: html + +

Delete events from a calendar.

+ ``_ @@ -251,7 +255,10 @@ def delete_calendar_job( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete anomaly jobs from a calendar. + .. raw:: html + +

Delete anomaly jobs from a calendar.

+ ``_ @@ -300,7 +307,10 @@ def delete_data_frame_analytics( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a data frame analytics job. + .. raw:: html + +

Delete a data frame analytics job.

+ ``_ @@ -348,7 +358,10 @@ def delete_datafeed( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a datafeed. + .. raw:: html + +

Delete a datafeed.

+ ``_ @@ -400,13 +413,18 @@ def delete_expired_data( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete expired ML data. Deletes all job results, model snapshots and forecast - data that have exceeded their retention days period. Machine learning state documents - that are not associated with any job are also deleted. You can limit the request - to a single or set of anomaly detection jobs by using a job identifier, a group - name, a comma-separated list of jobs, or a wildcard expression. You can delete - expired data for all anomaly detection jobs by using _all, by specifying * as - the , or by omitting the . + .. raw:: html + +

Delete expired ML data. + Deletes all job results, model snapshots and forecast data that have exceeded + their retention days period. Machine learning state documents that are not + associated with any job are also deleted. + You can limit the request to a single or set of anomaly detection jobs by + using a job identifier, a group name, a comma-separated list of jobs, or a + wildcard expression. You can delete expired data for all anomaly detection + jobs by using _all, by specifying * as the <job_id>, or by omitting the + <job_id>.

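As a hypothetical sketch, pruning expired results for every anomaly detection job by passing _all as the job identifier, as described above; the client instance is assumed:

    # "es" is an Elasticsearch client, e.g. Elasticsearch("http://localhost:9200")
    es.ml.delete_expired_data(job_id="_all")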
+ ``_ @@ -465,9 +483,12 @@ def delete_filter( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a filter. If an anomaly detection job references the filter, you cannot - delete the filter. You must update or delete the job before you can delete the - filter. + .. raw:: html + +

Delete a filter. + If an anomaly detection job references the filter, you cannot delete the + filter. You must update or delete the job before you can delete the filter.

+ ``_ @@ -510,10 +531,14 @@ def delete_forecast( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete forecasts from a job. By default, forecasts are retained for 14 days. - You can specify a different retention period with the `expires_in` parameter - in the forecast jobs API. The delete forecast API enables you to delete one or - more forecasts before they expire. + .. raw:: html + +

Delete forecasts from a job. + By default, forecasts are retained for 14 days. You can specify a + different retention period with the expires_in parameter in the forecast + jobs API. The delete forecast API enables you to delete one or more + forecasts before they expire.

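A minimal sketch of deleting a single forecast before it expires; the job and forecast identifiers are placeholders:

    # "es" is an Elasticsearch client instance
    es.ml.delete_forecast(job_id="my-job", forecast_id="my-forecast-id")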
+ ``_ @@ -580,12 +605,16 @@ def delete_job( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete an anomaly detection job. All job configuration, model state and results - are deleted. It is not currently possible to delete multiple jobs using wildcards - or a comma separated list. If you delete a job that has a datafeed, the request - first tries to delete the datafeed. This behavior is equivalent to calling the - delete datafeed API with the same timeout and force parameters as the delete - job request. + .. raw:: html + +

Delete an anomaly detection job. + All job configuration, model state, and results are deleted. + It is not currently possible to delete multiple jobs using wildcards or a + comma-separated list. If you delete a job that has a datafeed, the request + first tries to delete the datafeed. This behavior is equivalent to calling + the delete datafeed API with the same timeout and force parameters as the + delete job request.

+ ``_ @@ -639,9 +668,13 @@ def delete_model_snapshot( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a model snapshot. You cannot delete the active model snapshot. To delete - that snapshot, first revert to a different one. To identify the active model - snapshot, refer to the `model_snapshot_id` in the results from the get jobs API. + .. raw:: html + +

Delete a model snapshot. + You cannot delete the active model snapshot. To delete that snapshot, first + revert to a different one. To identify the active model snapshot, refer to + the model_snapshot_id in the results from the get jobs API.

+ ``_ @@ -689,8 +722,11 @@ def delete_trained_model( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete an unreferenced trained model. The request deletes a trained inference - model that is not referenced by an ingest pipeline. + .. raw:: html + +

Delete an unreferenced trained model. + The request deletes a trained inference model that is not referenced by an ingest pipeline.

+ ``_ @@ -739,9 +775,13 @@ def delete_trained_model_alias( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a trained model alias. This API deletes an existing model alias that refers - to a trained model. If the model alias is missing or refers to a model other - than the one identified by the `model_id`, this API returns an error. + .. raw:: html + +

Delete a trained model alias. + This API deletes an existing model alias that refers to a trained model. If + the model alias is missing or refers to a model other than the one identified + by the model_id, this API returns an error.

+ ``_ @@ -796,9 +836,13 @@ def estimate_model_memory( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Estimate job model memory usage. Makes an estimation of the memory usage for - an anomaly detection job model. It is based on analysis configuration details - for the job and cardinality estimates for the fields it references. + .. raw:: html + +

Estimate job model memory usage. + Makes an estimation of the memory usage for an anomaly detection job model. + It is based on analysis configuration details for the job and cardinality + estimates for the fields it references.

+ ``_ @@ -863,10 +907,14 @@ def evaluate_data_frame( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Evaluate data frame analytics. The API packages together commonly used evaluation - metrics for various types of machine learning features. This has been designed - for use on indexes created by data frame analytics. Evaluation requires both - a ground truth field and an analytics result field to be present. + .. raw:: html + +

Evaluate data frame analytics. + The API packages together commonly used evaluation metrics for various types + of machine learning features. This has been designed for use on indexes + created by data frame analytics. Evaluation requires both a ground truth + field and an analytics result field to be present.

+ ``_ @@ -940,13 +988,18 @@ def explain_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Explain data frame analytics config. This API provides explanations for a data - frame analytics config that either exists already or one that has not been created - yet. The following explanations are provided: * which fields are included or - not in the analysis and why, * how much memory is estimated to be required. The - estimate can be used when deciding the appropriate value for model_memory_limit - setting later on. If you have object fields or fields that are excluded via source - filtering, they are not included in the explanation. + .. raw:: html + +

Explain data frame analytics config. + This API provides explanations for a data frame analytics config that either + exists already or one that has not been created yet. The following + explanations are provided:

+
  • which fields are included or not in the analysis and why,
  • how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for model_memory_limit setting later on. + If you have object fields or fields that are excluded via source filtering, they are not included in the explanation.
+ ``_ @@ -1046,14 +1099,18 @@ def flush_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Force buffered data to be processed. The flush jobs API is only applicable when - sending data for analysis using the post data API. Depending on the content of - the buffer, then it might additionally calculate new results. Both flush and - close operations are similar, however the flush is more efficient if you are - expecting to send more data for analysis. When flushing, the job remains open - and is available to continue analyzing data. A close operation additionally prunes - and persists the model state to disk and the job must be opened again before - analyzing further data. + .. raw:: html + +

Force buffered data to be processed. + The flush jobs API is only applicable when sending data for analysis using + the post data API. Depending on the content of the buffer, it might + additionally calculate new results. Both flush and close operations are + similar; however, the flush is more efficient if you are expecting to send + more data for analysis. When flushing, the job remains open and is available + to continue analyzing data. A close operation additionally prunes and + persists the model state to disk, and the job must be opened again before + analyzing further data.

+ ``_ @@ -1121,10 +1178,14 @@ def forecast( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Predict future behavior of a time series. Forecasts are not supported for jobs - that perform population analysis; an error occurs if you try to create a forecast - for a job that has an `over_field_name` in its configuration. Forcasts predict - future behavior based on historical data. + .. raw:: html + +

Predict future behavior of a time series.

+

Forecasts are not supported for jobs that perform population analysis; an + error occurs if you try to create a forecast for a job that has an + over_field_name in its configuration. Forecasts predict future behavior + based on historical data.

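As a rough sketch (not part of this patch), requesting a forecast for a job that does not use population analysis; the three-day duration and 30-day retention are illustrative values, with expires_in being the retention parameter mentioned above:

    # "es" is an Elasticsearch client instance
    es.ml.forecast(job_id="my-job", duration="3d", expires_in="30d")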
+ ``_ @@ -1206,8 +1267,11 @@ def get_buckets( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get anomaly detection job results for buckets. The API presents a chronological - view of the records, grouped by bucket. + .. raw:: html + +

Get anomaly detection job results for buckets. + The API presents a chronological view of the records, grouped by bucket.

+ ``_ @@ -1302,7 +1366,10 @@ def get_calendar_events( start: t.Optional[t.Union[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get info about events in calendars. + .. raw:: html + +

Get info about events in calendars.

+ ``_ @@ -1368,7 +1435,10 @@ def get_calendars( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get calendar configuration info. + .. raw:: html + +

Get calendar configuration info.

+ ``_ @@ -1441,7 +1511,10 @@ def get_categories( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get anomaly detection job results for categories. + .. raw:: html + +

Get anomaly detection job results for categories.

+ ``_ @@ -1523,9 +1596,13 @@ def get_data_frame_analytics( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Get data frame analytics job configuration info. You can get information for - multiple data frame analytics jobs in a single API request by using a comma-separated - list of data frame analytics jobs or a wildcard expression. + .. raw:: html + +

Get data frame analytics job configuration info. + You can get information for multiple data frame analytics jobs in a single + API request by using a comma-separated list of data frame analytics jobs or a + wildcard expression.

+ ``_ @@ -1597,7 +1674,10 @@ def get_data_frame_analytics_stats( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get data frame analytics jobs usage info. + .. raw:: html + +

Get data frame analytics jobs usage info.

+ ``_ @@ -1662,12 +1742,16 @@ def get_datafeed_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get datafeeds usage info. You can get statistics for multiple datafeeds in a - single API request by using a comma-separated list of datafeeds or a wildcard - expression. You can get statistics for all datafeeds by using `_all`, by specifying - `*` as the ``, or by omitting the ``. If the datafeed is stopped, - the only information you receive is the `datafeed_id` and the `state`. This API - returns a maximum of 10,000 datafeeds. + .. raw:: html + +

Get datafeeds usage info. + You can get statistics for multiple datafeeds in a single API request by + using a comma-separated list of datafeeds or a wildcard expression. You can + get statistics for all datafeeds by using _all, by specifying * as the + <feed_id>, or by omitting the <feed_id>. If the datafeed is stopped, the + only information you receive is the datafeed_id and the state. + This API returns a maximum of 10,000 datafeeds.

+ ``_ @@ -1723,11 +1807,15 @@ def get_datafeeds( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get datafeeds configuration info. You can get information for multiple datafeeds - in a single API request by using a comma-separated list of datafeeds or a wildcard - expression. You can get information for all datafeeds by using `_all`, by specifying - `*` as the ``, or by omitting the ``. This API returns a maximum - of 10,000 datafeeds. + .. raw:: html + +

Get datafeeds configuration info. + You can get information for multiple datafeeds in a single API request by + using a comma-separated list of datafeeds or a wildcard expression. You can + get information for all datafeeds by using _all, by specifying * as the + <feed_id>, or by omitting the <feed_id>. + This API returns a maximum of 10,000 datafeeds.

+ ``_ @@ -1790,7 +1878,11 @@ def get_filters( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Get filters. You can get a single filter or all filters. + .. raw:: html + +

Get filters. + You can get a single filter or all filters.

+ ``_ @@ -1852,9 +1944,13 @@ def get_influencers( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get anomaly detection job results for influencers. Influencers are the entities - that have contributed to, or are to blame for, the anomalies. Influencer results - are available only if an `influencer_field_name` is specified in the job configuration. + .. raw:: html + +

Get anomaly detection job results for influencers. + Influencers are the entities that have contributed to, or are to blame for, + the anomalies. Influencer results are available only if an + influencer_field_name is specified in the job configuration.

+ ``_ @@ -1935,7 +2031,10 @@ def get_job_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get anomaly detection jobs usage info. + .. raw:: html + +

Get anomaly detection jobs usage info.

+ ``_ @@ -1992,11 +2091,14 @@ def get_jobs( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get anomaly detection jobs configuration info. You can get information for multiple - anomaly detection jobs in a single API request by using a group name, a comma-separated - list of jobs, or a wildcard expression. You can get information for all anomaly - detection jobs by using `_all`, by specifying `*` as the ``, or by omitting - the ``. + .. raw:: html + +

Get anomaly detection jobs configuration info. + You can get information for multiple anomaly detection jobs in a single API + request by using a group name, a comma-separated list of jobs, or a wildcard + expression. You can get information for all anomaly detection jobs by using + _all, by specifying * as the <job_id>, or by omitting the <job_id>.

+ ``_ @@ -2057,9 +2159,12 @@ def get_memory_stats( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get machine learning memory usage info. Get information about how machine learning - jobs and trained models are using memory, on each node, both within the JVM heap, - and natively, outside of the JVM. + .. raw:: html + +

Get machine learning memory usage info. + Get information about how machine learning jobs and trained models are using memory, + on each node, both within the JVM heap, and natively, outside of the JVM.

+ ``_ @@ -2114,7 +2219,10 @@ def get_model_snapshot_upgrade_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get anomaly detection job model snapshot upgrade usage info. + .. raw:: html + +

Get anomaly detection job model snapshot upgrade usage info.

+ ``_ @@ -2185,7 +2293,10 @@ def get_model_snapshots( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get model snapshots info. + .. raw:: html + +

Get model snapshots info.

+ ``_ @@ -2286,19 +2397,26 @@ def get_overall_buckets( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get overall bucket results. Retrievs overall bucket results that summarize the - bucket results of multiple anomaly detection jobs. The `overall_score` is calculated - by combining the scores of all the buckets within the overall bucket span. First, - the maximum `anomaly_score` per anomaly detection job in the overall bucket is - calculated. Then the `top_n` of those scores are averaged to result in the `overall_score`. - This means that you can fine-tune the `overall_score` so that it is more or less - sensitive to the number of jobs that detect an anomaly at the same time. For - example, if you set `top_n` to `1`, the `overall_score` is the maximum bucket - score in the overall bucket. Alternatively, if you set `top_n` to the number - of jobs, the `overall_score` is high only when all jobs detect anomalies in that - overall bucket. If you set the `bucket_span` parameter (to a value greater than - its default), the `overall_score` is the maximum `overall_score` of the overall - buckets that have a span equal to the jobs' largest bucket span. + .. raw:: html + +

Get overall bucket results.

+

Retrieves overall bucket results that summarize the bucket results of + multiple anomaly detection jobs.

+

The overall_score is calculated by combining the scores of all the + buckets within the overall bucket span. First, the maximum + anomaly_score per anomaly detection job in the overall bucket is + calculated. Then the top_n of those scores are averaged to result in + the overall_score. This means that you can fine-tune the + overall_score so that it is more or less sensitive to the number of + jobs that detect an anomaly at the same time. For example, if you set + top_n to 1, the overall_score is the maximum bucket score in the + overall bucket. Alternatively, if you set top_n to the number of jobs, + the overall_score is high only when all jobs detect anomalies in that + overall bucket. If you set the bucket_span parameter (to a value + greater than its default), the overall_score is the maximum + overall_score of the overall buckets that have a span equal to the + jobs' largest bucket span.

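A minimal sketch tying top_n and bucket_span to the scoring behaviour described above; the job group name is a placeholder and the response fields are read as documented for this API:

    # "es" is an Elasticsearch client instance
    resp = es.ml.get_overall_buckets(
        job_id="my-job-group",   # group name, comma-separated list, or wildcard
        top_n=2,                 # average the two highest per-job anomaly scores
        bucket_span="1h",        # span of the overall buckets to return
    )
    for bucket in resp["overall_buckets"]:
        print(bucket["timestamp"], bucket["overall_score"])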
+ ``_ @@ -2395,15 +2513,20 @@ def get_records( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get anomaly records for an anomaly detection job. Records contain the detailed - analytical results. They describe the anomalous activity that has been identified - in the input data based on the detector configuration. There can be many anomaly - records depending on the characteristics and size of the input data. In practice, - there are often too many to be able to manually process them. The machine learning - features therefore perform a sophisticated aggregation of the anomaly records - into buckets. The number of record results depends on the number of anomalies - found in each bucket, which relates to the number of time series being modeled - and the number of detectors. + .. raw:: html + +

Get anomaly records for an anomaly detection job. + Records contain the detailed analytical results. They describe the anomalous + activity that has been identified in the input data based on the detector + configuration. + There can be many anomaly records depending on the characteristics and size + of the input data. In practice, there are often too many to be able to + manually process them. The machine learning features therefore perform a + sophisticated aggregation of the anomaly records into buckets. + The number of record results depends on the number of anomalies found in each + bucket, which relates to the number of time series being modeled and the + number of detectors.

+ ``_ @@ -2499,7 +2622,10 @@ def get_trained_models( tags: t.Optional[t.Union[str, t.Sequence[str]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get trained model configuration info. + .. raw:: html + +

Get trained model configuration info.

+ ``_ @@ -2585,9 +2711,12 @@ def get_trained_models_stats( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Get trained models usage info. You can get usage information for multiple trained - models in a single API request by using a comma-separated list of model IDs or - a wildcard expression. + .. raw:: html + +

Get trained models usage info. + You can get usage information for multiple trained + models in a single API request by using a comma-separated list of model IDs or a wildcard expression.

+ ``_ @@ -2650,7 +2779,10 @@ def infer_trained_model( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Evaluate a trained model. + .. raw:: html + +

Evaluate a trained model.

+ ``_ @@ -2707,12 +2839,17 @@ def info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get machine learning information. Get defaults and limits used by machine learning. - This endpoint is designed to be used by a user interface that needs to fully - understand machine learning configurations where some options are not specified, - meaning that the defaults should be used. This endpoint may be used to find out - what those defaults are. It also provides information about the maximum size - of machine learning jobs that could run in the current cluster configuration. + .. raw:: html + +

Get machine learning information. + Get defaults and limits used by machine learning. + This endpoint is designed to be used by a user interface that needs to fully + understand machine learning configurations where some options are not + specified, meaning that the defaults should be used. This endpoint may be + used to find out what those defaults are. It also provides information about + the maximum size of machine learning jobs that could run in the current + cluster configuration.

+ ``_ """ @@ -2752,12 +2889,16 @@ def open_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Open anomaly detection jobs. An anomaly detection job must be opened to be ready - to receive and analyze data. It can be opened and closed multiple times throughout - its lifecycle. When you open a new job, it starts with an empty model. When you - open an existing job, the most recent model state is automatically loaded. The - job is ready to resume its analysis from where it left off, once new data is - received. + .. raw:: html + +

Open anomaly detection jobs. + An anomaly detection job must be opened to be ready to receive and analyze + data. It can be opened and closed multiple times throughout its lifecycle. + When you open a new job, it starts with an empty model. + When you open an existing job, the most recent model state is automatically + loaded. The job is ready to resume its analysis from where it left off, once + new data is received.

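A minimal sketch; the job id is a placeholder and the job is assumed to already exist:

    # "es" is an Elasticsearch client instance
    es.ml.open_job(job_id="my-job")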
+ ``_ @@ -2811,7 +2952,10 @@ def post_calendar_events( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Add scheduled events to the calendar. + .. raw:: html + +

Add scheduled events to the calendar.

+ ``_ @@ -2867,9 +3011,12 @@ def post_data( reset_start: t.Optional[t.Union[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Send data to an anomaly detection job for analysis. IMPORTANT: For each job, - data can be accepted from only a single connection at a time. It is not currently - possible to post data to multiple jobs using wildcards or a comma-separated list. + .. raw:: html + +

Send data to an anomaly detection job for analysis.

+

IMPORTANT: For each job, data can be accepted from only a single connection at a time. + It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list.

+ ``_ @@ -2932,8 +3079,11 @@ def preview_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Preview features used by data frame analytics. Previews the extracted features - used by a data frame analytics config. + .. raw:: html + +

Preview features used by data frame analytics. + Previews the extracted features used by a data frame analytics config.

+ ``_ @@ -2995,15 +3145,18 @@ def preview_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Preview a datafeed. This API returns the first "page" of search results from - a datafeed. You can preview an existing datafeed or provide configuration details - for a datafeed and anomaly detection job in the API. The preview shows the structure - of the data that will be passed to the anomaly detection engine. IMPORTANT: When - Elasticsearch security features are enabled, the preview uses the credentials - of the user that called the API. However, when the datafeed starts it uses the - roles of the last user that created or updated the datafeed. To get a preview - that accurately reflects the behavior of the datafeed, use the appropriate credentials. - You can also use secondary authorization headers to supply the credentials. + .. raw:: html + +

Preview a datafeed. + This API returns the first "page" of search results from a datafeed. + You can preview an existing datafeed or provide configuration details for a datafeed + and anomaly detection job in the API. The preview shows the structure of the data + that will be passed to the anomaly detection engine. + IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that + called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the + datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials. + You can also use secondary authorization headers to supply the credentials.

+ ``_ @@ -3079,7 +3232,10 @@ def put_calendar( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a calendar. + .. raw:: html + +

Create a calendar.

+ ``_ @@ -3133,7 +3289,10 @@ def put_calendar_job( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Add anomaly detection job to calendar. + .. raw:: html + +

Add anomaly detection job to calendar.

+ ``_ @@ -3208,13 +3367,15 @@ def put_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a data frame analytics job. This API creates a data frame analytics job - that performs an analysis on the source indices and stores the outcome in a destination - index. By default, the query used in the source configuration is `{"match_all": - {}}`. If the destination index does not exist, it is created automatically when - you start the job. If you supply only a subset of the regression or classification - parameters, hyperparameter optimization occurs. It determines a value for each - of the undefined parameters. + .. raw:: html + +

Create a data frame analytics job. + This API creates a data frame analytics job that performs an analysis on the + source indices and stores the outcome in a destination index. + By default, the query used in the source configuration is {"match_all": {}}.

+

If the destination index does not exist, it is created automatically when you start the job.

+

If you supply only a subset of the regression or classification parameters, hyperparameter optimization occurs. It determines a value for each of the undefined parameters.

+ ``_ @@ -3387,18 +3548,19 @@ def put_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by - an anomaly detection job. You can associate only one datafeed with each anomaly - detection job. The datafeed contains a query that runs at a defined interval - (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay') - at each interval. By default, the datafeed uses the following query: `{"match_all": - {"boost": 1}}`. When Elasticsearch security features are enabled, your datafeed - remembers which roles the user who created it had at the time of creation and - runs the query using those same roles. If you provide secondary authorization - headers, those credentials are used instead. You must use Kibana, this API, or - the create anomaly detection jobs API to create a datafeed. Do not add a datafeed - directly to the `.ml-config` index. Do not give users `write` privileges on the - `.ml-config` index. + .. raw:: html + +

Create a datafeed. + Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. + You can associate only one datafeed with each anomaly detection job. + The datafeed contains a query that runs at a defined interval (frequency). + If you are concerned about delayed data, you can add a delay (query_delay) at each interval. By default, the datafeed uses the following query: {"match_all": {"boost": 1}}.

+

When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had + at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, + those credentials are used instead. + You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed + directly to the .ml-config index. Do not give users write privileges on the .ml-config index.

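A hypothetical sketch of creating a datafeed through this API rather than writing to the .ml-config index; the identifiers, index pattern, and interval values are illustrative assumptions:

    # "es" is an Elasticsearch client instance
    es.ml.put_datafeed(
        datafeed_id="datafeed-my-job",
        job_id="my-job",
        indices=["my-metrics-*"],
        query={"match_all": {"boost": 1}},  # the default query described above
        frequency="150s",
        query_delay="120s",
    )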
+ ``_ @@ -3555,9 +3717,12 @@ def put_filter( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a filter. A filter contains a list of strings. It can be used by one or - more anomaly detection jobs. Specifically, filters are referenced in the `custom_rules` - property of detector configuration objects. + .. raw:: html + +

Create a filter. + A filter contains a list of strings. It can be used by one or more anomaly detection jobs. + Specifically, filters are referenced in the custom_rules property of detector configuration objects.

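For example, a sketch of creating a filter that a detector's custom_rules could later reference; the filter id and items are placeholders:

    # "es" is an Elasticsearch client instance
    es.ml.put_filter(
        filter_id="safe_domains",
        description="Domains that should not trigger anomalies",
        items=["example.com", "example.org"],
    )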
+ ``_ @@ -3654,9 +3819,12 @@ def put_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create an anomaly detection job. If you include a `datafeed_config`, you must - have read index privileges on the source index. If you include a `datafeed_config` - but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`. + .. raw:: html + +

Create an anomaly detection job. + If you include a datafeed_config, you must have read index privileges on the source index. + If you include a datafeed_config but do not provide a query, the datafeed uses {"match_all": {"boost": 1}}.

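A compact sketch of creating a job without a datafeed_config; the field names and bucket span are illustrative assumptions:

    # "es" is an Elasticsearch client instance
    es.ml.put_job(
        job_id="my-job",
        analysis_config={
            "bucket_span": "15m",
            "detectors": [{"function": "mean", "field_name": "responsetime"}],
        },
        data_description={"time_field": "timestamp"},
    )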
+ ``_ @@ -3860,8 +4028,11 @@ def put_trained_model( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a trained model. Enable you to supply a trained model that is not created - by data frame analytics. + .. raw:: html + +

Create a trained model. + Enables you to supply a trained model that is not created by data frame analytics.

+ ``_ @@ -3963,19 +4134,26 @@ def put_trained_model_alias( reassign: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a trained model alias. A trained model alias is a logical name - used to reference a single trained model. You can use aliases instead of trained - model identifiers to make it easier to reference your models. For example, you - can use aliases in inference aggregations and processors. An alias must be unique - and refer to only a single trained model. However, you can have multiple aliases - for each trained model. If you use this API to update an alias such that it references - a different trained model ID and the model uses a different type of data frame - analytics, an error occurs. For example, this situation occurs if you have a - trained model for regression analysis and a trained model for classification - analysis; you cannot reassign an alias from one type of trained model to another. - If you use this API to update an alias and there are very few input fields in - common between the old and new trained models for the model alias, the API returns - a warning. + .. raw:: html + +

Create or update a trained model alias. + A trained model alias is a logical name used to reference a single trained + model. + You can use aliases instead of trained model identifiers to make it easier to + reference your models. For example, you can use aliases in inference + aggregations and processors. + An alias must be unique and refer to only a single trained model. However, + you can have multiple aliases for each trained model. + If you use this API to update an alias such that it references a different + trained model ID and the model uses a different type of data frame analytics, + an error occurs. For example, this situation occurs if you have a trained + model for regression analysis and a trained model for classification + analysis; you cannot reassign an alias from one type of trained model to + another. + If you use this API to update an alias and there are very few input fields in + common between the old and new trained models for the model alias, the API + returns a warning.

+ ``_ @@ -4033,7 +4211,10 @@ def put_trained_model_definition_part( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create part of a trained model definition. + .. raw:: html + +

Create part of a trained model definition.

+ ``_ @@ -4110,9 +4291,12 @@ def put_trained_model_vocabulary( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a trained model vocabulary. This API is supported only for natural language - processing (NLP) models. The vocabulary is stored in the index as described in - `inference_config.*.vocabulary` of the trained model definition. + .. raw:: html + +

Create a trained model vocabulary. + This API is supported only for natural language processing (NLP) models. + The vocabulary is stored in the index as described in inference_config.*.vocabulary of the trained model definition.

+ ``_ @@ -4168,9 +4352,14 @@ def reset_job( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Reset an anomaly detection job. All model state and results are deleted. The - job is ready to start over as if it had just been created. It is not currently - possible to reset multiple jobs using wildcards or a comma separated list. + .. raw:: html + +

Reset an anomaly detection job. + All model state and results are deleted. The job is ready to start over as if + it had just been created. + It is not currently possible to reset multiple jobs using wildcards or a + comma-separated list.

+ ``_ @@ -4224,13 +4413,17 @@ def revert_model_snapshot( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Revert to a snapshot. The machine learning features react quickly to anomalous - input, learning new behaviors in data. Highly anomalous input increases the variance - in the models whilst the system learns whether this is a new step-change in behavior - or a one-off event. In the case where this anomalous input is known to be a one-off, - then it might be appropriate to reset the model state to a time before this event. - For example, you might consider reverting to a saved snapshot after Black Friday - or a critical system failure. + .. raw:: html + +

Revert to a snapshot. + The machine learning features react quickly to anomalous input, learning new + behaviors in data. Highly anomalous input increases the variance in the + models whilst the system learns whether this is a new step-change in behavior + or a one-off event. In the case where this anomalous input is known to be a + one-off, then it might be appropriate to reset the model state to a time + before this event. For example, you might consider reverting to a saved + snapshot after Black Friday or a critical system failure.

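A sketch of reverting to a saved snapshot; the snapshot id is a placeholder and delete_intervening_results is assumed to behave as in the REST API:

    # "es" is an Elasticsearch client instance
    es.ml.revert_model_snapshot(
        job_id="my-job",
        snapshot_id="my-snapshot-id",
        delete_intervening_results=True,  # discard results created after the snapshot
    )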
+ ``_ @@ -4290,17 +4483,22 @@ def set_upgrade_mode( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Set upgrade_mode for ML indices. Sets a cluster wide upgrade_mode setting that - prepares machine learning indices for an upgrade. When upgrading your cluster, - in some circumstances you must restart your nodes and reindex your machine learning - indices. In those circumstances, there must be no machine learning jobs running. - You can close the machine learning jobs, do the upgrade, then open all the jobs - again. Alternatively, you can use this API to temporarily halt tasks associated - with the jobs and datafeeds and prevent new jobs from opening. You can also use - this API during upgrades that do not require you to reindex your machine learning - indices, though stopping jobs is not a requirement in that case. You can see - the current value for the upgrade_mode setting by using the get machine learning - info API. + .. raw:: html + +

Set upgrade_mode for ML indices. + Sets a cluster wide upgrade_mode setting that prepares machine learning + indices for an upgrade. + When upgrading your cluster, in some circumstances you must restart your + nodes and reindex your machine learning indices. In those circumstances, + there must be no machine learning jobs running. You can close the machine + learning jobs, do the upgrade, then open all the jobs again. Alternatively, + you can use this API to temporarily halt tasks associated with the jobs and + datafeeds and prevent new jobs from opening. You can also use this API + during upgrades that do not require you to reindex your machine learning + indices, though stopping jobs is not a requirement in that case. + You can see the current value for the upgrade_mode setting by using the get + machine learning info API.

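As a sketch, toggling upgrade mode around maintenance; the enabled flag mirrors the REST API parameter of the same name:

    # "es" is an Elasticsearch client instance
    es.ml.set_upgrade_mode(enabled=True)   # halt ML tasks before the upgrade
    # ... perform the upgrade or reindex ...
    es.ml.set_upgrade_mode(enabled=False)  # resume ML tasks afterwards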
+ ``_ @@ -4346,16 +4544,21 @@ def start_data_frame_analytics( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Start a data frame analytics job. A data frame analytics job can be started and - stopped multiple times throughout its lifecycle. If the destination index does - not exist, it is created automatically the first time you start the data frame - analytics job. The `index.number_of_shards` and `index.number_of_replicas` settings - for the destination index are copied from the source index. If there are multiple - source indices, the destination index copies the highest setting values. The - mappings for the destination index are also copied from the source indices. If - there are any mapping conflicts, the job fails to start. If the destination index - exists, it is used as is. You can therefore set up the destination index in advance - with custom settings and mappings. + .. raw:: html + +

Start a data frame analytics job. + A data frame analytics job can be started and stopped multiple times + throughout its lifecycle. + If the destination index does not exist, it is created automatically the + first time you start the data frame analytics job. The + index.number_of_shards and index.number_of_replicas settings for the + destination index are copied from the source index. If there are multiple + source indices, the destination index copies the highest setting values. The + mappings for the destination index are also copied from the source indices. + If there are any mapping conflicts, the job fails to start. + If the destination index exists, it is used as is. You can therefore set up + the destination index in advance with custom settings and mappings.

+ ``_ @@ -4407,17 +4610,18 @@ def start_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Start datafeeds. A datafeed must be started in order to retrieve data from Elasticsearch. - A datafeed can be started and stopped multiple times throughout its lifecycle. - Before you can start a datafeed, the anomaly detection job must be open. Otherwise, - an error occurs. If you restart a stopped datafeed, it continues processing input - data from the next millisecond after it was stopped. If new data was indexed - for that exact millisecond between stopping and starting, it will be ignored. - When Elasticsearch security features are enabled, your datafeed remembers which - roles the last user to create or update it had at the time of creation or update - and runs the query using those same roles. If you provided secondary authorization - headers when you created or updated the datafeed, those credentials are used - instead. + .. raw:: html + +

Start datafeeds.

+

A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped + multiple times throughout its lifecycle.

+

Before you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs.

+

If you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped. + If new data was indexed for that exact millisecond between stopping and starting, it will be ignored.

+

When Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or + update it had at the time of creation or update and runs the query using those same roles. If you provided secondary + authorization headers when you created or updated the datafeed, those credentials are used instead.

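A minimal sketch; it assumes the associated job is opened first, and the datafeed id and start time are placeholders:

    # "es" is an Elasticsearch client instance
    es.ml.open_job(job_id="my-job")
    es.ml.start_datafeed(datafeed_id="datafeed-my-job", start="2024-01-01T00:00:00Z")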
+ ``_ @@ -4486,8 +4690,11 @@ def start_trained_model_deployment( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Start a trained model deployment. It allocates the model to every machine learning - node. + .. raw:: html + +

Start a trained model deployment. + It allocates the model to every machine learning node.

+ ``_ @@ -4570,8 +4777,12 @@ def stop_data_frame_analytics( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Stop data frame analytics jobs. A data frame analytics job can be started and - stopped multiple times throughout its lifecycle. + .. raw:: html + +

Stop data frame analytics jobs. + A data frame analytics job can be started and stopped multiple times + throughout its lifecycle.

+ ``_ @@ -4636,8 +4847,12 @@ def stop_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. - A datafeed can be started and stopped multiple times throughout its lifecycle. + .. raw:: html + +

Stop datafeeds. + A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped + multiple times throughout its lifecycle.

+ ``_ @@ -4699,7 +4914,10 @@ def stop_trained_model_deployment( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stop a trained model deployment. + .. raw:: html + +

Stop a trained model deployment.

+ ``_ @@ -4764,7 +4982,10 @@ def update_data_frame_analytics( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update a data frame analytics job. + .. raw:: html + +

Update a data frame analytics job.

+ ``_ @@ -4872,11 +5093,14 @@ def update_datafeed( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update a datafeed. You must stop and start the datafeed for the changes to be - applied. When Elasticsearch security features are enabled, your datafeed remembers - which roles the user who updated it had at the time of the update and runs the - query using those same roles. If you provide secondary authorization headers, - those credentials are used instead. + .. raw:: html + +

Update a datafeed. + You must stop and start the datafeed for the changes to be applied. + When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at + the time of the update and runs the query using those same roles. If you provide secondary authorization headers, + those credentials are used instead.

+ ``_ @@ -5039,8 +5263,11 @@ def update_filter( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update a filter. Updates the description of a filter, adds items, or removes - items from the list. + .. raw:: html + +

Update a filter. + Updates the description of a filter, adds items, or removes items from the list.

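A sketch of adding and removing filter items in a single call; the ids and items are placeholders, and add_items/remove_items follow the REST API body fields:

    # "es" is an Elasticsearch client instance
    es.ml.update_filter(
        filter_id="safe_domains",
        add_items=["new-domain.example"],
        remove_items=["old-domain.example"],
    )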
+ ``_ @@ -5130,8 +5357,11 @@ def update_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update an anomaly detection job. Updates certain properties of an anomaly detection - job. + .. raw:: html + +

Update an anomaly detection job. + Updates certain properties of an anomaly detection job.

+ ``_ @@ -5259,7 +5489,11 @@ def update_model_snapshot( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update a snapshot. Updates certain properties of a snapshot. + .. raw:: html + +

Update a snapshot. + Updates certain properties of a snapshot.

+ ``_ @@ -5320,7 +5554,10 @@ def update_trained_model_deployment( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update a trained model deployment. + .. raw:: html + +

Update a trained model deployment.

+ ``_ @@ -5379,14 +5616,19 @@ def upgrade_job_snapshot( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Upgrade a snapshot. Upgrades an anomaly detection model snapshot to the latest - major version. Over time, older snapshot formats are deprecated and removed. - Anomaly detection jobs support only snapshots that are from the current or previous - major version. This API provides a means to upgrade a snapshot to the current - major version. This aids in preparing the cluster for an upgrade to the next - major version. Only one snapshot per anomaly detection job can be upgraded at - a time and the upgraded snapshot cannot be the current snapshot of the anomaly - detection job. + .. raw:: html + +

Upgrade a snapshot. + Upgrades an anomaly detection model snapshot to the latest major version. + Over time, older snapshot formats are deprecated and removed. Anomaly + detection jobs support only snapshots that are from the current or previous + major version. + This API provides a means to upgrade a snapshot to the current major version. + This aids in preparing the cluster for an upgrade to the next major version. + Only one snapshot per anomaly detection job can be upgraded at a time and the + upgraded snapshot cannot be the current snapshot of the anomaly detection + job.

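A sketch using the wait_for_completion flag from the signature above; the job and snapshot ids are placeholders:

    # "es" is an Elasticsearch client instance
    es.ml.upgrade_job_snapshot(
        job_id="my-job",
        snapshot_id="my-snapshot-id",
        wait_for_completion=True,  # block until the snapshot upgrade finishes
    )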
+ ``_ @@ -5462,7 +5704,10 @@ def validate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Validate an anomaly detection job. + .. raw:: html + +

Validate an anomaly detection job.

+ ``_ @@ -5532,7 +5777,10 @@ def validate_detector( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Validate an anomaly detection job. + .. raw:: html + +

Validate an anomaly detection job.

+ ``_ diff --git a/elasticsearch/_sync/client/monitoring.py b/elasticsearch/_sync/client/monitoring.py index 455a78304..eae014b19 100644 --- a/elasticsearch/_sync/client/monitoring.py +++ b/elasticsearch/_sync/client/monitoring.py @@ -42,8 +42,11 @@ def bulk( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Send monitoring data. This API is used by the monitoring features to send monitoring - data. + .. raw:: html + +

Send monitoring data. + This API is used by the monitoring features to send monitoring data.

+ ``_ diff --git a/elasticsearch/_sync/client/nodes.py b/elasticsearch/_sync/client/nodes.py index a466586be..61bf40d31 100644 --- a/elasticsearch/_sync/client/nodes.py +++ b/elasticsearch/_sync/client/nodes.py @@ -44,8 +44,11 @@ def clear_repositories_metering_archive( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear the archived repositories metering. Clear the archived repositories metering - information in the cluster. + .. raw:: html + +

Clear the archived repositories metering. + Clear the archived repositories metering information in the cluster.

+ ``_ @@ -94,11 +97,13 @@ def get_repositories_metering_info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get cluster repositories metering. Get repositories metering information for - a cluster. This API exposes monotonically non-decreasing counters and it is expected - that clients would durably store the information needed to compute aggregations - over a period of time. Additionally, the information exposed by this API is volatile, - meaning that it will not be present after node restarts. + .. raw:: html + +

Get cluster repositories metering. + Get repositories metering information for a cluster. + This API exposes monotonically non-decreasing counters and it is expected that clients would durably store the information needed to compute aggregations over a period of time. + Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts.

+ ``_ @@ -150,9 +155,12 @@ def hot_threads( ] = None, ) -> TextApiResponse: """ - Get the hot threads for nodes. Get a breakdown of the hot threads on each selected - node in the cluster. The output is plain text with a breakdown of the top hot - threads for each node. + .. raw:: html + +

Get the hot threads for nodes. + Get a breakdown of the hot threads on each selected node in the cluster. + The output is plain text with a breakdown of the top hot threads for each node.

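A minimal sketch; as noted above, the response is plain text rather than JSON:

    # "es" is an Elasticsearch client instance
    print(es.nodes.hot_threads())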
+ ``_ @@ -221,8 +229,11 @@ def info( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get node information. By default, the API returns all attributes and core settings - for cluster nodes. + .. raw:: html + +

Get node information. + By default, the API returns all attributes and core settings for cluster nodes.

+ ``_ @@ -286,18 +297,16 @@ def reload_secure_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Reload the keystore on nodes in the cluster. Secure settings are stored in an - on-disk keystore. Certain of these settings are reloadable. That is, you can - change them on disk and reload them without restarting any nodes in the cluster. - When you have updated reloadable secure settings in your keystore, you can use - this API to reload those settings on each node. When the Elasticsearch keystore - is password protected and not simply obfuscated, you must provide the password - for the keystore when you reload the secure settings. Reloading the settings - for the whole cluster assumes that the keystores for all nodes are protected - with the same password; this method is allowed only when inter-node communications - are encrypted. Alternatively, you can reload the secure settings on each node - by locally accessing the API and passing the node-specific Elasticsearch keystore - password. + .. raw:: html + +

Reload the keystore on nodes in the cluster.

+

Secure settings are stored in an on-disk keystore. Certain of these settings are reloadable. + That is, you can change them on disk and reload them without restarting any nodes in the cluster. + When you have updated reloadable secure settings in your keystore, you can use this API to reload those settings on each node.

+

When the Elasticsearch keystore is password protected and not simply obfuscated, you must provide the password for the keystore when you reload the secure settings. + Reloading the settings for the whole cluster assumes that the keystores for all nodes are protected with the same password; this method is allowed only when inter-node communications are encrypted. + Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password.

+ ``_ @@ -367,8 +376,12 @@ def stats( types: t.Optional[t.Sequence[str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get node statistics. Get statistics for nodes in a cluster. By default, all stats - are returned. You can limit the returned information by using metrics. + .. raw:: html + +

Get node statistics. + Get statistics for nodes in a cluster. + By default, all stats are returned. You can limit the returned information by using metrics.

+ ``_ @@ -480,7 +493,10 @@ def usage( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get feature usage information. + .. raw:: html + +

Get feature usage information.

+ ``_ diff --git a/elasticsearch/_sync/client/query_rules.py b/elasticsearch/_sync/client/query_rules.py index 147642436..2b322949c 100644 --- a/elasticsearch/_sync/client/query_rules.py +++ b/elasticsearch/_sync/client/query_rules.py @@ -37,9 +37,12 @@ def delete_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a query rule. Delete a query rule within a query ruleset. This is a destructive - action that is only recoverable by re-adding the same rule with the create or - update query rule API. + .. raw:: html + +

Delete a query rule. + Delete a query rule within a query ruleset. + This is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API.

+ ``_ @@ -87,8 +90,12 @@ def delete_ruleset( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a query ruleset. Remove a query ruleset and its associated data. This - is a destructive action that is not recoverable. + .. raw:: html + +

Delete a query ruleset. + Remove a query ruleset and its associated data. + This is a destructive action that is not recoverable.

+ ``_ @@ -129,7 +136,11 @@ def get_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a query rule. Get details about a query rule within a query ruleset. + .. raw:: html + +

Get a query rule. + Get details about a query rule within a query ruleset.

+ ``_ @@ -177,7 +188,11 @@ def get_ruleset( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a query ruleset. Get details about a query ruleset. + .. raw:: html + +

Get a query ruleset. + Get details about a query ruleset.

+ ``_ @@ -220,7 +235,11 @@ def list_rulesets( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Get all query rulesets. Get summarized information about the query rulesets. + .. raw:: html + +

Get all query rulesets. + Get summarized information about the query rulesets.

+ ``_ @@ -273,13 +292,15 @@ def put_rule( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a query rule. Create or update a query rule within a query ruleset. - IMPORTANT: Due to limitations within pinned queries, you can only pin documents - using ids or docs, but cannot use both in single rule. It is advised to use one - or the other in query rulesets, to avoid errors. Additionally, pinned queries - have a maximum limit of 100 pinned hits. If multiple matching rules pin more - than 100 documents, only the first 100 documents are pinned in the order they - are specified in the ruleset. + .. raw:: html + +

Create or update a query rule. + Create or update a query rule within a query ruleset.

+

IMPORTANT: Due to limitations within pinned queries, you can only pin documents using ids or docs, but cannot use both in a single rule. + It is advised to use one or the other in query rulesets to avoid errors. + Additionally, pinned queries have a maximum limit of 100 pinned hits. + If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.

+ ``_ @@ -357,14 +378,16 @@ def put_ruleset( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a query ruleset. There is a limit of 100 rules per ruleset. - This limit can be increased by using the `xpack.applications.rules.max_rules_per_ruleset` - cluster setting. IMPORTANT: Due to limitations within pinned queries, you can - only select documents using `ids` or `docs`, but cannot use both in single rule. - It is advised to use one or the other in query rulesets, to avoid errors. Additionally, - pinned queries have a maximum limit of 100 pinned hits. If multiple matching - rules pin more than 100 documents, only the first 100 documents are pinned in - the order they are specified in the ruleset. + .. raw:: html + +

Create or update a query ruleset. + There is a limit of 100 rules per ruleset. + This limit can be increased by using the xpack.applications.rules.max_rules_per_ruleset cluster setting.

+

IMPORTANT: Due to limitations within pinned queries, you can only select documents using ids or docs, but cannot use both in a single rule. + It is advised to use one or the other in query rulesets to avoid errors. + Additionally, pinned queries have a maximum limit of 100 pinned hits. + If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.

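A hypothetical sketch of a one-rule ruleset that pins a document by id; the ruleset id, criteria, and document id are placeholders, and the rule structure is assumed to follow the query rules API:

    # "es" is an Elasticsearch client instance
    es.query_rules.put_ruleset(
        ruleset_id="my-ruleset",
        rules=[
            {
                "rule_id": "rule-1",
                "type": "pinned",
                "criteria": [
                    {"type": "exact", "metadata": "user_query", "values": ["puggles"]}
                ],
                "actions": {"ids": ["id1"]},  # pin by ids (not docs), per the note above
            }
        ],
    )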
+ ``_ @@ -417,8 +440,11 @@ def test( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Test a query ruleset. Evaluate match criteria against a query ruleset to identify - the rules that would match that criteria. + .. raw:: html + +

Test a query ruleset. + Evaluate match criteria against a query ruleset to identify the rules that would match that criteria.

+ ``_ diff --git a/elasticsearch/_sync/client/rollup.py b/elasticsearch/_sync/client/rollup.py index 3baa6c10c..84ca3d410 100644 --- a/elasticsearch/_sync/client/rollup.py +++ b/elasticsearch/_sync/client/rollup.py @@ -43,20 +43,29 @@ def delete_job( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a rollup job. A job must be stopped before it can be deleted. If you attempt - to delete a started job, an error occurs. Similarly, if you attempt to delete - a nonexistent job, an exception occurs. IMPORTANT: When you delete a job, you - remove only the process that is actively monitoring and rolling up data. The - API does not delete any previously rolled up data. This is by design; a user - may wish to roll up a static data set. Because the data set is static, after - it has been fully rolled up there is no need to keep the indexing rollup job - around (as there will be no new data). Thus the job can be deleted, leaving behind - the rolled up data for analysis. If you wish to also remove the rollup data and - the rollup index contains the data for only a single job, you can delete the - whole rollup index. If the rollup index stores data from several jobs, you must - issue a delete-by-query that targets the rollup job's identifier in the rollup - index. For example: ``` POST my_rollup_index/_delete_by_query { "query": { "term": - { "_rollup.id": "the_rollup_job_id" } } } ``` + .. raw:: html + +

Delete a rollup job.

+

A job must be stopped before it can be deleted. + If you attempt to delete a started job, an error occurs. + Similarly, if you attempt to delete a nonexistent job, an exception occurs.

+

IMPORTANT: When you delete a job, you remove only the process that is actively monitoring and rolling up data. + The API does not delete any previously rolled up data. + This is by design; a user may wish to roll up a static data set. + Because the data set is static, after it has been fully rolled up there is no need to keep the indexing rollup job around (as there will be no new data). + Thus the job can be deleted, leaving behind the rolled up data for analysis. + If you wish to also remove the rollup data and the rollup index contains the data for only a single job, you can delete the whole rollup index. + If the rollup index stores data from several jobs, you must issue a delete-by-query that targets the rollup job's identifier in the rollup index. For example:

+
POST my_rollup_index/_delete_by_query
+          {
+            "query": {
+              "term": {
+                "_rollup.id": "the_rollup_job_id"
+              }
+            }
+          }
+          
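A rough Python-client equivalent of this flow; the job ID and index name are placeholders, and the delete-by-query step is only needed when the rollup index holds data from several jobs:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # The job must already be stopped before it can be deleted.
    client.rollup.delete_job(id="the_rollup_job_id")

    # Optionally remove this job's rolled-up documents from a shared rollup index.
    client.delete_by_query(
        index="my_rollup_index",
        query={"term": {"_rollup.id": "the_rollup_job_id"}},
    )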
+ ``_ @@ -97,11 +106,14 @@ def get_jobs( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get rollup job information. Get the configuration, stats, and status of rollup - jobs. NOTE: This API returns only active (both `STARTED` and `STOPPED`) jobs. - If a job was created, ran for a while, then was deleted, the API does not return - any details about it. For details about a historical rollup job, the rollup capabilities - API may be more useful. + .. raw:: html + +

Get rollup job information. + Get the configuration, stats, and status of rollup jobs.

+

NOTE: This API returns only active (both STARTED and STOPPED) jobs. + If a job was created, ran for a while, then was deleted, the API does not return any details about it. + For details about a historical rollup job, the rollup capabilities API may be more useful.

+ ``_ @@ -146,15 +158,18 @@ def get_rollup_caps( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the rollup job capabilities. Get the capabilities of any rollup jobs that - have been configured for a specific index or index pattern. This API is useful - because a rollup job is often configured to rollup only a subset of fields from - the source index. Furthermore, only certain aggregations can be configured for - various fields, leading to a limited subset of functionality depending on that - configuration. This API enables you to inspect an index and determine: 1. Does - this index have associated rollup data somewhere in the cluster? 2. If yes to - the first question, what fields were rolled up, what aggregations can be performed, - and where does the data live? + .. raw:: html + +

Get the rollup job capabilities. + Get the capabilities of any rollup jobs that have been configured for a specific index or index pattern.

+

This API is useful because a rollup job is often configured to roll up only a subset of fields from the source index. + Furthermore, only certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on that configuration. + This API enables you to inspect an index and determine:

+
  1. Does this index have associated rollup data somewhere in the cluster?
  2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live?
+ ``_ @@ -199,12 +214,16 @@ def get_rollup_index_caps( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the rollup index capabilities. Get the rollup capabilities of all jobs inside - of a rollup index. A single rollup index may store the data for multiple rollup - jobs and may have a variety of capabilities depending on those jobs. This API - enables you to determine: * What jobs are stored in an index (or indices specified - via a pattern)? * What target indices were rolled up, what fields were used in - those rollups, and what aggregations can be performed on each job? + .. raw:: html + +

Get the rollup index capabilities. + Get the rollup capabilities of all jobs inside of a rollup index. + A single rollup index may store the data for multiple rollup jobs and may have a variety of capabilities depending on those jobs. This API enables you to determine:

+
  • What jobs are stored in an index (or indices specified via a pattern)?
  • What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job?
+ ``_ @@ -267,16 +286,14 @@ def put_job( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a rollup job. WARNING: From 8.15.0, calling this API in a cluster with - no rollup usage will fail with a message about the deprecation and planned removal - of rollup features. A cluster needs to contain either a rollup job or a rollup - index in order for this API to be allowed to run. The rollup job configuration - contains all the details about how the job should run, when it indexes documents, - and what future queries will be able to run against the rollup index. There are - three main sections to the job configuration: the logistical details about the - job (for example, the cron schedule), the fields that are used for grouping, - and what metrics to collect for each group. Jobs are created in a `STOPPED` state. - You can start them with the start rollup jobs API. + .. raw:: html + +

Create a rollup job.

+

WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will fail with a message about the deprecation and planned removal of rollup features. A cluster needs to contain either a rollup job or a rollup index in order for this API to be allowed to run.

+

The rollup job configuration contains all the details about how the job should run, when it indexes documents, and what future queries will be able to run against the rollup index.

+

There are three main sections to the job configuration: the logistical details about the job (for example, the cron schedule), the fields that are used for grouping, and what metrics to collect for each group.

+

Jobs are created in a STOPPED state. You can start them with the start rollup jobs API.

+ ``_ @@ -393,25 +410,38 @@ def rollup_search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Search rolled-up data. The rollup search endpoint is needed because, internally, - rolled-up documents utilize a different document structure than the original - data. It rewrites standard Query DSL into a format that matches the rollup documents - then takes the response and rewrites it back to what a client would expect given - the original query. The request body supports a subset of features from the regular - search API. The following functionality is not available: `size`: Because rollups - work on pre-aggregated data, no search hits can be returned and so size must - be set to zero or omitted entirely. `highlighter`, `suggestors`, `post_filter`, - `profile`, `explain`: These are similarly disallowed. **Searching both historical - rollup and non-rollup data** The rollup search API has the capability to search - across both "live" non-rollup data and the aggregated rollup data. This is done - by simply adding the live indices to the URI. For example: ``` GET sensor-1,sensor_rollup/_rollup_search - { "size": 0, "aggregations": { "max_temperature": { "max": { "field": "temperature" - } } } } ``` The rollup search endpoint does two things when the search runs: - * The original request is sent to the non-rollup index unaltered. * A rewritten - version of the original request is sent to the rollup index. When the two responses - are received, the endpoint rewrites the rollup response and merges the two together. - During the merging process, if there is any overlap in buckets between the two - responses, the buckets from the non-rollup index are used. + .. raw:: html + +

Search rolled-up data. + The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. + It rewrites standard Query DSL into a format that matches the rollup documents, then takes the response and rewrites it back to what a client would expect given the original query.

+

The request body supports a subset of features from the regular search API. + The following functionality is not available:

+

size: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely. + highlighter, suggestors, post_filter, profile, explain: These are similarly disallowed.

+

Searching both historical rollup and non-rollup data

+

The rollup search API has the capability to search across both "live" non-rollup data and the aggregated rollup data. + This is done by simply adding the live indices to the URI. For example:

+
GET sensor-1,sensor_rollup/_rollup_search
+          {
+            "size": 0,
+            "aggregations": {
+               "max_temperature": {
+                "max": {
+                  "field": "temperature"
+                }
+              }
+            }
+          }
+          
+

The rollup search endpoint does two things when the search runs:

+
  • The original request is sent to the non-rollup index unaltered.
  • A rewritten version of the original request is sent to the rollup index.
+

When the two responses are received, the endpoint rewrites the rollup response and merges the two together. + During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used.
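A rough Python-client sketch of the example above; the index names and the aggregation are illustrative:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Search live data and rolled-up data together; size must be 0 (or omitted) for rollups.
    resp = client.rollup.rollup_search(
        index="sensor-1,sensor_rollup",
        size=0,
        aggregations={"max_temperature": {"max": {"field": "temperature"}}},
    )
    print(resp["aggregations"]["max_temperature"]["value"])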

+ ``_ @@ -484,8 +514,12 @@ def start_job( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Start rollup jobs. If you try to start a job that does not exist, an exception - occurs. If you try to start a job that is already started, nothing happens. + .. raw:: html + +

Start rollup jobs. + If you try to start a job that does not exist, an exception occurs. + If you try to start a job that is already started, nothing happens.

+ ``_ @@ -528,14 +562,18 @@ def stop_job( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stop rollup jobs. If you try to stop a job that does not exist, an exception - occurs. If you try to stop a job that is already stopped, nothing happens. Since - only a stopped job can be deleted, it can be useful to block the API until the - indexer has fully stopped. This is accomplished with the `wait_for_completion` - query parameter, and optionally a timeout. For example: ``` POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s - ``` The parameter blocks the API call from returning until either the job has - moved to STOPPED or the specified time has elapsed. If the specified time elapses - without the job moving to STOPPED, a timeout exception occurs. + .. raw:: html + +

Stop rollup jobs. + If you try to stop a job that does not exist, an exception occurs. + If you try to stop a job that is already stopped, nothing happens.

+

Since only a stopped job can be deleted, it can be useful to block the API until the indexer has fully stopped. + This is accomplished with the wait_for_completion query parameter, and optionally a timeout. For example:

+
POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s
+          
+

The parameter blocks the API call from returning until either the job has moved to STOPPED or the specified time has elapsed. + If the specified time elapses without the job moving to STOPPED, a timeout exception occurs.
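The same call from the Python client might look like this sketch; the job name is a placeholder:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Block until the "sensor" job reaches STOPPED, or raise a timeout after 10 seconds.
    client.rollup.stop_job(id="sensor", wait_for_completion=True, timeout="10s")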

+ ``_ diff --git a/elasticsearch/_sync/client/search_application.py b/elasticsearch/_sync/client/search_application.py index 64858faae..76ef2d456 100644 --- a/elasticsearch/_sync/client/search_application.py +++ b/elasticsearch/_sync/client/search_application.py @@ -43,8 +43,11 @@ def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a search application. Remove a search application and its associated alias. - Indices attached to the search application are not removed. + .. raw:: html + +

Delete a search application. + Remove a search application and its associated alias. Indices attached to the search application are not removed.

+ ``_ @@ -85,8 +88,11 @@ def delete_behavioral_analytics( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a behavioral analytics collection. The associated data stream is also - deleted. + .. raw:: html + +

Delete a behavioral analytics collection. + The associated data stream is also deleted.

+ ``_ @@ -127,7 +133,10 @@ def get( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get search application details. + .. raw:: html + +

Get search application details.

+ ``_ @@ -168,7 +177,10 @@ def get_behavioral_analytics( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get behavioral analytics collections. + .. raw:: html + +

Get behavioral analytics collections.

+ ``_ @@ -216,7 +228,11 @@ def list( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Get search applications. Get information about search applications. + .. raw:: html + +

Get search applications. + Get information about search applications.

+ ``_ @@ -269,7 +285,10 @@ def post_behavioral_analytics_event( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a behavioral analytics collection event. + .. raw:: html + +

Create a behavioral analytics collection event.

+ ``_ @@ -333,7 +352,10 @@ def put( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a search application. + .. raw:: html + +

Create or update a search application.

+ ``_ @@ -387,7 +409,10 @@ def put_behavioral_analytics( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a behavioral analytics collection. + .. raw:: html + +

Create a behavioral analytics collection.

+ ``_ @@ -433,13 +458,14 @@ def render_query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Render a search application query. Generate an Elasticsearch query using the - specified query parameters and the search template associated with the search - application or a default template if none is specified. If a parameter used in - the search template is not specified in `params`, the parameter's default value - will be used. The API returns the specific Elasticsearch query that would be - generated and run by calling the search application search API. You must have - `read` privileges on the backing alias of the search application. + .. raw:: html + +

Render a search application query. + Generate an Elasticsearch query using the specified query parameters and the search template associated with the search application or a default template if none is specified. + If a parameter used in the search template is not specified in params, the parameter's default value will be used. + The API returns the specific Elasticsearch query that would be generated and run by calling the search application search API.

+

You must have read privileges on the backing alias of the search application.
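A minimal sketch, assuming a search application named my-app whose template accepts a query_string parameter (both names are hypothetical):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Returns the Elasticsearch query the application would run, without executing it.
    rendered = client.search_application.render_query(
        name="my-app",
        params={"query_string": "pugs"},
    )
    print(rendered)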

+ ``_ @@ -498,10 +524,12 @@ def search( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Run a search application search. Generate and run an Elasticsearch query that - uses the specified query parameteter and the search template associated with - the search application or default template. Unspecified template parameters are - assigned their default values if applicable. + .. raw:: html + +

Run a search application search. + Generate and run an Elasticsearch query that uses the specified query parameter and the search template associated with the search application or default template. + Unspecified template parameters are assigned their default values if applicable.
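Under the same assumptions as the render-query sketch above, running the search looks like:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    resp = client.search_application.search(
        name="my-app",                    # hypothetical search application
        params={"query_string": "pugs"},  # template parameters; unset ones use their defaults
    )
    print(resp["hits"]["total"])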

+ ``_ diff --git a/elasticsearch/_sync/client/searchable_snapshots.py b/elasticsearch/_sync/client/searchable_snapshots.py index 63c1d4fda..0d5575c0c 100644 --- a/elasticsearch/_sync/client/searchable_snapshots.py +++ b/elasticsearch/_sync/client/searchable_snapshots.py @@ -44,8 +44,11 @@ def cache_stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get cache statistics. Get statistics about the shared cache for partially mounted - indices. + .. raw:: html + +

Get cache statistics. + Get statistics about the shared cache for partially mounted indices.

+ ``_ @@ -102,8 +105,11 @@ def clear_cache( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear the cache. Clear indices and data streams from the shared cache for partially - mounted indices. + .. raw:: html + +

Clear the cache. + Clear indices and data streams from the shared cache for partially mounted indices.

+ ``_ @@ -176,9 +182,13 @@ def mount( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use - this API for snapshots managed by index lifecycle management (ILM). Manually - mounting ILM-managed snapshots can interfere with ILM processes. + .. raw:: html + +

Mount a snapshot. + Mount a snapshot as a searchable snapshot index. + Do not use this API for snapshots managed by index lifecycle management (ILM). + Manually mounting ILM-managed snapshots can interfere with ILM processes.
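A hedged sketch of mounting an index from a snapshot via the Python client; the repository, snapshot, and index names are placeholders:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Mount an index from a snapshot as a searchable snapshot index (not for ILM-managed snapshots).
    client.searchable_snapshots.mount(
        repository="my_repository",
        snapshot="my_snapshot",
        index="my_index",
        renamed_index="my_index_mounted",  # optional new name for the mounted index
        wait_for_completion=True,
    )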

+ ``_ @@ -263,7 +273,10 @@ def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get searchable snapshot statistics. + .. raw:: html + +

Get searchable snapshot statistics.

+ ``_ diff --git a/elasticsearch/_sync/client/security.py b/elasticsearch/_sync/client/security.py index 8c506ac40..c8bb4cf4e 100644 --- a/elasticsearch/_sync/client/security.py +++ b/elasticsearch/_sync/client/security.py @@ -44,21 +44,19 @@ def activate_user_profile( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Activate a user profile. Create or update a user profile on behalf of another - user. NOTE: The user profile feature is designed only for use by Kibana and Elastic's - Observability, Enterprise Search, and Elastic Security solutions. Individual - users and external applications should not call this API directly. The calling - application must have either an `access_token` or a combination of `username` - and `password` for the user that the profile document is intended for. Elastic - reserves the right to change or remove this feature in future releases without - prior notice. This API creates or updates a profile document for end users with - information that is extracted from the user's authentication object including - `username`, `full_name,` `roles`, and the authentication realm. For example, - in the JWT `access_token` case, the profile user's `username` is extracted from - the JWT token claim pointed to by the `claims.principal` setting of the JWT realm - that authenticated the token. When updating a profile document, the API enables - the document if it was disabled. Any updates do not change existing content for - either the `labels` or `data` fields. + .. raw:: html + +

Activate a user profile.

+

Create or update a user profile on behalf of another user.

+

NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. + Individual users and external applications should not call this API directly. + The calling application must have either an access_token or a combination of username and password for the user that the profile document is intended for. + Elastic reserves the right to change or remove this feature in future releases without prior notice.

+

This API creates or updates a profile document for end users with information that is extracted from the user's authentication object including username, full_name, roles, and the authentication realm. + For example, in the JWT access_token case, the profile user's username is extracted from the JWT token claim pointed to by the claims.principal setting of the JWT realm that authenticated the token.

+

When updating a profile document, the API enables the document if it was disabled. + Any updates do not change existing content for either the labels or data fields.

+ ``_ @@ -117,12 +115,14 @@ def authenticate( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Authenticate a user. Authenticates a user and returns information about the authenticated - user. Include the user information in a [basic auth header](https://en.wikipedia.org/wiki/Basic_access_authentication). - A successful call returns a JSON structure that shows user information such as - their username, the roles that are assigned to the user, any assigned metadata, - and information about the realms that authenticated and authorized the user. - If the user cannot be authenticated, this API returns a 401 status code. + .. raw:: html + +

Authenticate a user.

+

Authenticates a user and returns information about the authenticated user. + Include the user information in a basic auth header. + A successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user. + If the user cannot be authenticated, this API returns a 401 status code.

+ ``_ """ @@ -164,9 +164,12 @@ def bulk_delete_role( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Bulk delete roles. The role management APIs are generally the preferred way to - manage roles, rather than using file-based role management. The bulk delete roles - API cannot delete roles that are defined in roles files. + .. raw:: html + +

Bulk delete roles.

+

The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. + The bulk delete roles API cannot delete roles that are defined in roles files.

+ ``_ @@ -222,9 +225,12 @@ def bulk_put_role( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Bulk create or update roles. The role management APIs are generally the preferred - way to manage roles, rather than using file-based role management. The bulk create - or update roles API cannot update roles that are defined in roles files. + .. raw:: html + +

Bulk create or update roles.

+

The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. + The bulk create or update roles API cannot update roles that are defined in roles files.

+ ``_ @@ -280,23 +286,19 @@ def bulk_update_api_keys( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Bulk update API keys. Update the attributes for multiple API keys. IMPORTANT: - It is not possible to use an API key as the authentication credential for this - API. To update API keys, the owner user's credentials are required. This API - is similar to the update API key API but enables you to apply the same update - to multiple API keys in one API call. This operation can greatly improve performance - over making individual updates. It is not possible to update expired or invalidated - API keys. This API supports updates to API key access scope, metadata and expiration. - The access scope of each API key is derived from the `role_descriptors` you specify - in the request and a snapshot of the owner user's permissions at the time of - the request. The snapshot of the owner's permissions is updated automatically - on every call. IMPORTANT: If you don't specify `role_descriptors` in the request, - a call to this API might still change an API key's access scope. This change - can occur if the owner user's permissions have changed since the API key was - created or last modified. A successful request returns a JSON structure that - contains the IDs of all updated API keys, the IDs of API keys that already had - the requested changes and did not require an update, and error details for any - failed update. + .. raw:: html + +

Bulk update API keys. + Update the attributes for multiple API keys.

+

IMPORTANT: It is not possible to use an API key as the authentication credential for this API. To update API keys, the owner user's credentials are required.

+

This API is similar to the update API key API but enables you to apply the same update to multiple API keys in one API call. This operation can greatly improve performance over making individual updates.

+

It is not possible to update expired or invalidated API keys.

+

This API supports updates to API key access scope, metadata and expiration. + The access scope of each API key is derived from the role_descriptors you specify in the request and a snapshot of the owner user's permissions at the time of the request. + The snapshot of the owner's permissions is updated automatically on every call.

+

IMPORTANT: If you don't specify role_descriptors in the request, a call to this API might still change an API key's access scope. This change can occur if the owner user's permissions have changed since the API key was created or last modified.

+

A successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and error details for any failed update.

+ ``_ @@ -370,8 +372,11 @@ def change_password( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Change passwords. Change the passwords of users in the native realm and built-in - users. + .. raw:: html + +

Change passwords.

+

Change the passwords of users in the native realm and built-in users.

+ ``_ @@ -433,8 +438,12 @@ def clear_api_key_cache( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear the API key cache. Evict a subset of all entries from the API key cache. - The cache is also automatically cleared on state changes of the security index. + .. raw:: html + +

Clear the API key cache.

+

Evict a subset of all entries from the API key cache. + The cache is also automatically cleared on state changes of the security index.

+ ``_ @@ -475,9 +484,12 @@ def clear_cached_privileges( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear the privileges cache. Evict privileges from the native application privilege - cache. The cache is also automatically cleared for applications that have their - privileges updated. + .. raw:: html + +

Clear the privileges cache.

+

Evict privileges from the native application privilege cache. + The cache is also automatically cleared for applications that have their privileges updated.

+ ``_ @@ -519,12 +531,15 @@ def clear_cached_realms( usernames: t.Optional[t.Sequence[str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear the user cache. Evict users from the user cache. You can completely clear - the cache or evict specific users. User credentials are cached in memory on each - node to avoid connecting to a remote authentication service or hitting the disk - for every incoming request. There are realm settings that you can use to configure - the user cache. For more information, refer to the documentation about controlling - the user cache. + .. raw:: html + +

Clear the user cache.

+

Evict users from the user cache. + You can completely clear the cache or evict specific users.

+

User credentials are cached in memory on each node to avoid connecting to a remote authentication service or hitting the disk for every incoming request. + There are realm settings that you can use to configure the user cache. + For more information, refer to the documentation about controlling the user cache.
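For example, evicting a single user from one realm's cache might look like the following sketch; the realm and user names are hypothetical:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Evict only "jsmith" from the "default_native" realm cache; omit usernames to clear it entirely.
    client.security.clear_cached_realms(realms="default_native", usernames=["jsmith"])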

+ ``_ @@ -570,7 +585,11 @@ def clear_cached_roles( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear the roles cache. Evict roles from the native role cache. + .. raw:: html + +

Clear the roles cache.

+

Evict roles from the native role cache.

+ ``_ @@ -614,13 +633,15 @@ def clear_cached_service_tokens( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear service account token caches. Evict a subset of all entries from the service - account token caches. Two separate caches exist for service account tokens: one - cache for tokens backed by the `service_tokens` file, and another for tokens - backed by the `.security` index. This API clears matching entries from both caches. - The cache for service account tokens backed by the `.security` index is cleared - automatically on state changes of the security index. The cache for tokens backed - by the `service_tokens` file is cleared automatically on file changes. + .. raw:: html + +

Clear service account token caches.

+

Evict a subset of all entries from the service account token caches. + Two separate caches exist for service account tokens: one cache for tokens backed by the service_tokens file, and another for tokens backed by the .security index. + This API clears matching entries from both caches.

+

The cache for service account tokens backed by the .security index is cleared automatically on state changes of the security index. + The cache for tokens backed by the service_tokens file is cleared automatically on file changes.

+ ``_ @@ -681,16 +702,18 @@ def create_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create an API key. Create an API key for access without requiring basic authentication. - IMPORTANT: If the credential that is used to authenticate this request is an - API key, the derived API key cannot have any privileges. If you specify privileges, - the API returns an error. A successful request returns a JSON structure that - contains the API key, its unique id, and its name. If applicable, it also returns - expiration information for the API key in milliseconds. NOTE: By default, API - keys never expire. You can specify expiration information when you create the - API keys. The API keys are created by the Elasticsearch API key service, which - is automatically enabled. To configure or turn off the API key service, refer - to API key service setting documentation. + .. raw:: html + +

Create an API key.

+

Create an API key for access without requiring basic authentication.

+

IMPORTANT: If the credential that is used to authenticate this request is an API key, the derived API key cannot have any privileges. + If you specify privileges, the API returns an error.

+

A successful request returns a JSON structure that contains the API key, its unique id, and its name. + If applicable, it also returns expiration information for the API key in milliseconds.

+

NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys.

+

The API keys are created by the Elasticsearch API key service, which is automatically enabled. + To configure or turn off the API key service, refer to API key service setting documentation.
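A minimal sketch of creating a key with an expiration and a restricted role descriptor; the key name, index pattern, and privileges are illustrative:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    resp = client.security.create_api_key(
        name="my-app-key",
        expiration="30d",  # omit for a key that never expires
        role_descriptors={
            "logs-read": {"indices": [{"names": ["logs-*"], "privileges": ["read"]}]}
        },
    )
    # The key value is only returned at creation time, so store it securely.
    print(resp["id"], resp["api_key"])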

+ ``_ @@ -768,21 +791,19 @@ def create_cross_cluster_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a cross-cluster API key. Create an API key of the `cross_cluster` type - for the API key based remote cluster access. A `cross_cluster` API key cannot - be used to authenticate through the REST interface. IMPORTANT: To authenticate - this request you must use a credential that is not an API key. Even if you use - an API key that has the required privilege, the API returns an error. Cross-cluster - API keys are created by the Elasticsearch API key service, which is automatically - enabled. NOTE: Unlike REST API keys, a cross-cluster API key does not capture - permissions of the authenticated user. The API key’s effective permission is - exactly as specified with the `access` property. A successful request returns - a JSON structure that contains the API key, its unique ID, and its name. If applicable, - it also returns expiration information for the API key in milliseconds. By default, - API keys never expire. You can specify expiration information when you create - the API keys. Cross-cluster API keys can only be updated with the update cross-cluster - API key API. Attempting to update them with the update REST API key API or the - bulk update REST API keys API will result in an error. + .. raw:: html + +

Create a cross-cluster API key.

+

Create an API key of the cross_cluster type for the API key based remote cluster access. + A cross_cluster API key cannot be used to authenticate through the REST interface.

+

IMPORTANT: To authenticate this request you must use a credential that is not an API key. Even if you use an API key that has the required privilege, the API returns an error.

+

Cross-cluster API keys are created by the Elasticsearch API key service, which is automatically enabled.

+

NOTE: Unlike REST API keys, a cross-cluster API key does not capture permissions of the authenticated user. The API key’s effective permission is exactly as specified with the access property.

+

A successful request returns a JSON structure that contains the API key, its unique ID, and its name. If applicable, it also returns expiration information for the API key in milliseconds.

+

By default, API keys never expire. You can specify expiration information when you create the API keys.

+

Cross-cluster API keys can only be updated with the update cross-cluster API key API. + Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error.

+ ``_ @@ -851,9 +872,13 @@ def create_service_token( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a service account token. Create a service accounts token for access without - requiring basic authentication. NOTE: Service account tokens never expire. You - must actively delete them if they are no longer needed. + .. raw:: html + +

Create a service account token.

+

Create a service account token for access without requiring basic authentication.

+

NOTE: Service account tokens never expire. + You must actively delete them if they are no longer needed.
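For example, creating a token for the elastic/fleet-server service account might look like this sketch; the token name is a placeholder:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    resp = client.security.create_service_token(
        namespace="elastic",
        service="fleet-server",
        name="my-fleet-token",  # hypothetical token name
    )
    # The token value cannot be retrieved later, so capture it now.
    print(resp["token"]["value"])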

+ ``_ @@ -929,20 +954,17 @@ def delegate_pki( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delegate PKI authentication. This API implements the exchange of an X509Certificate - chain for an Elasticsearch access token. The certificate chain is validated, - according to RFC 5280, by sequentially considering the trust configuration of - every installed PKI realm that has `delegation.enabled` set to `true`. A successfully - trusted client certificate is also subject to the validation of the subject distinguished - name according to thw `username_pattern` of the respective realm. This API is - called by smart and trusted proxies, such as Kibana, which terminate the user's - TLS session but still want to authenticate the user by using a PKI realm—-​as - if the user connected directly to Elasticsearch. IMPORTANT: The association between - the subject public key in the target certificate and the corresponding private - key is not validated. This is part of the TLS authentication process and it is - delegated to the proxy that calls this API. The proxy is trusted to have performed - the TLS authentication and this API translates that authentication into an Elasticsearch - access token. + .. raw:: html + +

Delegate PKI authentication.

+

This API implements the exchange of an X509Certificate chain for an Elasticsearch access token. + The certificate chain is validated, according to RFC 5280, by sequentially considering the trust configuration of every installed PKI realm that has delegation.enabled set to true. + A successfully trusted client certificate is also subject to the validation of the subject distinguished name according to the username_pattern of the respective realm.

+

This API is called by smart and trusted proxies, such as Kibana, which terminate the user's TLS session but still want to authenticate the user by using a PKI realm, as if the user connected directly to Elasticsearch.

+

IMPORTANT: The association between the subject public key in the target certificate and the corresponding private key is not validated. + This is part of the TLS authentication process and it is delegated to the proxy that calls this API. + The proxy is trusted to have performed the TLS authentication and this API translates that authentication into an Elasticsearch access token.

+ ``_ @@ -998,10 +1020,15 @@ def delete_privileges( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete application privileges. To use this API, you must have one of the following - privileges: * The `manage_security` cluster privilege (or a greater privilege - such as `all`). * The "Manage Application Privileges" global privilege for the - application being referenced in the request. + .. raw:: html + +

Delete application privileges.

+

To use this API, you must have one of the following privileges:

+
  • The manage_security cluster privilege (or a greater privilege such as all).
  • The "Manage Application Privileges" global privilege for the application being referenced in the request.
+ ``_ @@ -1058,10 +1085,13 @@ def delete_role( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete roles. Delete roles in the native realm. The role management APIs are - generally the preferred way to manage roles, rather than using file-based role - management. The delete roles API cannot remove roles that are defined in roles - files. + .. raw:: html + +

Delete roles.

+

Delete roles in the native realm. + The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. + The delete roles API cannot remove roles that are defined in roles files.

+ ``_ @@ -1109,10 +1139,13 @@ def delete_role_mapping( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete role mappings. Role mappings define which roles are assigned to each user. - The role mapping APIs are generally the preferred way to manage role mappings - rather than using role mapping files. The delete role mappings API cannot remove - role mappings that are defined in role mapping files. + .. raw:: html + +

Delete role mappings.

+

Role mappings define which roles are assigned to each user. + The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. + The delete role mappings API cannot remove role mappings that are defined in role mapping files.

+ ``_ @@ -1164,8 +1197,11 @@ def delete_service_token( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete service account tokens. Delete service account tokens for a service in - a specified namespace. + .. raw:: html + +

Delete service account tokens.

+

Delete service account tokens for a service in a specified namespace.

+ ``_ @@ -1223,7 +1259,11 @@ def delete_user( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete users. Delete users from the native realm. + .. raw:: html + +

Delete users.

+

Delete users from the native realm.

+ ``_ @@ -1271,8 +1311,13 @@ def disable_user( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Disable users. Disable users in the native realm. By default, when you create - users, they are enabled. You can use this API to revoke a user's access to Elasticsearch. + .. raw:: html + +

Disable users.

+

Disable users in the native realm. + By default, when you create users, they are enabled. + You can use this API to revoke a user's access to Elasticsearch.

+ ``_ @@ -1320,15 +1365,16 @@ def disable_user_profile( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Disable a user profile. Disable user profiles so that they are not visible in - user profile searches. NOTE: The user profile feature is designed only for use - by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security - solutions. Individual users and external applications should not call this API - directly. Elastic reserves the right to change or remove this feature in future - releases without prior notice. When you activate a user profile, its automatically - enabled and visible in user profile searches. You can use the disable user profile - API to disable a user profile so it’s not visible in these searches. To re-enable - a disabled user profile, use the enable user profile API . + .. raw:: html + +

Disable a user profile.

+

Disable user profiles so that they are not visible in user profile searches.

+

NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. + Individual users and external applications should not call this API directly. + Elastic reserves the right to change or remove this feature in future releases without prior notice.

+

When you activate a user profile, it's automatically enabled and visible in user profile searches. You can use the disable user profile API to disable a user profile so it's not visible in these searches. + To re-enable a disabled user profile, use the enable user profile API.

+ ``_ @@ -1376,8 +1422,12 @@ def enable_user( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Enable users. Enable users in the native realm. By default, when you create users, - they are enabled. + .. raw:: html + +

Enable users.

+

Enable users in the native realm. + By default, when you create users, they are enabled.

+ ``_ @@ -1425,14 +1475,16 @@ def enable_user_profile( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Enable a user profile. Enable user profiles to make them visible in user profile - searches. NOTE: The user profile feature is designed only for use by Kibana and - Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual - users and external applications should not call this API directly. Elastic reserves - the right to change or remove this feature in future releases without prior notice. - When you activate a user profile, it's automatically enabled and visible in user - profile searches. If you later disable the user profile, you can use the enable - user profile API to make the profile visible in these searches again. + .. raw:: html + +

Enable a user profile.

+

Enable user profiles to make them visible in user profile searches.

+

NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. + Individual users and external applications should not call this API directly. + Elastic reserves the right to change or remove this feature in future releases without prior notice.

+

When you activate a user profile, it's automatically enabled and visible in user profile searches. + If you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again.

+ ``_ @@ -1476,11 +1528,13 @@ def enroll_kibana( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Enroll Kibana. Enable a Kibana instance to configure itself for communication - with a secured Elasticsearch cluster. NOTE: This API is currently intended for - internal use only by Kibana. Kibana uses this API internally to configure itself - for communications with an Elasticsearch cluster that already has security features - enabled. + .. raw:: html + +

Enroll Kibana.

+

Enable a Kibana instance to configure itself for communication with a secured Elasticsearch cluster.

+

NOTE: This API is currently intended for internal use only by Kibana. + Kibana uses this API internally to configure itself for communications with an Elasticsearch cluster that already has security features enabled.

+ ``_ """ @@ -1515,12 +1569,13 @@ def enroll_node( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Enroll a node. Enroll a new node to allow it to join an existing cluster with - security features enabled. The response contains all the necessary information - for the joining node to bootstrap discovery and security related settings so - that it can successfully join the cluster. The response contains key and certificate - material that allows the caller to generate valid signed certificates for the - HTTP layer of all nodes in the cluster. + .. raw:: html + +

Enroll a node.

+

Enroll a new node to allow it to join an existing cluster with security features enabled.

+

The response contains all the necessary information for the joining node to bootstrap discovery and security related settings so that it can successfully join the cluster. + The response contains key and certificate material that allows the caller to generate valid signed certificates for the HTTP layer of all nodes in the cluster.

+ ``_ """ @@ -1563,11 +1618,13 @@ def get_api_key( with_profile_uid: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get API key information. Retrieves information for one or more API keys. NOTE: - If you have only the `manage_own_api_key` privilege, this API returns only the - API keys that you own. If you have `read_security`, `manage_api_key` or greater - privileges (including `manage_security`), this API returns all API keys regardless - of ownership. + .. raw:: html + +

Get API key information.

+

Retrieves information for one or more API keys. + NOTE: If you have only the manage_own_api_key privilege, this API returns only the API keys that you own. + If you have read_security, manage_api_key or greater privileges (including manage_security), this API returns all API keys regardless of ownership.
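A short sketch contrasting the two cases; the key name is a placeholder:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Keys owned by the authenticated user only.
    own_keys = client.security.get_api_key(owner=True)

    # A specific key by name (seeing other users' keys requires the broader privileges above).
    named = client.security.get_api_key(name="my-app-key")
    print(len(own_keys["api_keys"]), len(named["api_keys"]))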

+ ``_ @@ -1641,8 +1698,11 @@ def get_builtin_privileges( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get builtin privileges. Get the list of cluster privileges and index privileges - that are available in this version of Elasticsearch. + .. raw:: html + +

Get builtin privileges.

+

Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch.

+ ``_ """ @@ -1679,10 +1739,15 @@ def get_privileges( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get application privileges. To use this API, you must have one of the following - privileges: * The `read_security` cluster privilege (or a greater privilege such - as `manage_security` or `all`). * The "Manage Application Privileges" global - privilege for the application being referenced in the request. + .. raw:: html + +

Get application privileges.

+

To use this API, you must have one of the following privileges:

+
  • The read_security cluster privilege (or a greater privilege such as manage_security or all).
  • The "Manage Application Privileges" global privilege for the application being referenced in the request.
+ ``_ @@ -1732,9 +1797,13 @@ def get_role( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get roles. Get roles in the native realm. The role management APIs are generally - the preferred way to manage roles, rather than using file-based role management. - The get roles API cannot retrieve roles that are defined in roles files. + .. raw:: html + +

Get roles.

+

Get roles in the native realm. + The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. + The get roles API cannot retrieve roles that are defined in roles files.

+ ``_ @@ -1779,10 +1848,13 @@ def get_role_mapping( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get role mappings. Role mappings define which roles are assigned to each user. - The role mapping APIs are generally the preferred way to manage role mappings - rather than using role mapping files. The get role mappings API cannot retrieve - role mappings that are defined in role mapping files. + .. raw:: html + +

Get role mappings.

+

Role mappings define which roles are assigned to each user. + The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. + The get role mappings API cannot retrieve role mappings that are defined in role mapping files.

+ ``_ @@ -1830,9 +1902,12 @@ def get_service_accounts( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get service accounts. Get a list of service accounts that match the provided - path parameters. NOTE: Currently, only the `elastic/fleet-server` service account - is available. + .. raw:: html + +

Get service accounts.

+

Get a list of service accounts that match the provided path parameters.

+

NOTE: Currently, only the elastic/fleet-server service account is available.

+ ``_ @@ -1883,14 +1958,14 @@ def get_service_credentials( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get service account credentials. To use this API, you must have at least the - `read_security` cluster privilege (or a greater privilege such as `manage_service_account` - or `manage_security`). The response includes service account tokens that were - created with the create service account tokens API as well as file-backed tokens - from all nodes of the cluster. NOTE: For tokens backed by the `service_tokens` - file, the API collects them from all nodes of the cluster. Tokens with the same - name from different nodes are assumed to be the same token and are only counted - once towards the total number of service tokens. + .. raw:: html + +

Get service account credentials.

+

To use this API, you must have at least the read_security cluster privilege (or a greater privilege such as manage_service_account or manage_security).

+

The response includes service account tokens that were created with the create service account tokens API as well as file-backed tokens from all nodes of the cluster.

+

NOTE: For tokens backed by the service_tokens file, the API collects them from all nodes of the cluster. + Tokens with the same name from different nodes are assumed to be the same token and are only counted once towards the total number of service tokens.

+ ``_ @@ -1936,10 +2011,17 @@ def get_settings( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get security index settings. Get the user-configurable settings for the security - internal index (`.security` and associated indices). Only a subset of the index - settings — those that are user-configurable—will be shown. This includes: * `index.auto_expand_replicas` - * `index.number_of_replicas` + .. raw:: html + +

Get security index settings.

+

Get the user-configurable settings for the security internal index (.security and associated indices). + Only a subset of the index settings (those that are user-configurable) will be shown. + This includes:

+
  • index.auto_expand_replicas
  • index.number_of_replicas
+ ``_ @@ -2003,20 +2085,19 @@ def get_token( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a token. Create a bearer token for access without requiring basic authentication. - The tokens are created by the Elasticsearch Token Service, which is automatically - enabled when you configure TLS on the HTTP interface. Alternatively, you can - explicitly enable the `xpack.security.authc.token.enabled` setting. When you - are running in production mode, a bootstrap check prevents you from enabling - the token service unless you also enable TLS on the HTTP interface. The get token - API takes the same parameters as a typical OAuth 2.0 token API except for the - use of a JSON request body. A successful get token API call returns a JSON structure - that contains the access token, the amount of time (seconds) that the token expires - in, the type, and the scope if available. The tokens returned by the get token - API have a finite period of time for which they are valid and after that time - period, they can no longer be used. That time period is defined by the `xpack.security.authc.token.timeout` - setting. If you want to invalidate a token immediately, you can do so by using - the invalidate token API. + .. raw:: html + +

Get a token.

+

Create a bearer token for access without requiring basic authentication. + The tokens are created by the Elasticsearch Token Service, which is automatically enabled when you configure TLS on the HTTP interface. + Alternatively, you can explicitly enable the xpack.security.authc.token.enabled setting. + When you are running in production mode, a bootstrap check prevents you from enabling the token service unless you also enable TLS on the HTTP interface.

+

The get token API takes the same parameters as a typical OAuth 2.0 token API except for the use of a JSON request body.

+

A successful get token API call returns a JSON structure that contains the access token, the amount of time (seconds) that the token expires in, the type, and the scope if available.

+

The tokens returned by the get token API have a finite period of time for which they are valid and after that time period, they can no longer be used. + That time period is defined by the xpack.security.authc.token.timeout setting. + If you want to invalidate a token immediately, you can do so by using the invalidate token API.
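A hedged sketch using the password grant type; the endpoint and credentials are placeholders, and other grant types (for example refresh_token) follow the same pattern:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("https://localhost:9200")  # token service expects TLS on the HTTP interface

    resp = client.security.get_token(
        grant_type="password",
        username="jsmith",    # hypothetical user
        password="changeme",  # hypothetical password
    )
    access_token = resp["access_token"]
    print(resp["type"], resp["expires_in"])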

+ ``_ @@ -2086,7 +2167,11 @@ def get_user( with_profile_uid: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get users. Get information about users in the native realm and built-in users. + .. raw:: html + +

Get users.

+

Get information about users in the native realm and built-in users.

+ ``_ @@ -2137,10 +2222,14 @@ def get_user_privileges( username: t.Optional[t.Union[None, str]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get user privileges. Get the security privileges for the logged in user. All - users can use this API, but only to determine their own privileges. To check - the privileges of other users, you must use the run as feature. To check whether - a user has a specific list of privileges, use the has privileges API. + .. raw:: html + +

Get user privileges.

+

Get the security privileges for the logged in user. + All users can use this API, but only to determine their own privileges. + To check the privileges of other users, you must use the run as feature. + To check whether a user has a specific list of privileges, use the has privileges API.

+ ``_ @@ -2190,11 +2279,14 @@ def get_user_profile( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a user profile. Get a user's profile using the unique profile ID. NOTE: The - user profile feature is designed only for use by Kibana and Elastic's Observability, - Enterprise Search, and Elastic Security solutions. Individual users and external - applications should not call this API directly. Elastic reserves the right to - change or remove this feature in future releases without prior notice. + .. raw:: html + +

Get a user profile.

+

Get a user's profile using the unique profile ID.

+

NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. + Individual users and external applications should not call this API directly. + Elastic reserves the right to change or remove this feature in future releases without prior notice.

+ ``_ @@ -2258,23 +2350,27 @@ def grant_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Grant an API key. Create an API key on behalf of another user. This API is similar - to the create API keys API, however it creates the API key for a user that is - different than the user that runs the API. The caller must have authentication - credentials for the user on whose behalf the API key will be created. It is not - possible to use this API to create an API key without that user's credentials. - The supported user authentication credential types are: * username and password - * Elasticsearch access tokens * JWTs The user, for whom the authentication credentials - is provided, can optionally "run as" (impersonate) another user. In this case, - the API key will be created on behalf of the impersonated user. This API is intended - be used by applications that need to create and manage API keys for end users, - but cannot guarantee that those users have permission to create API keys on their - own behalf. The API keys are created by the Elasticsearch API key service, which - is automatically enabled. A successful grant API key API call returns a JSON - structure that contains the API key, its unique id, and its name. If applicable, - it also returns expiration information for the API key in milliseconds. By default, - API keys never expire. You can specify expiration information when you create - the API keys. + .. raw:: html + +

Grant an API key.

+

Create an API key on behalf of another user. This API is similar to the create API keys API; however, it creates the API key for a user that is different from the user that runs the API. The caller must have authentication credentials for the user on whose behalf the API key will be created. It is not possible to use this API to create an API key without that user's credentials. The supported user authentication credential types are:

• username and password
• Elasticsearch access tokens
• JWTs

The user, for whom the authentication credentials are provided, can optionally "run as" (impersonate) another user. In this case, the API key will be created on behalf of the impersonated user.

+

This API is intended to be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf. The API keys are created by the Elasticsearch API key service, which is automatically enabled.

+

A successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name. + If applicable, it also returns expiration information for the API key in milliseconds.

+

By default, API keys never expire. You can specify expiration information when you create the API keys.
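
As a rough sketch of how this can look from the Python client (reusing the `client` instance from the earlier sketch; the end user's credentials and the key name are illustrative):

```python
# Create an API key on behalf of "end-user", authenticating with that user's password
resp = client.security.grant_api_key(
    grant_type="password",
    username="end-user",
    password="end-user-password",
    api_key={"name": "my-app-key", "expiration": "7d"},
)
print(resp["id"], resp["api_key"])  # id and secret of the newly granted key
```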

+ ``_ @@ -2415,9 +2511,13 @@ def has_privileges( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Check user privileges. Determine whether the specified user has a specified list - of privileges. All users can use this API, but only to determine their own privileges. - To check the privileges of other users, you must use the run as feature. + .. raw:: html + +

Check user privileges.

+

Determine whether the specified user has a specified list of privileges. All users can use this API, but only to determine their own privileges. To check the privileges of other users, you must use the run as feature.
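
A minimal sketch of checking the calling user's own privileges (the index names and privilege names are illustrative):

```python
# Ask whether the current user can monitor the cluster and read the "logs-*" indices
resp = client.security.has_privileges(
    cluster=["monitor"],
    index=[{"names": ["logs-*"], "privileges": ["read", "view_index_metadata"]}],
)
print(resp["has_all_requested"])  # True only if every requested privilege is held
```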

+ ``_ @@ -2476,12 +2576,13 @@ def has_privileges_user_profile( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Check user profile privileges. Determine whether the users associated with the - specified user profile IDs have all the requested privileges. NOTE: The user - profile feature is designed only for use by Kibana and Elastic's Observability, - Enterprise Search, and Elastic Security solutions. Individual users and external - applications should not call this API directly. Elastic reserves the right to - change or remove this feature in future releases without prior notice. + .. raw:: html + +

Check user profile privileges.

+

Determine whether the users associated with the specified user profile IDs have all the requested privileges.

+

NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. + Elastic reserves the right to change or remove this feature in future releases without prior notice.

+ ``_ @@ -2540,20 +2641,22 @@ def invalidate_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Invalidate API keys. This API invalidates API keys created by the create API - key or grant API key APIs. Invalidated API keys fail authentication, but they - can still be viewed using the get API key information and query API key information - APIs, for at least the configured retention period, until they are automatically - deleted. To use this API, you must have at least the `manage_security`, `manage_api_key`, - or `manage_own_api_key` cluster privileges. The `manage_security` privilege allows - deleting any API key, including both REST and cross cluster API keys. The `manage_api_key` - privilege allows deleting any REST API key, but not cross cluster API keys. The - `manage_own_api_key` only allows deleting REST API keys that are owned by the - user. In addition, with the `manage_own_api_key` privilege, an invalidation request - must be issued in one of the three formats: - Set the parameter `owner=true`. - - Or, set both `username` and `realm_name` to match the user's identity. - Or, - if the request is issued by an API key, that is to say an API key invalidates - itself, specify its ID in the `ids` field. + .. raw:: html + +

Invalidate API keys.

+

This API invalidates API keys created by the create API key or grant API key APIs. + Invalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted.

+

To use this API, you must have at least the manage_security, manage_api_key, or manage_own_api_key cluster privileges. The manage_security privilege allows deleting any API key, including both REST and cross cluster API keys. The manage_api_key privilege allows deleting any REST API key, but not cross cluster API keys. The manage_own_api_key only allows deleting REST API keys that are owned by the user. In addition, with the manage_own_api_key privilege, an invalidation request must be issued in one of the three formats (illustrated in the sketch after this list):

• Set the parameter owner=true.
• Or, set both username and realm_name to match the user's identity.
• Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the ids field.
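
A hedged sketch of the first two forms, reusing the `client` instance from the first sketch (the key IDs are placeholders):

```python
# Invalidate every API key owned by the authenticated user
resp = client.security.invalidate_api_key(owner=True)
print(resp["invalidated_api_keys"], resp["error_count"])

# Alternatively, invalidate specific keys by ID
client.security.invalidate_api_key(ids=["key-id-1", "key-id-2"])
```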
+ ``_ @@ -2625,15 +2728,19 @@ def invalidate_token( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Invalidate a token. The access tokens returned by the get token API have a finite - period of time for which they are valid. After that time period, they can no - longer be used. The time period is defined by the `xpack.security.authc.token.timeout` - setting. The refresh tokens returned by the get token API are only valid for - 24 hours. They can also be used exactly once. If you want to invalidate one or - more access or refresh tokens immediately, use this invalidate token API. NOTE: - While all parameters are optional, at least one of them is required. More specifically, - either one of `token` or `refresh_token` parameters is required. If none of these - two are specified, then `realm_name` and/or `username` need to be specified. + .. raw:: html + +

Invalidate a token.

+

The access tokens returned by the get token API have a finite period of time for which they are valid. After that time period, they can no longer be used. The time period is defined by the xpack.security.authc.token.timeout setting.

+

The refresh tokens returned by the get token API are only valid for 24 hours. They can also be used exactly once. If you want to invalidate one or more access or refresh tokens immediately, use this invalidate token API.

+

NOTE: While all parameters are optional, at least one of them is required. More specifically, either the token or the refresh_token parameter is required. If neither of these is specified, then realm_name and/or username need to be specified.

+ ``_ @@ -2695,12 +2802,13 @@ def oidc_authenticate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Authenticate OpenID Connect. Exchange an OpenID Connect authentication response - message for an Elasticsearch internal access token and refresh token that can - be subsequently used for authentication. Elasticsearch exposes all the necessary - OpenID Connect related functionality with the OpenID Connect APIs. These APIs - are used internally by Kibana in order to provide OpenID Connect based authentication, - but can also be used by other, custom web applications or other clients. + .. raw:: html + +

Authenticate OpenID Connect.

+

Exchange an OpenID Connect authentication response message for an Elasticsearch internal access token and refresh token that can be subsequently used for authentication.

+

Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. + These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.

+ ``_ @@ -2773,15 +2881,14 @@ def oidc_logout( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Logout of OpenID Connect. Invalidate an access token and a refresh token that - were generated as a response to the `/_security/oidc/authenticate` API. If the - OpenID Connect authentication realm in Elasticsearch is accordingly configured, - the response to this call will contain a URI pointing to the end session endpoint - of the OpenID Connect Provider in order to perform single logout. Elasticsearch - exposes all the necessary OpenID Connect related functionality with the OpenID - Connect APIs. These APIs are used internally by Kibana in order to provide OpenID - Connect based authentication, but can also be used by other, custom web applications - or other clients. + .. raw:: html + +

Logout of OpenID Connect.

+

Invalidate an access token and a refresh token that were generated as a response to the /_security/oidc/authenticate API.

+

If the OpenID Connect authentication realm in Elasticsearch is accordingly configured, the response to this call will contain a URI pointing to the end session endpoint of the OpenID Connect Provider in order to perform single logout.

+

Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. + These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.

+ ``_ @@ -2836,15 +2943,14 @@ def oidc_prepare_authentication( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Prepare OpenID connect authentication. Create an oAuth 2.0 authentication request - as a URL string based on the configuration of the OpenID Connect authentication - realm in Elasticsearch. The response of this API is a URL pointing to the Authorization - Endpoint of the configured OpenID Connect Provider, which can be used to redirect - the browser of the user in order to continue the authentication process. Elasticsearch - exposes all the necessary OpenID Connect related functionality with the OpenID - Connect APIs. These APIs are used internally by Kibana in order to provide OpenID - Connect based authentication, but can also be used by other, custom web applications - or other clients. + .. raw:: html + +

Prepare OpenID connect authentication.

+

Create an OAuth 2.0 authentication request as a URL string based on the configuration of the OpenID Connect authentication realm in Elasticsearch.

+

The response of this API is a URL pointing to the Authorization Endpoint of the configured OpenID Connect Provider, which can be used to redirect the browser of the user in order to continue the authentication process.

+

Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. + These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.

+ ``_ @@ -2921,20 +3027,26 @@ def put_privileges( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update application privileges. To use this API, you must have one of - the following privileges: * The `manage_security` cluster privilege (or a greater - privilege such as `all`). * The "Manage Application Privileges" global privilege - for the application being referenced in the request. Application names are formed - from a prefix, with an optional suffix that conform to the following rules: * - The prefix must begin with a lowercase ASCII letter. * The prefix must contain - only ASCII letters or digits. * The prefix must be at least 3 characters long. - * If the suffix exists, it must begin with either a dash `-` or `_`. * The suffix - cannot contain any of the following characters: `\\`, `/`, `*`, `?`, `"`, `<`, - `>`, `|`, `,`, `*`. * No part of the name can contain whitespace. Privilege names - must begin with a lowercase ASCII letter and must contain only ASCII letters - and digits along with the characters `_`, `-`, and `.`. Action names can contain - any number of printable ASCII characters and must contain at least one of the - following characters: `/`, `*`, `:`. + .. raw:: html + +

Create or update application privileges.

+

To use this API, you must have one of the following privileges:

+
• The manage_security cluster privilege (or a greater privilege such as all).
• The "Manage Application Privileges" global privilege for the application being referenced in the request.

Application names are formed from a prefix, with an optional suffix, that together conform to the following rules:

+
• The prefix must begin with a lowercase ASCII letter.
• The prefix must contain only ASCII letters or digits.
• The prefix must be at least 3 characters long.
• If the suffix exists, it must begin with either a dash - or _.
• The suffix cannot contain any of the following characters: \\, /, *, ?, ", <, >, |, ,, *.
• No part of the name can contain whitespace.

Privilege names must begin with a lowercase ASCII letter and must contain only ASCII letters and digits along with the characters _, -, and ..

+

Action names can contain any number of printable ASCII characters and must contain at least one of the following characters: /, *, :.
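
A sketch of registering a privilege that satisfies these naming rules (the application name, privilege name, and actions are illustrative):

```python
# Register a "read" privilege for a hypothetical application named "myapp"
client.security.put_privileges(
    privileges={
        "myapp": {
            "read": {"actions": ["data:read/*", "action:login"]}
        }
    }
)
```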

+ ``_ @@ -3080,10 +3192,13 @@ def put_role( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update roles. The role management APIs are generally the preferred - way to manage roles in the native realm, rather than using file-based role management. - The create or update roles API cannot update roles that are defined in roles - files. File-based role management is not available in Elastic Serverless. + .. raw:: html + +

Create or update roles.

+

The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management. The create or update roles API cannot update roles that are defined in roles files. File-based role management is not available in Elastic Serverless.
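
A minimal native-realm role definition might look roughly like this (the role name, indices, and privileges are illustrative):

```python
# Create (or overwrite) a role that can monitor the cluster and read the "logs-*" indices
client.security.put_role(
    name="logs_reader",
    cluster=["monitor"],
    indices=[{"names": ["logs-*"], "privileges": ["read"]}],
)
```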

+ ``_ @@ -3195,29 +3310,27 @@ def put_role_mapping( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update role mappings. Role mappings define which roles are assigned - to each user. Each mapping has rules that identify users and a list of roles - that are granted to those users. The role mapping APIs are generally the preferred - way to manage role mappings rather than using role mapping files. The create - or update role mappings API cannot update role mappings that are defined in role - mapping files. NOTE: This API does not create roles. Rather, it maps users to - existing roles. Roles can be created by using the create or update roles API - or roles files. **Role templates** The most common use for role mappings is to - create a mapping from a known value on the user to a fixed role name. For example, - all users in the `cn=admin,dc=example,dc=com` LDAP group should be given the - superuser role in Elasticsearch. The `roles` field is used for this purpose. - For more complex needs, it is possible to use Mustache templates to dynamically - determine the names of the roles that should be granted to the user. The `role_templates` - field is used for this purpose. NOTE: To use role templates successfully, the - relevant scripting feature must be enabled. Otherwise, all attempts to create - a role mapping with role templates fail. All of the user fields that are available - in the role mapping rules are also available in the role templates. Thus it is - possible to assign a user to a role that reflects their username, their groups, - or the name of the realm to which they authenticated. By default a template is - evaluated to produce a single string that is the name of the role which should - be assigned to the user. If the format of the template is set to "json" then - the template is expected to produce a JSON string or an array of JSON strings - for the role names. + .. raw:: html + +

Create or update role mappings.

+

Role mappings define which roles are assigned to each user. Each mapping has rules that identify users and a list of roles that are granted to those users. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files.

+

NOTE: This API does not create roles. Rather, it maps users to existing roles. + Roles can be created by using the create or update roles API or roles files.

+

Role templates

+

The most common use for role mappings is to create a mapping from a known value on the user to a fixed role name. For example, all users in the cn=admin,dc=example,dc=com LDAP group should be given the superuser role in Elasticsearch. The roles field is used for this purpose.

+

For more complex needs, it is possible to use Mustache templates to dynamically determine the names of the roles that should be granted to the user. + The role_templates field is used for this purpose.

+

NOTE: To use role templates successfully, the relevant scripting feature must be enabled. + Otherwise, all attempts to create a role mapping with role templates fail.

+

All of the user fields that are available in the role mapping rules are also available in the role templates. + Thus it is possible to assign a user to a role that reflects their username, their groups, or the name of the realm to which they authenticated.

+

By default a template is evaluated to produce a single string that is the name of the role which should be assigned to the user. + If the format of the template is set to "json" then the template is expected to produce a JSON string or an array of JSON strings for the role names.
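
The LDAP example above might be expressed with the Python client roughly as follows (the mapping name is illustrative; the group DN mirrors the example and should be adjusted to your realm):

```python
# Map members of the LDAP admin group to the superuser role
client.security.put_role_mapping(
    name="ldap-admins",
    enabled=True,
    roles=["superuser"],
    rules={"field": {"groups": "cn=admin,dc=example,dc=com"}},
)
```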

+ ``_ @@ -3313,10 +3426,13 @@ def put_user( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update users. Add and update users in the native realm. A password - is required for adding a new user but is optional when updating an existing user. - To change a user's password without updating any other fields, use the change - password API. + .. raw:: html + +

Create or update users.

+

Add and update users in the native realm. A password is required for adding a new user but is optional when updating an existing user. To change a user's password without updating any other fields, use the change password API.
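
A minimal sketch of adding a native-realm user (the username, password, and roles are placeholders):

```python
# Create a native user and assign it an existing role
client.security.put_user(
    username="jane",
    password="a-long-random-password",
    roles=["logs_reader"],
    full_name="Jane Doe",
)
```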

+ ``_ @@ -3427,13 +3543,15 @@ def query_api_keys( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Find API keys with a query. Get a paginated list of API keys and their information. - You can optionally filter the results with a query. To use this API, you must - have at least the `manage_own_api_key` or the `read_security` cluster privileges. - If you have only the `manage_own_api_key` privilege, this API returns only the - API keys that you own. If you have the `read_security`, `manage_api_key`, or - greater privileges (including `manage_security`), this API returns all API keys - regardless of ownership. + .. raw:: html + +

Find API keys with a query.

+

Get a paginated list of API keys and their information. + You can optionally filter the results with a query.

+

To use this API, you must have at least the manage_own_api_key or the read_security cluster privileges. If you have only the manage_own_api_key privilege, this API returns only the API keys that you own. If you have the read_security, manage_api_key, or greater privileges (including manage_security), this API returns all API keys regardless of ownership.
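
A sketch of paging through keys that have not been invalidated (the query and page size are illustrative):

```python
# Find up to 25 API keys that are still valid
resp = client.security.query_api_keys(
    query={"term": {"invalidated": False}},
    size=25,
)
for key in resp["api_keys"]:
    print(key["id"], key["name"])
```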

+ ``_ @@ -3568,11 +3686,15 @@ def query_role( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Find roles with a query. Get roles in a paginated manner. The role management - APIs are generally the preferred way to manage roles, rather than using file-based - role management. The query roles API does not retrieve roles that are defined - in roles files, nor built-in ones. You can optionally filter the results with - a query. Also, the results can be paginated and sorted. + .. raw:: html + +

Find roles with a query.

+

Get roles in a paginated manner. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The query roles API does not retrieve roles that are defined in roles files, nor built-in ones. You can optionally filter the results with a query. Also, the results can be paginated and sorted.

+ ``_ @@ -3658,10 +3780,14 @@ def query_user( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Find users with a query. Get information for users in a paginated manner. You - can optionally filter the results with a query. NOTE: As opposed to the get user - API, built-in users are excluded from the result. This API is only for native - users. + .. raw:: html + +

Find users with a query.

+

Get information for users in a paginated manner. + You can optionally filter the results with a query.

+

NOTE: Unlike the get user API, this API excludes built-in users from the result. It is only for native users.

+ ``_ @@ -3740,19 +3866,21 @@ def saml_authenticate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Authenticate SAML. Submit a SAML response message to Elasticsearch for consumption. - NOTE: This API is intended for use by custom web applications other than Kibana. - If you are using Kibana, refer to the documentation for configuring SAML single-sign-on - on the Elastic Stack. The SAML message that is submitted can be: * A response - to a SAML authentication request that was previously created using the SAML prepare - authentication API. * An unsolicited SAML message in the case of an IdP-initiated - single sign-on (SSO) flow. In either case, the SAML message needs to be a base64 - encoded XML document with a root element of ``. After successful validation, - Elasticsearch responds with an Elasticsearch internal access token and refresh - token that can be subsequently used for authentication. This API endpoint essentially - exchanges SAML responses that indicate successful authentication in the IdP for - Elasticsearch access and refresh tokens, which can be used for authentication - against Elasticsearch. + .. raw:: html + +

Authenticate SAML.

+

Submit a SAML response message to Elasticsearch for consumption.

+

NOTE: This API is intended for use by custom web applications other than Kibana. + If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.

+

The SAML message that is submitted can be:

+
• A response to a SAML authentication request that was previously created using the SAML prepare authentication API.
• An unsolicited SAML message in the case of an IdP-initiated single sign-on (SSO) flow.

In either case, the SAML message needs to be a base64 encoded XML document with a root element of <Response>.

+

After successful validation, Elasticsearch responds with an Elasticsearch internal access token and refresh token that can be subsequently used for authentication. + This API endpoint essentially exchanges SAML responses that indicate successful authentication in the IdP for Elasticsearch access and refresh tokens, which can be used for authentication against Elasticsearch.

+ ``_ @@ -3814,16 +3942,18 @@ def saml_complete_logout( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Logout of SAML completely. Verifies the logout response sent from the SAML IdP. - NOTE: This API is intended for use by custom web applications other than Kibana. - If you are using Kibana, refer to the documentation for configuring SAML single-sign-on - on the Elastic Stack. The SAML IdP may send a logout response back to the SP - after handling the SP-initiated SAML Single Logout. This API verifies the response - by ensuring the content is relevant and validating its signature. An empty response - is returned if the verification process is successful. The response can be sent - by the IdP with either the HTTP-Redirect or the HTTP-Post binding. The caller - of this API must prepare the request accordingly so that this API can handle - either of them. + .. raw:: html + +

Logout of SAML completely.

+

Verifies the logout response sent from the SAML IdP.

+

NOTE: This API is intended for use by custom web applications other than Kibana. + If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.

+

The SAML IdP may send a logout response back to the SP after handling the SP-initiated SAML Single Logout. This API verifies the response by ensuring the content is relevant and validating its signature. An empty response is returned if the verification process is successful. The response can be sent by the IdP with either the HTTP-Redirect or the HTTP-Post binding. The caller of this API must prepare the request accordingly so that this API can handle either of them.

+ ``_ @@ -3889,15 +4019,17 @@ def saml_invalidate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Invalidate SAML. Submit a SAML LogoutRequest message to Elasticsearch for consumption. - NOTE: This API is intended for use by custom web applications other than Kibana. - If you are using Kibana, refer to the documentation for configuring SAML single-sign-on - on the Elastic Stack. The logout request comes from the SAML IdP during an IdP - initiated Single Logout. The custom web application can use this API to have - Elasticsearch process the `LogoutRequest`. After successful validation of the - request, Elasticsearch invalidates the access token and refresh token that corresponds - to that specific SAML principal and provides a URL that contains a SAML LogoutResponse - message. Thus the user can be redirected back to their IdP. + .. raw:: html + +

Invalidate SAML.

+

Submit a SAML LogoutRequest message to Elasticsearch for consumption.

+

NOTE: This API is intended for use by custom web applications other than Kibana. + If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.

+

The logout request comes from the SAML IdP during an IdP-initiated Single Logout. The custom web application can use this API to have Elasticsearch process the LogoutRequest. After successful validation of the request, Elasticsearch invalidates the access token and refresh token that correspond to that specific SAML principal and provides a URL that contains a SAML LogoutResponse message. Thus the user can be redirected back to their IdP.

+ ``_ @@ -3964,14 +4096,15 @@ def saml_logout( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Logout of SAML. Submits a request to invalidate an access token and refresh token. - NOTE: This API is intended for use by custom web applications other than Kibana. - If you are using Kibana, refer to the documentation for configuring SAML single-sign-on - on the Elastic Stack. This API invalidates the tokens that were generated for - a user by the SAML authenticate API. If the SAML realm in Elasticsearch is configured - accordingly and the SAML IdP supports this, the Elasticsearch response contains - a URL to redirect the user to the IdP that contains a SAML logout request (starting - an SP-initiated SAML Single Logout). + .. raw:: html + +

Logout of SAML.

+

Submits a request to invalidate an access token and refresh token.

+

NOTE: This API is intended for use by custom web applications other than Kibana. + If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.

+

This API invalidates the tokens that were generated for a user by the SAML authenticate API. + If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP supports this, the Elasticsearch response contains a URL to redirect the user to the IdP that contains a SAML logout request (starting an SP-initiated SAML Single Logout).

+ ``_ @@ -4028,20 +4161,20 @@ def saml_prepare_authentication( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Prepare SAML authentication. Create a SAML authentication request (``) - as a URL string based on the configuration of the respective SAML realm in Elasticsearch. - NOTE: This API is intended for use by custom web applications other than Kibana. - If you are using Kibana, refer to the documentation for configuring SAML single-sign-on - on the Elastic Stack. This API returns a URL pointing to the SAML Identity Provider. - You can use the URL to redirect the browser of the user in order to continue - the authentication process. The URL includes a single parameter named `SAMLRequest`, - which contains a SAML Authentication request that is deflated and Base64 encoded. - If the configuration dictates that SAML authentication requests should be signed, - the URL has two extra parameters named `SigAlg` and `Signature`. These parameters - contain the algorithm used for the signature and the signature value itself. - It also returns a random string that uniquely identifies this SAML Authentication - request. The caller of this API needs to store this identifier as it needs to - be used in a following step of the authentication process. + .. raw:: html + +

Prepare SAML authentication.

+

Create a SAML authentication request (<AuthnRequest>) as a URL string based on the configuration of the respective SAML realm in Elasticsearch.

+

NOTE: This API is intended for use by custom web applications other than Kibana. + If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.

+

This API returns a URL pointing to the SAML Identity Provider. You can use the URL to redirect the browser of the user in order to continue the authentication process. The URL includes a single parameter named SAMLRequest, which contains a SAML Authentication request that is deflated and Base64 encoded. If the configuration dictates that SAML authentication requests should be signed, the URL has two extra parameters named SigAlg and Signature. These parameters contain the algorithm used for the signature and the signature value itself. It also returns a random string that uniquely identifies this SAML Authentication request. The caller of this API needs to store this identifier as it needs to be used in a following step of the authentication process.

+ ``_ @@ -4096,11 +4229,13 @@ def saml_service_provider_metadata( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Create SAML service provider metadata. Generate SAML metadata for a SAML 2.0 - Service Provider. The SAML 2.0 specification provides a mechanism for Service - Providers to describe their capabilities and configuration using a metadata file. - This API generates Service Provider metadata based on the configuration of a - SAML realm in Elasticsearch. + .. raw:: html + +

Create SAML service provider metadata.

+

Generate SAML metadata for a SAML 2.0 Service Provider.

+

The SAML 2.0 specification provides a mechanism for Service Providers to describe their capabilities and configuration using a metadata file. + This API generates Service Provider metadata based on the configuration of a SAML realm in Elasticsearch.

+ ``_ @@ -4146,12 +4281,14 @@ def suggest_user_profiles( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Suggest a user profile. Get suggestions for user profiles that match specified - search criteria. NOTE: The user profile feature is designed only for use by Kibana - and Elastic's Observability, Enterprise Search, and Elastic Security solutions. - Individual users and external applications should not call this API directly. - Elastic reserves the right to change or remove this feature in future releases - without prior notice. + .. raw:: html + +

Suggest a user profile.

+

Get suggestions for user profiles that match specified search criteria.

+

NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice.

+ ``_ @@ -4222,24 +4359,23 @@ def update_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update an API key. Update attributes of an existing API key. This API supports - updates to an API key's access scope, expiration, and metadata. To use this API, - you must have at least the `manage_own_api_key` cluster privilege. Users can - only update API keys that they created or that were granted to them. To update - another user’s API key, use the `run_as` feature to submit a request on behalf - of another user. IMPORTANT: It's not possible to use an API key as the authentication - credential for this API. The owner user’s credentials are required. Use this - API to update API keys created by the create API key or grant API Key APIs. If - you need to apply the same update to many API keys, you can use the bulk update - API keys API to reduce overhead. It's not possible to update expired API keys - or API keys that have been invalidated by the invalidate API key API. The access - scope of an API key is derived from the `role_descriptors` you specify in the - request and a snapshot of the owner user's permissions at the time of the request. - The snapshot of the owner's permissions is updated automatically on every call. - IMPORTANT: If you don't specify `role_descriptors` in the request, a call to - this API might still change the API key's access scope. This change can occur - if the owner user's permissions have changed since the API key was created or - last modified. + .. raw:: html + +

Update an API key.

+

Update attributes of an existing API key. + This API supports updates to an API key's access scope, expiration, and metadata.

+

To use this API, you must have at least the manage_own_api_key cluster privilege. Users can only update API keys that they created or that were granted to them. To update another user's API key, use the run_as feature to submit a request on behalf of another user.

+

IMPORTANT: It's not possible to use an API key as the authentication credential for this API. The owner user’s credentials are required.

+

Use this API to update API keys created by the create API key or grant API key APIs. If you need to apply the same update to many API keys, you can use the bulk update API keys API to reduce overhead. It's not possible to update expired API keys or API keys that have been invalidated by the invalidate API key API.

+

The access scope of an API key is derived from the role_descriptors you specify in the request and a snapshot of the owner user's permissions at the time of the request. + The snapshot of the owner's permissions is updated automatically on every call.

+

IMPORTANT: If you don't specify role_descriptors in the request, a call to this API might still change the API key's access scope. + This change can occur if the owner user's permissions have changed since the API key was created or last modified.
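
As an illustrative sketch, narrowing a key's scope and replacing its metadata (the key ID and role descriptor are placeholders):

```python
# Restrict an existing key to read-only access on the "app-*" indices
client.security.update_api_key(
    id="my-api-key-id",
    role_descriptors={
        "app-read-only": {
            "cluster": [],
            "indices": [{"names": ["app-*"], "privileges": ["read"]}],
        }
    },
    metadata={"environment": "production"},
)
```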

+ ``_ @@ -4314,19 +4450,20 @@ def update_cross_cluster_api_key( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update a cross-cluster API key. Update the attributes of an existing cross-cluster - API key, which is used for API key based remote cluster access. To use this API, - you must have at least the `manage_security` cluster privilege. Users can only - update API keys that they created. To update another user's API key, use the - `run_as` feature to submit a request on behalf of another user. IMPORTANT: It's - not possible to use an API key as the authentication credential for this API. - To update an API key, the owner user's credentials are required. It's not possible - to update expired API keys, or API keys that have been invalidated by the invalidate - API key API. This API supports updates to an API key's access scope, metadata, - and expiration. The owner user's information, such as the `username` and `realm`, - is also updated automatically on every call. NOTE: This API cannot update REST - API keys, which should be updated by either the update API key or bulk update - API keys API. + .. raw:: html + +

Update a cross-cluster API key.

+

Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access.

+

To use this API, you must have at least the manage_security cluster privilege. Users can only update API keys that they created. To update another user's API key, use the run_as feature to submit a request on behalf of another user.

+

IMPORTANT: It's not possible to use an API key as the authentication credential for this API. + To update an API key, the owner user's credentials are required.

+

It's not possible to update expired API keys, or API keys that have been invalidated by the invalidate API key API.

+

This API supports updates to an API key's access scope, metadata, and expiration. + The owner user's information, such as the username and realm, is also updated automatically on every call.

+

NOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API.

+ ``_ @@ -4398,14 +4535,14 @@ def update_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update security index settings. Update the user-configurable settings for the - security internal index (`.security` and associated indices). Only a subset of - settings are allowed to be modified. This includes `index.auto_expand_replicas` - and `index.number_of_replicas`. NOTE: If `index.auto_expand_replicas` is set, - `index.number_of_replicas` will be ignored during updates. If a specific index - is not in use on the system and settings are provided for it, the request will - be rejected. This API does not yet support configuring the settings for indices - before they are in use. + .. raw:: html + +

Update security index settings.

+

Update the user-configurable settings for the security internal index (.security and associated indices). Only a subset of settings are allowed to be modified. This includes index.auto_expand_replicas and index.number_of_replicas.

+

NOTE: If index.auto_expand_replicas is set, index.number_of_replicas will be ignored during updates.

+

If a specific index is not in use on the system and settings are provided for it, the request will be rejected. + This API does not yet support configuring the settings for indices before they are in use.

+ ``_ @@ -4474,19 +4611,23 @@ def update_user_profile_data( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update user profile data. Update specific data for the user profile that is associated - with a unique ID. NOTE: The user profile feature is designed only for use by - Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. - Individual users and external applications should not call this API directly. - Elastic reserves the right to change or remove this feature in future releases - without prior notice. To use this API, you must have one of the following privileges: - * The `manage_user_profile` cluster privilege. * The `update_profile_data` global - privilege for the namespaces that are referenced in the request. This API updates - the `labels` and `data` fields of an existing user profile document with JSON - objects. New keys and their values are added to the profile document and conflicting - keys are replaced by data that's included in the request. For both labels and - data, content is namespaced by the top-level fields. The `update_profile_data` - global privilege grants privileges for updating only the allowed namespaces. + .. raw:: html + +

Update user profile data.

+

Update specific data for the user profile that is associated with a unique ID.

+

NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice.

+

To use this API, you must have one of the following privileges:

+
• The manage_user_profile cluster privilege.
• The update_profile_data global privilege for the namespaces that are referenced in the request.

This API updates the labels and data fields of an existing user profile document with JSON objects. + New keys and their values are added to the profile document and conflicting keys are replaced by data that's included in the request.

+

For both labels and data, content is namespaced by the top-level fields. + The update_profile_data global privilege grants privileges for updating only the allowed namespaces.

+ ``_ diff --git a/elasticsearch/_sync/client/shutdown.py b/elasticsearch/_sync/client/shutdown.py index 573ba579c..9b30f3f51 100644 --- a/elasticsearch/_sync/client/shutdown.py +++ b/elasticsearch/_sync/client/shutdown.py @@ -42,13 +42,16 @@ def delete_node( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Cancel node shutdown preparations. Remove a node from the shutdown list so it - can resume normal operations. You must explicitly clear the shutdown request - when a node rejoins the cluster or when a node has permanently left the cluster. - Shutdown requests are never removed automatically by Elasticsearch. NOTE: This - feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, - and Elastic Cloud on Kubernetes. Direct use is not supported. If the operator - privileges feature is enabled, you must be an operator to use this API. + .. raw:: html + +

Cancel node shutdown preparations. Remove a node from the shutdown list so it can resume normal operations. You must explicitly clear the shutdown request when a node rejoins the cluster or when a node has permanently left the cluster. Shutdown requests are never removed automatically by Elasticsearch.

+

NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. + Direct use is not supported.

+

If the operator privileges feature is enabled, you must be an operator to use this API.

+ ``_ @@ -100,13 +103,14 @@ def get_node( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the shutdown status. Get information about nodes that are ready to be shut - down, have shut down preparations still in progress, or have stalled. The API - returns status information for each part of the shut down process. NOTE: This - feature is designed for indirect use by Elasticsearch Service, Elastic Cloud - Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported. If - the operator privileges feature is enabled, you must be an operator to use this - API. + .. raw:: html + +

Get the shutdown status.

+

Get information about nodes that are ready to be shut down, have shut down preparations still in progress, or have stalled. + The API returns status information for each part of the shut down process.

+

NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

+

If the operator privileges feature is enabled, you must be an operator to use this API.

+ ``_ @@ -169,18 +173,19 @@ def put_node( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Prepare a node to be shut down. NOTE: This feature is designed for indirect use - by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. - Direct use is not supported. If you specify a node that is offline, it will be - prepared for shut down when it rejoins the cluster. If the operator privileges - feature is enabled, you must be an operator to use this API. The API migrates - ongoing tasks and index shards to other nodes as needed to prepare a node to - be restarted or shut down and removed from the cluster. This ensures that Elasticsearch - can be stopped safely with minimal disruption to the cluster. You must specify - the type of shutdown: `restart`, `remove`, or `replace`. If a node is already - being prepared for shutdown, you can use this API to change the shutdown type. - IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the - node shutdown status to determine when it is safe to stop Elasticsearch. + .. raw:: html + +

Prepare a node to be shut down.

+

NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

+

If you specify a node that is offline, it will be prepared for shut down when it rejoins the cluster.

+

If the operator privileges feature is enabled, you must be an operator to use this API.

+

The API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster. + This ensures that Elasticsearch can be stopped safely with minimal disruption to the cluster.

+

You must specify the type of shutdown: restart, remove, or replace. + If a node is already being prepared for shutdown, you can use this API to change the shutdown type.

+

IMPORTANT: This API does NOT terminate the Elasticsearch process. + Monitor the node shutdown status to determine when it is safe to stop Elasticsearch.
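
A rough sketch of preparing a node for a rolling restart and then polling its status (the node ID and delay are placeholders):

```python
# Mark the node for a restart-type shutdown, delaying shard reallocation by 10 minutes
client.shutdown.put_node(
    node_id="node-1-id",
    type="restart",
    reason="routine maintenance",
    allocation_delay="10m",
)
# Check the shutdown status to decide when it is safe to stop the Elasticsearch process
status = client.shutdown.get_node(node_id="node-1-id")
```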

+ ``_ diff --git a/elasticsearch/_sync/client/simulate.py b/elasticsearch/_sync/client/simulate.py index 0139e229f..36339f412 100644 --- a/elasticsearch/_sync/client/simulate.py +++ b/elasticsearch/_sync/client/simulate.py @@ -64,28 +64,22 @@ def ingest( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Simulate data ingestion. Run ingest pipelines against a set of provided documents, - optionally with substitute pipeline definitions, to simulate ingesting data into - an index. This API is meant to be used for troubleshooting or pipeline development, - as it does not actually index any data into Elasticsearch. The API runs the default - and final pipeline for that index against a set of documents provided in the - body of the request. If a pipeline contains a reroute processor, it follows that - reroute processor to the new index, running that index's pipelines as well the - same way that a non-simulated ingest would. No data is indexed into Elasticsearch. - Instead, the transformed document is returned, along with the list of pipelines - that have been run and the name of the index where the document would have been - indexed if this were not a simulation. The transformed document is validated - against the mappings that would apply to this index, and any validation error - is reported in the result. This API differs from the simulate pipeline API in - that you specify a single pipeline for that API, and it runs only that one pipeline. - The simulate pipeline API is more useful for developing a single pipeline, while - the simulate ingest API is more useful for troubleshooting the interaction of - the various pipelines that get applied when ingesting into an index. By default, - the pipeline definitions that are currently in the system are used. However, - you can supply substitute pipeline definitions in the body of the request. These - will be used in place of the pipeline definitions that are already in the system. - This can be used to replace existing pipeline definitions or to create new ones. - The pipeline substitutions are used only within this request. + .. raw:: html + +

Simulate data ingestion. + Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index.

+

This API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch.

+

The API runs the default and final pipeline for that index against a set of documents provided in the body of the request. If a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index's pipelines as well, in the same way that a non-simulated ingest would. No data is indexed into Elasticsearch. Instead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation. The transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result.

+

This API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline. + The simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index.

+

By default, the pipeline definitions that are currently in the system are used. + However, you can supply substitute pipeline definitions in the body of the request. + These will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request.
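
A sketch of simulating ingest with a substitute pipeline definition (the index, document, and pipeline name are illustrative):

```python
# Simulate ingest into "my-index" without indexing anything;
# the substitute definition is used wherever "my-pipeline" would run
resp = client.simulate.ingest(
    index="my-index",
    docs=[{"_source": {"message": "hello world"}}],
    pipeline_substitutions={
        "my-pipeline": {"processors": [{"uppercase": {"field": "message"}}]}
    },
)
print(resp["docs"][0])  # transformed document plus the pipelines that ran
```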

+ ``_ diff --git a/elasticsearch/_sync/client/slm.py b/elasticsearch/_sync/client/slm.py index ff7c59c8d..9196bc57c 100644 --- a/elasticsearch/_sync/client/slm.py +++ b/elasticsearch/_sync/client/slm.py @@ -38,9 +38,12 @@ def delete_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a policy. Delete a snapshot lifecycle policy definition. This operation - prevents any future snapshots from being taken but does not cancel in-progress - snapshots or remove previously-taken snapshots. + .. raw:: html + +

Delete a policy. Delete a snapshot lifecycle policy definition. This operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots.

+ ``_ @@ -91,10 +94,12 @@ def execute_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Run a policy. Immediately create a snapshot according to the snapshot lifecycle - policy without waiting for the scheduled time. The snapshot policy is normally - applied according to its schedule, but you might want to manually run a policy - before performing an upgrade or other maintenance. + .. raw:: html + +

Run a policy. Immediately create a snapshot according to the snapshot lifecycle policy without waiting for the scheduled time. The snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance.

+ ``_ @@ -144,9 +149,12 @@ def execute_retention( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Run a retention policy. Manually apply the retention policy to force immediate - removal of snapshots that are expired according to the snapshot lifecycle policy - retention rules. The retention policy is normally applied according to its schedule. + .. raw:: html + +

Run a retention policy. Manually apply the retention policy to force immediate removal of snapshots that are expired according to the snapshot lifecycle policy retention rules. The retention policy is normally applied according to its schedule.

+ ``_ @@ -194,8 +202,11 @@ def get_lifecycle( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get policy information. Get snapshot lifecycle policy definitions and information - about the latest snapshot attempts. + .. raw:: html + +

Get policy information. + Get snapshot lifecycle policy definitions and information about the latest snapshot attempts.

+ ``_ @@ -248,8 +259,11 @@ def get_stats( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get snapshot lifecycle management statistics. Get global and policy-level statistics - about actions taken by snapshot lifecycle management. + .. raw:: html + +

Get snapshot lifecycle management statistics. + Get global and policy-level statistics about actions taken by snapshot lifecycle management.

+ ``_ @@ -296,7 +310,10 @@ def get_status( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the snapshot lifecycle management status. + .. raw:: html + +

Get the snapshot lifecycle management status.

+ ``_ @@ -354,9 +371,13 @@ def put_lifecycle( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a policy. Create or update a snapshot lifecycle policy. If the - policy already exists, this request increments the policy version. Only the latest - version of a policy is stored. + .. raw:: html + +

Create or update a policy. Create or update a snapshot lifecycle policy. If the policy already exists, this request increments the policy version. Only the latest version of a policy is stored.
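
A sketch of a nightly snapshot policy (the policy ID, schedule, repository, and retention values are illustrative):

```python
# Snapshot all indices every night at 01:30 and keep roughly 30 days of history
client.slm.put_lifecycle(
    policy_id="nightly-snapshots",
    schedule="0 30 1 * * ?",
    name="<nightly-snap-{now/d}>",
    repository="my_backup",
    config={"indices": ["*"], "include_global_state": True},
    retention={"expire_after": "30d", "min_count": 5, "max_count": 50},
)
```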

+ ``_ @@ -437,9 +458,12 @@ def start( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Start snapshot lifecycle management. Snapshot lifecycle management (SLM) starts - automatically when a cluster is formed. Manually starting SLM is necessary only - if it has been stopped using the stop SLM API. + .. raw:: html + +

Start snapshot lifecycle management. Snapshot lifecycle management (SLM) starts automatically when a cluster is formed. Manually starting SLM is necessary only if it has been stopped using the stop SLM API.

+ ``_ @@ -488,15 +512,16 @@ def stop( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Stop snapshot lifecycle management. Stop all snapshot lifecycle management (SLM) - operations and the SLM plugin. This API is useful when you are performing maintenance - on a cluster and need to prevent SLM from performing any actions on your data - streams or indices. Stopping SLM does not stop any snapshots that are in progress. - You can manually trigger snapshots with the run snapshot lifecycle policy API - even if SLM is stopped. The API returns a response as soon as the request is - acknowledged, but the plugin might continue to run until in-progress operations - complete and it can be safely stopped. Use the get snapshot lifecycle management - status API to see if SLM is running. + .. raw:: html + +

Stop snapshot lifecycle management. + Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. + This API is useful when you are performing maintenance on a cluster and need to prevent SLM from performing any actions on your data streams or indices. + Stopping SLM does not stop any snapshots that are in progress. + You can manually trigger snapshots with the run snapshot lifecycle policy API even if SLM is stopped.

+

The API returns a response as soon as the request is acknowledged, but the plugin might continue to run until in-progress operations complete and it can be safely stopped. + Use the get snapshot lifecycle management status API to see if SLM is running.

+ ``_ diff --git a/elasticsearch/_sync/client/snapshot.py b/elasticsearch/_sync/client/snapshot.py index 3e86e25df..99d89a35b 100644 --- a/elasticsearch/_sync/client/snapshot.py +++ b/elasticsearch/_sync/client/snapshot.py @@ -44,8 +44,11 @@ def cleanup_repository( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clean up the snapshot repository. Trigger the review of the contents of a snapshot - repository and delete any stale data not referenced by existing snapshots. + .. raw:: html + +

Clean up the snapshot repository. + Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots.

+ ``_ @@ -98,8 +101,11 @@ def clone( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clone a snapshot. Clone part of all of a snapshot into another snapshot in the - same repository. + .. raw:: html + +

Clone a snapshot. + Clone part or all of a snapshot into another snapshot in the same repository.

+ ``_ @@ -179,7 +185,11 @@ def create( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a snapshot. Take a snapshot of a cluster or of data streams and indices. + .. raw:: html + +

Create a snapshot. + Take a snapshot of a cluster or of data streams and indices.

+ ``_ @@ -283,11 +293,13 @@ def create_repository( verify: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a snapshot repository. IMPORTANT: If you are migrating searchable - snapshots, the repository name must be identical in the source and destination - clusters. To register a snapshot repository, the cluster's global metadata must - be writeable. Ensure there are no cluster blocks (for example, `cluster.blocks.read_only` - and `clsuter.blocks.read_only_allow_delete` settings) that prevent write access. + .. raw:: html + +

Create or update a snapshot repository. + IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters. + To register a snapshot repository, the cluster's global metadata must be writeable. + Ensure there are no cluster blocks (for example, cluster.blocks.read_only and cluster.blocks.read_only_allow_delete settings) that prevent write access.
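As a rough illustration, registering a shared-filesystem repository through the Python client might look like the sketch below; the repository name and path are placeholders, not part of this change.

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder address

    # Register a shared-filesystem repository; name and location are placeholders.
    client.snapshot.create_repository(
        name="my_repository",
        repository={"type": "fs", "settings": {"location": "/mnt/backups/my_repository"}},
    )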

+ ``_ @@ -347,7 +359,10 @@ def delete( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete snapshots. + .. raw:: html + +

Delete snapshots.

+ ``_ @@ -398,9 +413,12 @@ def delete_repository( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete snapshot repositories. When a repository is unregistered, Elasticsearch - removes only the reference to the location where the repository is storing the - snapshots. The snapshots themselves are left untouched and in place. + .. raw:: html + +

Delete snapshot repositories. + When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots. + The snapshots themselves are left untouched and in place.

+ ``_ @@ -474,7 +492,10 @@ def get( verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get snapshot information. + .. raw:: html + +

Get snapshot information.

+ ``_ @@ -586,7 +607,10 @@ def get_repository( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get snapshot repository information. + .. raw:: html + +

Get snapshot repository information.

+ ``_ @@ -648,120 +672,83 @@ def repository_analyze( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Analyze a snapshot repository. Analyze the performance characteristics and any - incorrect behaviour found in a repository. The response exposes implementation - details of the analysis which may change from version to version. The response - body format is therefore not considered stable and may be different in newer - versions. There are a large number of third-party storage systems available, - not all of which are suitable for use as a snapshot repository by Elasticsearch. - Some storage systems behave incorrectly, or perform poorly, especially when accessed - concurrently by multiple clients as the nodes of an Elasticsearch cluster do. - This API performs a collection of read and write operations on your repository - which are designed to detect incorrect behaviour and to measure the performance - characteristics of your storage system. The default values for the parameters - are deliberately low to reduce the impact of running an analysis inadvertently - and to provide a sensible starting point for your investigations. Run your first - analysis with the default parameter values to check for simple problems. If successful, - run a sequence of increasingly large analyses until you encounter a failure or - you reach a `blob_count` of at least `2000`, a `max_blob_size` of at least `2gb`, - a `max_total_data_size` of at least `1tb`, and a `register_operation_count` of - at least `100`. Always specify a generous timeout, possibly `1h` or longer, to - allow time for each analysis to run to completion. Perform the analyses using - a multi-node cluster of a similar size to your production cluster so that it - can detect any problems that only arise when the repository is accessed by many - nodes at once. If the analysis fails, Elasticsearch detected that your repository - behaved unexpectedly. This usually means you are using a third-party storage - system with an incorrect or incompatible implementation of the API it claims - to support. If so, this storage system is not suitable for use as a snapshot - repository. You will need to work with the supplier of your storage system to - address the incompatibilities that Elasticsearch detects. If the analysis is - successful, the API returns details of the testing process, optionally including - how long each operation took. You can use this information to determine the performance - of your storage system. If any operation fails or returns an incorrect result, - the API returns an error. If the API returns an error, it may not have removed - all the data it wrote to the repository. The error will indicate the location - of any leftover data and this path is also recorded in the Elasticsearch logs. - You should verify that this location has been cleaned up correctly. If there - is still leftover data at the specified location, you should manually remove - it. If the connection from your client to Elasticsearch is closed while the client - is waiting for the result of the analysis, the test is cancelled. Some clients - are configured to close their connection if no response is received within a - certain timeout. An analysis takes a long time to complete so you might need - to relax any such client-side timeouts. On cancellation the analysis attempts - to clean up the data it was writing, but it may not be able to remove it all. 
- The path to the leftover data is recorded in the Elasticsearch logs. You should - verify that this location has been cleaned up correctly. If there is still leftover - data at the specified location, you should manually remove it. If the analysis - is successful then it detected no incorrect behaviour, but this does not mean - that correct behaviour is guaranteed. The analysis attempts to detect common - bugs but it does not offer 100% coverage. Additionally, it does not test the - following: * Your repository must perform durable writes. Once a blob has been - written it must remain in place until it is deleted, even after a power loss - or similar disaster. * Your repository must not suffer from silent data corruption. - Once a blob has been written, its contents must remain unchanged until it is - deliberately modified or deleted. * Your repository must behave correctly even - if connectivity from the cluster is disrupted. Reads and writes may fail in this - case, but they must not return incorrect results. IMPORTANT: An analysis writes - a substantial amount of data to your repository and then reads it back again. - This consumes bandwidth on the network between the cluster and the repository, - and storage space and I/O bandwidth on the repository itself. You must ensure - this load does not affect other users of these systems. Analyses respect the - repository settings `max_snapshot_bytes_per_sec` and `max_restore_bytes_per_sec` - if available and the cluster setting `indices.recovery.max_bytes_per_sec` which - you can use to limit the bandwidth they consume. NOTE: This API is intended for - exploratory use by humans. You should expect the request parameters and the response - format to vary in future versions. NOTE: Different versions of Elasticsearch - may perform different checks for repository compatibility, with newer versions - typically being stricter than older ones. A storage system that passes repository - analysis with one version of Elasticsearch may fail with a different version. - This indicates it behaves incorrectly in ways that the former version did not - detect. You must work with the supplier of your storage system to address the - incompatibilities detected by the repository analysis API in any version of Elasticsearch. - NOTE: This API may not work correctly in a mixed-version cluster. *Implementation - details* NOTE: This section of documentation describes how the repository analysis - API works in this version of Elasticsearch, but you should expect the implementation - to vary between versions. The request parameters and response format depend on - details of the implementation so may also be different in newer versions. The - analysis comprises a number of blob-level tasks, as set by the `blob_count` parameter - and a number of compare-and-exchange operations on linearizable registers, as - set by the `register_operation_count` parameter. These tasks are distributed - over the data and master-eligible nodes in the cluster for execution. For most - blob-level tasks, the executing node first writes a blob to the repository and - then instructs some of the other nodes in the cluster to attempt to read the - data it just wrote. The size of the blob is chosen randomly, according to the - `max_blob_size` and `max_total_data_size` parameters. If any of these reads fails - then the repository does not implement the necessary read-after-write semantics - that Elasticsearch requires. 
For some blob-level tasks, the executing node will - instruct some of its peers to attempt to read the data before the writing process - completes. These reads are permitted to fail, but must not return partial data. - If any read returns partial data then the repository does not implement the necessary - atomicity semantics that Elasticsearch requires. For some blob-level tasks, the - executing node will overwrite the blob while its peers are reading it. In this - case the data read may come from either the original or the overwritten blob, - but the read operation must not return partial data or a mix of data from the - two blobs. If any of these reads returns partial data or a mix of the two blobs - then the repository does not implement the necessary atomicity semantics that - Elasticsearch requires for overwrites. The executing node will use a variety - of different methods to write the blob. For instance, where applicable, it will - use both single-part and multi-part uploads. Similarly, the reading nodes will - use a variety of different methods to read the data back again. For instance - they may read the entire blob from start to end or may read only a subset of - the data. For some blob-level tasks, the executing node will cancel the write - before it is complete. In this case, it still instructs some of the other nodes - in the cluster to attempt to read the blob but all of these reads must fail to - find the blob. Linearizable registers are special blobs that Elasticsearch manipulates - using an atomic compare-and-exchange operation. This operation ensures correct - and strongly-consistent behavior even when the blob is accessed by multiple nodes - at the same time. The detailed implementation of the compare-and-exchange operation - on linearizable registers varies by repository type. Repository analysis verifies - that that uncontended compare-and-exchange operations on a linearizable register - blob always succeed. Repository analysis also verifies that contended operations - either succeed or report the contention but do not return incorrect results. - If an operation fails due to contention, Elasticsearch retries the operation - until it succeeds. Most of the compare-and-exchange operations performed by repository - analysis atomically increment a counter which is represented as an 8-byte blob. - Some operations also verify the behavior on small blobs with sizes other than - 8 bytes. + .. raw:: html + +

Analyze a snapshot repository. + Analyze the performance characteristics and any incorrect behaviour found in a repository.

+

The response exposes implementation details of the analysis which may change from version to version. + The response body format is therefore not considered stable and may be different in newer versions.

+

There are a large number of third-party storage systems available, not all of which are suitable for use as a snapshot repository by Elasticsearch. + Some storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do. This API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system.

+

The default values for the parameters are deliberately low to reduce the impact of running an analysis inadvertently and to provide a sensible starting point for your investigations. + Run your first analysis with the default parameter values to check for simple problems. + If successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a blob_count of at least 2000, a max_blob_size of at least 2gb, a max_total_data_size of at least 1tb, and a register_operation_count of at least 100. + Always specify a generous timeout, possibly 1h or longer, to allow time for each analysis to run to completion. + Perform the analyses using a multi-node cluster of a similar size to your production cluster so that it can detect any problems that only arise when the repository is accessed by many nodes at once.
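A hedged sketch of that progression with the Python client, assuming the default first pass succeeds; the repository name and the enlarged sizes are illustrative only.

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder address

    # First pass with default parameter values to catch simple problems.
    client.snapshot.repository_analyze(name="my_repository", timeout="1h")

    # A later, larger run once the defaults pass (values shown are only an example).
    client.snapshot.repository_analyze(
        name="my_repository",
        blob_count=2000,
        max_blob_size="2gb",
        max_total_data_size="1tb",
        register_operation_count=100,
        timeout="4h",
    )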

+

If the analysis fails, Elasticsearch detected that your repository behaved unexpectedly. + This usually means you are using a third-party storage system with an incorrect or incompatible implementation of the API it claims to support. + If so, this storage system is not suitable for use as a snapshot repository. + You will need to work with the supplier of your storage system to address the incompatibilities that Elasticsearch detects.

+

If the analysis is successful, the API returns details of the testing process, optionally including how long each operation took. + You can use this information to determine the performance of your storage system. + If any operation fails or returns an incorrect result, the API returns an error. + If the API returns an error, it may not have removed all the data it wrote to the repository. + The error will indicate the location of any leftover data and this path is also recorded in the Elasticsearch logs. + You should verify that this location has been cleaned up correctly. + If there is still leftover data at the specified location, you should manually remove it.

+

If the connection from your client to Elasticsearch is closed while the client is waiting for the result of the analysis, the test is cancelled. + Some clients are configured to close their connection if no response is received within a certain timeout. + An analysis takes a long time to complete so you might need to relax any such client-side timeouts. + On cancellation the analysis attempts to clean up the data it was writing, but it may not be able to remove it all. + The path to the leftover data is recorded in the Elasticsearch logs. + You should verify that this location has been cleaned up correctly. + If there is still leftover data at the specified location, you should manually remove it.

+

If the analysis is successful then it detected no incorrect behaviour, but this does not mean that correct behaviour is guaranteed. + The analysis attempts to detect common bugs but it does not offer 100% coverage. + Additionally, it does not test the following:

+
  • Your repository must perform durable writes. Once a blob has been written it must remain in place until it is deleted, even after a power loss or similar disaster.
  • Your repository must not suffer from silent data corruption. Once a blob has been written, its contents must remain unchanged until it is deliberately modified or deleted.
  • Your repository must behave correctly even if connectivity from the cluster is disrupted. Reads and writes may fail in this case, but they must not return incorrect results.

IMPORTANT: An analysis writes a substantial amount of data to your repository and then reads it back again. + This consumes bandwidth on the network between the cluster and the repository, and storage space and I/O bandwidth on the repository itself. + You must ensure this load does not affect other users of these systems. + Analyses respect the repository settings max_snapshot_bytes_per_sec and max_restore_bytes_per_sec if available and the cluster setting indices.recovery.max_bytes_per_sec which you can use to limit the bandwidth they consume.

+

NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions.

+

NOTE: Different versions of Elasticsearch may perform different checks for repository compatibility, with newer versions typically being stricter than older ones. + A storage system that passes repository analysis with one version of Elasticsearch may fail with a different version. + This indicates it behaves incorrectly in ways that the former version did not detect. + You must work with the supplier of your storage system to address the incompatibilities detected by the repository analysis API in any version of Elasticsearch.

+

NOTE: This API may not work correctly in a mixed-version cluster.

+

Implementation details

+

NOTE: This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions. The request parameters and response format depend on details of the implementation so may also be different in newer versions.

+

The analysis comprises a number of blob-level tasks, as set by the blob_count parameter and a number of compare-and-exchange operations on linearizable registers, as set by the register_operation_count parameter. + These tasks are distributed over the data and master-eligible nodes in the cluster for execution.

+

For most blob-level tasks, the executing node first writes a blob to the repository and then instructs some of the other nodes in the cluster to attempt to read the data it just wrote. + The size of the blob is chosen randomly, according to the max_blob_size and max_total_data_size parameters. + If any of these reads fails then the repository does not implement the necessary read-after-write semantics that Elasticsearch requires.

+

For some blob-level tasks, the executing node will instruct some of its peers to attempt to read the data before the writing process completes. + These reads are permitted to fail, but must not return partial data. + If any read returns partial data then the repository does not implement the necessary atomicity semantics that Elasticsearch requires.

+

For some blob-level tasks, the executing node will overwrite the blob while its peers are reading it. + In this case the data read may come from either the original or the overwritten blob, but the read operation must not return partial data or a mix of data from the two blobs. + If any of these reads returns partial data or a mix of the two blobs then the repository does not implement the necessary atomicity semantics that Elasticsearch requires for overwrites.

+

The executing node will use a variety of different methods to write the blob. + For instance, where applicable, it will use both single-part and multi-part uploads. + Similarly, the reading nodes will use a variety of different methods to read the data back again. + For instance they may read the entire blob from start to end or may read only a subset of the data.

+

For some blob-level tasks, the executing node will cancel the write before it is complete. + In this case, it still instructs some of the other nodes in the cluster to attempt to read the blob but all of these reads must fail to find the blob.

+

Linearizable registers are special blobs that Elasticsearch manipulates using an atomic compare-and-exchange operation. + This operation ensures correct and strongly-consistent behavior even when the blob is accessed by multiple nodes at the same time. + The detailed implementation of the compare-and-exchange operation on linearizable registers varies by repository type. + Repository analysis verifies that uncontended compare-and-exchange operations on a linearizable register blob always succeed. + Repository analysis also verifies that contended operations either succeed or report the contention but do not return incorrect results. + If an operation fails due to contention, Elasticsearch retries the operation until it succeeds. + Most of the compare-and-exchange operations performed by repository analysis atomically increment a counter which is represented as an 8-byte blob. + Some operations also verify the behavior on small blobs with sizes other than 8 bytes.

+ ``_ @@ -864,40 +851,31 @@ def repository_verify_integrity( verify_blob_contents: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Verify the repository integrity. Verify the integrity of the contents of a snapshot - repository. This API enables you to perform a comprehensive check of the contents - of a repository, looking for any anomalies in its data or metadata which might - prevent you from restoring snapshots from the repository or which might cause - future snapshot create or delete operations to fail. If you suspect the integrity - of the contents of one of your snapshot repositories, cease all write activity - to this repository immediately, set its `read_only` option to `true`, and use - this API to verify its integrity. Until you do so: * It may not be possible to - restore some snapshots from this repository. * Searchable snapshots may report - errors when searched or may have unassigned shards. * Taking snapshots into this - repository may fail or may appear to succeed but have created a snapshot which - cannot be restored. * Deleting snapshots from this repository may fail or may - appear to succeed but leave the underlying data on disk. * Continuing to write - to the repository while it is in an invalid state may causing additional damage - to its contents. If the API finds any problems with the integrity of the contents - of your repository, Elasticsearch will not be able to repair the damage. The - only way to bring the repository back into a fully working state after its contents - have been damaged is by restoring its contents from a repository backup which - was taken before the damage occurred. You must also identify what caused the - damage and take action to prevent it from happening again. If you cannot restore - a repository backup, register a new repository and use this for all future snapshot - operations. In some cases it may be possible to recover some of the contents - of a damaged repository, either by restoring as many of its snapshots as needed - and taking new snapshots of the restored data, or by using the reindex API to - copy data from any searchable snapshots mounted from the damaged repository. - Avoid all operations which write to the repository while the verify repository - integrity API is running. If something changes the repository contents while - an integrity verification is running then Elasticsearch may incorrectly report - having detected some anomalies in its contents due to the concurrent writes. - It may also incorrectly fail to report some anomalies that the concurrent writes - prevented it from detecting. NOTE: This API is intended for exploratory use by - humans. You should expect the request parameters and the response format to vary - in future versions. NOTE: This API may not work correctly in a mixed-version - cluster. + .. raw:: html + +

Verify the repository integrity. + Verify the integrity of the contents of a snapshot repository.

+

This API enables you to perform a comprehensive check of the contents of a repository, looking for any anomalies in its data or metadata which might prevent you from restoring snapshots from the repository or which might cause future snapshot create or delete operations to fail.

+

If you suspect the integrity of the contents of one of your snapshot repositories, cease all write activity to this repository immediately, set its read_only option to true, and use this API to verify its integrity. + Until you do so:

+
  • It may not be possible to restore some snapshots from this repository.
  • Searchable snapshots may report errors when searched or may have unassigned shards.
  • Taking snapshots into this repository may fail or may appear to succeed but have created a snapshot which cannot be restored.
  • Deleting snapshots from this repository may fail or may appear to succeed but leave the underlying data on disk.
  • Continuing to write to the repository while it is in an invalid state may cause additional damage to its contents.

If the API finds any problems with the integrity of the contents of your repository, Elasticsearch will not be able to repair the damage. + The only way to bring the repository back into a fully working state after its contents have been damaged is by restoring its contents from a repository backup which was taken before the damage occurred. + You must also identify what caused the damage and take action to prevent it from happening again.

+

If you cannot restore a repository backup, register a new repository and use this for all future snapshot operations. + In some cases it may be possible to recover some of the contents of a damaged repository, either by restoring as many of its snapshots as needed and taking new snapshots of the restored data, or by using the reindex API to copy data from any searchable snapshots mounted from the damaged repository.

+

Avoid all operations which write to the repository while the verify repository integrity API is running. + If something changes the repository contents while an integrity verification is running then Elasticsearch may incorrectly report having detected some anomalies in its contents due to the concurrent writes. + It may also incorrectly fail to report some anomalies that the concurrent writes prevented it from detecting.

+

NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions.

+

NOTE: This API may not work correctly in a mixed-version cluster.

+ ``_ @@ -994,20 +972,20 @@ def restore( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Restore a snapshot. Restore a snapshot of a cluster or data streams and indices. - You can restore a snapshot only to a running cluster with an elected master node. - The snapshot repository must be registered and available to the cluster. The - snapshot and cluster versions must be compatible. To restore a snapshot, the - cluster's global metadata must be writable. Ensure there are't any cluster blocks - that prevent writes. The restore operation ignores index blocks. Before you restore - a data stream, ensure the cluster contains a matching index template with data - streams enabled. To check, use the index management feature in Kibana or the - get index template API: ``` GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream - ``` If no such template exists, you can create one or restore a cluster state - that contains one. Without a matching index template, a data stream can't roll - over or create backing indices. If your snapshot contains data from App Search - or Workplace Search, you must restore the Enterprise Search encryption key before - you restore the snapshot. + .. raw:: html + +

Restore a snapshot. + Restore a snapshot of a cluster or data streams and indices.

+

You can restore a snapshot only to a running cluster with an elected master node. + The snapshot repository must be registered and available to the cluster. + The snapshot and cluster versions must be compatible.

+

To restore a snapshot, the cluster's global metadata must be writable. Ensure there aren't any cluster blocks that prevent writes. The restore operation ignores index blocks.

+

Before you restore a data stream, ensure the cluster contains a matching index template with data streams enabled. To check, use the index management feature in Kibana or the get index template API:

+
GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
+          
+

If no such template exists, you can create one or restore a cluster state that contains one. Without a matching index template, a data stream can't roll over or create backing indices.

+

If your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot.
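Put together, a restore from the Python client might look like this sketch; the template check mirrors the GET request above, and the repository, snapshot, and index names are placeholders.

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder address

    # Confirm a matching index template exists before restoring a data stream.
    templates = client.indices.get_index_template(
        filter_path=(
            "index_templates.name,"
            "index_templates.index_template.index_patterns,"
            "index_templates.index_template.data_stream"
        )
    )

    # Restore selected data streams and indices from an existing snapshot.
    client.snapshot.restore(
        repository="my_repository",
        snapshot="snapshot_1",
        indices="logs-*",
        include_global_state=False,
        wait_for_completion=True,
    )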

+ ``_ @@ -1100,18 +1078,18 @@ def status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the snapshot status. Get a detailed description of the current state for - each shard participating in the snapshot. Note that this API should be used only - to obtain detailed shard-level information for ongoing snapshots. If this detail - is not needed or you want to obtain information about one or more existing snapshots, - use the get snapshot API. WARNING: Using the API to return the status of any - snapshots other than currently running snapshots can be expensive. The API requires - a read from the repository for each shard in each snapshot. For example, if you - have 100 snapshots with 1,000 shards each, an API request that includes all snapshots - will require 100,000 reads (100 snapshots x 1,000 shards). Depending on the latency - of your storage, such requests can take an extremely long time to return results. - These requests can also tax machine resources and, when using cloud storage, - incur high processing costs. + .. raw:: html + +

Get the snapshot status. + Get a detailed description of the current state for each shard participating in the snapshot. + Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots. + If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API.

+

WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive. + The API requires a read from the repository for each shard in each snapshot. + For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards).

+

Depending on the latency of your storage, such requests can take an extremely long time to return results. + These requests can also tax machine resources and, when using cloud storage, incur high processing costs.
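In practice that means scoping the call to the snapshots you actually need, as in this sketch; the repository and snapshot names are placeholders.

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder address

    # Ask only about one named snapshot instead of every snapshot in the repository.
    resp = client.snapshot.status(repository="my_repository", snapshot="snapshot_1")
    for snap in resp["snapshots"]:
        print(snap["snapshot"], snap["state"])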

+ ``_ @@ -1170,8 +1148,11 @@ def verify_repository( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Verify a snapshot repository. Check for common misconfigurations in a snapshot - repository. + .. raw:: html + +

Verify a snapshot repository. + Check for common misconfigurations in a snapshot repository.

+ ``_ diff --git a/elasticsearch/_sync/client/sql.py b/elasticsearch/_sync/client/sql.py index d56edbd03..ecdb49a22 100644 --- a/elasticsearch/_sync/client/sql.py +++ b/elasticsearch/_sync/client/sql.py @@ -39,7 +39,10 @@ def clear_cursor( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Clear an SQL search cursor. + .. raw:: html + +

Clear an SQL search cursor.

+ ``_ @@ -84,11 +87,17 @@ def delete_async( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete an async SQL search. Delete an async SQL search or a stored synchronous - SQL search. If the search is still running, the API cancels it. If the Elasticsearch - security features are enabled, only the following users can use this API to delete - a search: * Users with the `cancel_task` cluster privilege. * The user who first - submitted the search. + .. raw:: html + +

Delete an async SQL search. + Delete an async SQL search or a stored synchronous SQL search. + If the search is still running, the API cancels it.

+

If the Elasticsearch security features are enabled, only the following users can use this API to delete a search:

+
  • Users with the cancel_task cluster privilege.
  • The user who first submitted the search.
+ ``_ @@ -134,10 +143,12 @@ def get_async( ] = None, ) -> ObjectApiResponse[t.Any]: """ - Get async SQL search results. Get the current status and available results for - an async SQL search or stored synchronous SQL search. If the Elasticsearch security - features are enabled, only the user who first submitted the SQL search can retrieve - the search using this API. + .. raw:: html + +

Get async SQL search results. + Get the current status and available results for an async SQL search or stored synchronous SQL search.

+

If the Elasticsearch security features are enabled, only the user who first submitted the SQL search can retrieve the search using this API.

+ ``_ @@ -195,8 +206,11 @@ def get_async_status( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get the async SQL search status. Get the current status of an async SQL search - or a stored synchronous SQL search. + .. raw:: html + +

Get the async SQL search status. + Get the current status of an async SQL search or a stored synchronous SQL search.

+ ``_ @@ -281,7 +295,11 @@ def query( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get SQL search results. Run an SQL request. + .. raw:: html + +

Get SQL search results. + Run an SQL request.
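For example, a minimal query from the Python client could look like this sketch; the index pattern and columns are placeholders.

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder address

    # Run an SQL statement and page through the first batch of rows.
    resp = client.sql.query(
        query='SELECT "@timestamp", message FROM "my-logs-*" ORDER BY "@timestamp" DESC',
        fetch_size=5,
    )
    print(resp["columns"])
    print(resp["rows"])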

+ ``_ @@ -402,9 +420,12 @@ def translate( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Translate SQL into Elasticsearch queries. Translate an SQL search into a search - API request containing Query DSL. It accepts the same request body parameters - as the SQL search API, excluding `cursor`. + .. raw:: html + +

Translate SQL into Elasticsearch queries. + Translate an SQL search into a search API request containing Query DSL. + It accepts the same request body parameters as the SQL search API, excluding cursor.
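A quick sketch of the same idea from the Python client; the statement and index pattern are placeholders.

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder address

    # Translate an SQL statement into the equivalent Query DSL search body.
    dsl = client.sql.translate(query='SELECT message FROM "my-logs-*" LIMIT 10')
    print(dsl)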

+ ``_ diff --git a/elasticsearch/_sync/client/ssl.py b/elasticsearch/_sync/client/ssl.py index 1f3cb3bed..d65003825 100644 --- a/elasticsearch/_sync/client/ssl.py +++ b/elasticsearch/_sync/client/ssl.py @@ -35,23 +35,22 @@ def certificates( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get SSL certificates. Get information about the X.509 certificates that are used - to encrypt communications in the cluster. The API returns a list that includes - certificates from all TLS contexts including: - Settings for transport and HTTP - interfaces - TLS settings that are used within authentication realms - TLS settings - for remote monitoring exporters The list includes certificates that are used - for configuring trust, such as those configured in the `xpack.security.transport.ssl.truststore` - and `xpack.security.transport.ssl.certificate_authorities` settings. It also - includes certificates that are used for configuring server identity, such as - `xpack.security.http.ssl.keystore` and `xpack.security.http.ssl.certificate settings`. - The list does not include certificates that are sourced from the default SSL - context of the Java Runtime Environment (JRE), even if those certificates are - in use within Elasticsearch. NOTE: When a PKCS#11 token is configured as the - truststore of the JRE, the API returns all the certificates that are included - in the PKCS#11 token irrespective of whether these are used in the Elasticsearch - TLS configuration. If Elasticsearch is configured to use a keystore or truststore, - the API output includes all certificates in that store, even though some of the - certificates might not be in active use within the cluster. + .. raw:: html + +

Get SSL certificates.

+

Get information about the X.509 certificates that are used to encrypt communications in the cluster. + The API returns a list that includes certificates from all TLS contexts including:

+
  • Settings for transport and HTTP interfaces
  • TLS settings that are used within authentication realms
  • TLS settings for remote monitoring exporters

The list includes certificates that are used for configuring trust, such as those configured in the xpack.security.transport.ssl.truststore and xpack.security.transport.ssl.certificate_authorities settings. + It also includes certificates that are used for configuring server identity, such as xpack.security.http.ssl.keystore and xpack.security.http.ssl.certificate settings.

+

The list does not include certificates that are sourced from the default SSL context of the Java Runtime Environment (JRE), even if those certificates are in use within Elasticsearch.

+

NOTE: When a PKCS#11 token is configured as the truststore of the JRE, the API returns all the certificates that are included in the PKCS#11 token irrespective of whether these are used in the Elasticsearch TLS configuration.

+

If Elasticsearch is configured to use a keystore or truststore, the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster.
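For completeness, reading that list from the Python client is short; the response fields printed here (path, alias, expiry) are assumed from the documented response shape and may vary by version.

    from elasticsearch import Elasticsearch

    client = Elasticsearch("https://localhost:9200")  # placeholder address

    resp = client.ssl.certificates()
    for cert in resp.body:  # the body is a list of certificate descriptions
        print(cert.get("path"), cert.get("alias"), cert.get("expiry"))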

+ ``_ """ diff --git a/elasticsearch/_sync/client/synonyms.py b/elasticsearch/_sync/client/synonyms.py index 92639b50c..e58ab5ee4 100644 --- a/elasticsearch/_sync/client/synonyms.py +++ b/elasticsearch/_sync/client/synonyms.py @@ -36,21 +36,22 @@ def delete_synonym( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a synonym set. You can only delete a synonyms set that is not in use by - any index analyzer. Synonyms sets can be used in synonym graph token filters - and synonym token filters. These synonym filters can be used as part of search - analyzers. Analyzers need to be loaded when an index is restored (such as when - a node starts, or the index becomes open). Even if the analyzer is not used on - any field mapping, it still needs to be loaded on the index recovery phase. If - any analyzers cannot be loaded, the index becomes unavailable and the cluster - status becomes red or yellow as index shards are not available. To prevent that, - synonyms sets that are used in analyzers can't be deleted. A delete request in - this case will return a 400 response code. To remove a synonyms set, you must - first remove all indices that contain analyzers using it. You can migrate an - index by creating a new index that does not contain the token filter with the - synonyms set, and use the reindex API in order to copy over the index data. Once - finished, you can delete the index. When the synonyms set is not used in analyzers, - you will be able to delete it. + .. raw:: html + +

Delete a synonym set.

+

You can only delete a synonyms set that is not in use by any index analyzer.

+

Synonyms sets can be used in synonym graph token filters and synonym token filters. + These synonym filters can be used as part of search analyzers.

+

Analyzers need to be loaded when an index is restored (such as when a node starts, or the index becomes open). + Even if the analyzer is not used on any field mapping, it still needs to be loaded on the index recovery phase.

+

If any analyzers cannot be loaded, the index becomes unavailable and the cluster status becomes red or yellow as index shards are not available. + To prevent that, synonyms sets that are used in analyzers can't be deleted. + A delete request in this case will return a 400 response code.

+

To remove a synonyms set, you must first remove all indices that contain analyzers using it. + You can migrate an index by creating a new index that does not contain the token filter with the synonyms set, and use the reindex API in order to copy over the index data. + Once finished, you can delete the index. + When the synonyms set is not used in analyzers, you will be able to delete it.

+ ``_ @@ -91,7 +92,11 @@ def delete_synonym_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a synonym rule. Delete a synonym rule from a synonym set. + .. raw:: html + +

Delete a synonym rule. + Delete a synonym rule from a synonym set.

+ ``_ @@ -141,7 +146,10 @@ def get_synonym( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a synonym set. + .. raw:: html + +

Get a synonym set.

+ ``_ @@ -188,7 +196,11 @@ def get_synonym_rule( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a synonym rule. Get a synonym rule from a synonym set. + .. raw:: html + +

Get a synonym rule. + Get a synonym rule from a synonym set.

+ ``_ @@ -237,7 +249,11 @@ def get_synonyms_sets( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Get all synonym sets. Get a summary of all defined synonym sets. + .. raw:: html + +

Get all synonym sets. + Get a summary of all defined synonym sets.

+ ``_ @@ -286,12 +302,14 @@ def put_synonym( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 - synonym rules per set. If you need to manage more synonym rules, you can create - multiple synonym sets. When an existing synonyms set is updated, the search analyzers - that use the synonyms set are reloaded automatically for all indices. This is - equivalent to invoking the reload search analyzers API for all indices that use - the synonyms set. + .. raw:: html + +

Create or update a synonym set. + Synonyms sets are limited to a maximum of 10,000 synonym rules per set. + If you need to manage more synonym rules, you can create multiple synonym sets.

+

When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. + This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set.
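A small sketch of creating a set from the Python client; the set id and rules are invented for illustration.

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder address

    # Create or replace a synonyms set; affected search analyzers reload automatically.
    client.synonyms.put_synonym(
        id="my-synonyms-set",
        synonyms_set=[
            {"id": "rule-1", "synonyms": "hello, hi"},
            {"synonyms": "laptop => notebook"},
        ],
    )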

+ ``_ @@ -344,10 +362,13 @@ def put_synonym_rule( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a synonym rule. Create or update a synonym rule in a synonym - set. If any of the synonym rules included is invalid, the API returns an error. - When you update a synonym rule, all analyzers using the synonyms set will be - reloaded automatically to reflect the new rule. + .. raw:: html + +

Create or update a synonym rule. + Create or update a synonym rule in a synonym set.

+

If any of the synonym rules included is invalid, the API returns an error.

+

When you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule.

+ ``_ diff --git a/elasticsearch/_sync/client/tasks.py b/elasticsearch/_sync/client/tasks.py index fe0fd20be..758925370 100644 --- a/elasticsearch/_sync/client/tasks.py +++ b/elasticsearch/_sync/client/tasks.py @@ -47,19 +47,18 @@ def cancel( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Cancel a task. WARNING: The task management API is new and should still be considered - a beta feature. The API may change in ways that are not backwards compatible. - A task may continue to run for some time after it has been cancelled because - it may not be able to safely stop its current activity straight away. It is also - possible that Elasticsearch must complete its work on other tasks before it can - process the cancellation. The get task information API will continue to list - these cancelled tasks until they complete. The cancelled flag in the response - indicates that the cancellation command has been processed and the task will - stop as soon as possible. To troubleshoot why a cancelled task does not complete - promptly, use the get task information API with the `?detailed` parameter to - identify the other tasks the system is running. You can also use the node hot - threads API to obtain detailed information about the work the system is doing - instead of completing the cancelled task. + .. raw:: html + +

Cancel a task.

+

WARNING: The task management API is new and should still be considered a beta feature. + The API may change in ways that are not backwards compatible.

+

A task may continue to run for some time after it has been cancelled because it may not be able to safely stop its current activity straight away. + It is also possible that Elasticsearch must complete its work on other tasks before it can process the cancellation. + The get task information API will continue to list these cancelled tasks until they complete. + The cancelled flag in the response indicates that the cancellation command has been processed and the task will stop as soon as possible.

+

To troubleshoot why a cancelled task does not complete promptly, use the get task information API with the ?detailed parameter to identify the other tasks the system is running. + You can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task.
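A short sketch of that workflow from the Python client; the task id is a placeholder.

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder address

    # Request cancellation, then look at detailed task info while the task winds down.
    client.tasks.cancel(task_id="oTUltX4IQMOUUVeiohTt8A:12345")
    tasks = client.tasks.list(detailed=True, group_by="parents")
    print(tasks["tasks"])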

+ ``_ @@ -120,11 +119,14 @@ def get( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get task information. Get information about a task currently running in the cluster. - WARNING: The task management API is new and should still be considered a beta - feature. The API may change in ways that are not backwards compatible. If the - task identifier is not found, a 404 response code indicates that there are no - resources that match the request. + .. raw:: html + +

Get task information. + Get information about a task currently running in the cluster.

+

WARNING: The task management API is new and should still be considered a beta feature. + The API may change in ways that are not backwards compatible.

+

If the task identifier is not found, a 404 response code indicates that there are no resources that match the request.

+ ``_ @@ -181,27 +183,60 @@ def list( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get all tasks. Get information about the tasks currently running on one or more - nodes in the cluster. WARNING: The task management API is new and should still - be considered a beta feature. The API may change in ways that are not backwards - compatible. **Identifying running tasks** The `X-Opaque-Id header`, when provided - on the HTTP request header, is going to be returned as a header in the response - as well as in the headers field for in the task information. This enables you - to track certain calls or associate certain tasks with the client that started - them. For example: ``` curl -i -H "X-Opaque-Id: 123456" "http://localhost:9200/_tasks?group_by=parents" - ``` The API returns the following result: ``` HTTP/1.1 200 OK X-Opaque-Id: 123456 - content-type: application/json; charset=UTF-8 content-length: 831 { "tasks" : - { "u5lcZHqcQhu-rUoFaqDphA:45" : { "node" : "u5lcZHqcQhu-rUoFaqDphA", "id" : 45, - "type" : "transport", "action" : "cluster:monitor/tasks/lists", "start_time_in_millis" - : 1513823752749, "running_time_in_nanos" : 293139, "cancellable" : false, "headers" - : { "X-Opaque-Id" : "123456" }, "children" : [ { "node" : "u5lcZHqcQhu-rUoFaqDphA", - "id" : 46, "type" : "direct", "action" : "cluster:monitor/tasks/lists[n]", "start_time_in_millis" - : 1513823752750, "running_time_in_nanos" : 92133, "cancellable" : false, "parent_task_id" - : "u5lcZHqcQhu-rUoFaqDphA:45", "headers" : { "X-Opaque-Id" : "123456" } } ] } - } } ``` In this example, `X-Opaque-Id: 123456` is the ID as a part of the response - header. The `X-Opaque-Id` in the task `headers` is the ID for the task that was - initiated by the REST request. The `X-Opaque-Id` in the children `headers` is - the child task of the task that was initiated by the REST request. + .. raw:: html + +

Get all tasks. + Get information about the tasks currently running on one or more nodes in the cluster.

+

WARNING: The task management API is new and should still be considered a beta feature. + The API may change in ways that are not backwards compatible.

+

Identifying running tasks

+

The X-Opaque-Id header, when provided on the HTTP request, is returned as a header in the response as well as in the headers field of the task information. + This enables you to track certain calls or associate certain tasks with the client that started them. + For example:

+
curl -i -H "X-Opaque-Id: 123456" "http://localhost:9200/_tasks?group_by=parents"
+          
+

The API returns the following result:

+
HTTP/1.1 200 OK
+          X-Opaque-Id: 123456
+          content-type: application/json; charset=UTF-8
+          content-length: 831
+
+          {
+            "tasks" : {
+              "u5lcZHqcQhu-rUoFaqDphA:45" : {
+                "node" : "u5lcZHqcQhu-rUoFaqDphA",
+                "id" : 45,
+                "type" : "transport",
+                "action" : "cluster:monitor/tasks/lists",
+                "start_time_in_millis" : 1513823752749,
+                "running_time_in_nanos" : 293139,
+                "cancellable" : false,
+                "headers" : {
+                  "X-Opaque-Id" : "123456"
+                },
+                "children" : [
+                  {
+                    "node" : "u5lcZHqcQhu-rUoFaqDphA",
+                    "id" : 46,
+                    "type" : "direct",
+                    "action" : "cluster:monitor/tasks/lists[n]",
+                    "start_time_in_millis" : 1513823752750,
+                    "running_time_in_nanos" : 92133,
+                    "cancellable" : false,
+                    "parent_task_id" : "u5lcZHqcQhu-rUoFaqDphA:45",
+                    "headers" : {
+                      "X-Opaque-Id" : "123456"
+                    }
+                  }
+                ]
+              }
+            }
+           }
+          
+

In this example, X-Opaque-Id: 123456 is the ID as a part of the response header. + The X-Opaque-Id in the task headers is the ID for the task that was initiated by the REST request. + The X-Opaque-Id in the children headers is the child task of the task that was initiated by the REST request.
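The Python client equivalent of the curl call above passes the id through options(); the value 123456 is only an example.

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder address

    # Send the X-Opaque-Id header so the tasks API echoes it back in its response.
    resp = client.options(opaque_id="123456").tasks.list(group_by="parents")
    print(resp["tasks"])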

+ ``_ diff --git a/elasticsearch/_sync/client/text_structure.py b/elasticsearch/_sync/client/text_structure.py index 2acc56893..b5c7b67d2 100644 --- a/elasticsearch/_sync/client/text_structure.py +++ b/elasticsearch/_sync/client/text_structure.py @@ -53,22 +53,24 @@ def find_field_structure( timestamp_format: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Find the structure of a text field. Find the structure of a text field in an - Elasticsearch index. This API provides a starting point for extracting further - information from log messages already ingested into Elasticsearch. For example, - if you have ingested data into a very simple index that has just `@timestamp` - and message fields, you can use this API to see what common structure exists - in the message field. The response from the API contains: * Sample messages. - * Statistics that reveal the most common values for all fields detected within - the text and basic numeric statistics for numeric fields. * Information about - the structure of the text, which is useful when you write ingest configurations - to index it or similarly formatted text. * Appropriate mappings for an Elasticsearch - index, which you could use to ingest the text. All this information can be calculated - by the structure finder with no guidance. However, you can optionally override - some of the decisions about the text structure by specifying one or more query - parameters. If the structure finder produces unexpected results, specify the - `explain` query parameter and an explanation will appear in the response. It - helps determine why the returned structure was chosen. + .. raw:: html + +

Find the structure of a text field. + Find the structure of a text field in an Elasticsearch index.

+

This API provides a starting point for extracting further information from log messages already ingested into Elasticsearch. + For example, if you have ingested data into a very simple index that has just @timestamp and message fields, you can use this API to see what common structure exists in the message field.

+

The response from the API contains:

+
  • Sample messages.
  • Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.
  • Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.
  • Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.

All this information can be calculated by the structure finder with no guidance. + However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.

+

If the structure finder produces unexpected results, specify the explain query parameter and an explanation will appear in the response. + It helps determine why the returned structure was chosen.
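As a sketch, analysing an already-indexed field from the Python client could look like this; the index and field names are placeholders, and explain surfaces the reasoning mentioned above.

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder address

    # Inspect the structure of the message field in an existing index.
    resp = client.text_structure.find_field_structure(
        index="my-logs",
        field="message",
        explain=True,
    )
    print(resp["mappings"])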

+ ``_ @@ -237,23 +239,25 @@ def find_message_structure( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Find the structure of text messages. Find the structure of a list of text messages. - The messages must contain data that is suitable to be ingested into Elasticsearch. - This API provides a starting point for ingesting data into Elasticsearch in a - format that is suitable for subsequent use with other Elastic Stack functionality. - Use this API rather than the find text structure API if your input text has already - been split up into separate messages by some other process. The response from - the API contains: * Sample messages. * Statistics that reveal the most common - values for all fields detected within the text and basic numeric statistics for - numeric fields. * Information about the structure of the text, which is useful - when you write ingest configurations to index it or similarly formatted text. - Appropriate mappings for an Elasticsearch index, which you could use to ingest - the text. All this information can be calculated by the structure finder with - no guidance. However, you can optionally override some of the decisions about - the text structure by specifying one or more query parameters. If the structure - finder produces unexpected results, specify the `explain` query parameter and - an explanation will appear in the response. It helps determine why the returned - structure was chosen. + .. raw:: html + +

+          <p>Find the structure of text messages.
+          Find the structure of a list of text messages.
+          The messages must contain data that is suitable to be ingested into Elasticsearch.</p>
+          <p>This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality.
+          Use this API rather than the find text structure API if your input text has already been split up into separate messages by some other process.</p>
+          <p>The response from the API contains:</p>
+          <ul>
+          <li>Sample messages.</li>
+          <li>Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.</li>
+          <li>Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.
+          Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.</li>
+          </ul>
+          <p>All this information can be calculated by the structure finder with no guidance.
+          However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.</p>
+          <p>If the structure finder produces unexpected results, specify the explain query parameter and an explanation will appear in the response.
+          It helps determine why the returned structure was chosen.</p>
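A similarly hedged sketch for messages that have already been split by another process; it assumes the client exposes the request body as a messages keyword argument, and the sample lines are invented::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder connection details

    resp = client.text_structure.find_message_structure(
        messages=[
            "[2024-03-05T10:52:36] GET /search 200 512ms",
            "[2024-03-05T10:52:41] GET /search 200 489ms",
        ],
    )
    # Suggested mappings for an index that could hold these messages.
    print(resp["mappings"])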

+ ``_ @@ -410,22 +414,24 @@ def find_structure( timestamp_format: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ - Find the structure of a text file. The text file must contain data that is suitable - to be ingested into Elasticsearch. This API provides a starting point for ingesting - data into Elasticsearch in a format that is suitable for subsequent use with - other Elastic Stack functionality. Unlike other Elasticsearch endpoints, the - data that is posted to this endpoint does not need to be UTF-8 encoded and in - JSON format. It must, however, be text; binary text formats are not currently - supported. The size is limited to the Elasticsearch HTTP receive buffer size, - which defaults to 100 Mb. The response from the API contains: * A couple of messages - from the beginning of the text. * Statistics that reveal the most common values - for all fields detected within the text and basic numeric statistics for numeric - fields. * Information about the structure of the text, which is useful when you - write ingest configurations to index it or similarly formatted text. * Appropriate - mappings for an Elasticsearch index, which you could use to ingest the text. - All this information can be calculated by the structure finder with no guidance. - However, you can optionally override some of the decisions about the text structure - by specifying one or more query parameters. + .. raw:: html + +

+          <p>Find the structure of a text file.
+          The text file must contain data that is suitable to be ingested into Elasticsearch.</p>
+          <p>This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality.
+          Unlike other Elasticsearch endpoints, the data that is posted to this endpoint does not need to be UTF-8 encoded and in JSON format.
+          It must, however, be text; binary text formats are not currently supported.
+          The size is limited to the Elasticsearch HTTP receive buffer size, which defaults to 100 Mb.</p>
+          <p>The response from the API contains:</p>
+          <ul>
+          <li>A couple of messages from the beginning of the text.</li>
+          <li>Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.</li>
+          <li>Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.</li>
+          <li>Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.</li>
+          </ul>
+          <p>All this information can be calculated by the structure finder with no guidance.
+          However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.</p>
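A sketch of posting raw log lines to the structure finder from the Python client; the body keyword used here (text_files) and the sample lines are assumptions to verify against the client version you have installed::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder connection details

    # Plain text lines; they do not need to be JSON.
    lines = [
        '53.140.235.130 - - [2024-03-05T10:52:36] "GET /search HTTP/1.1" 200 1708',
        '53.140.235.130 - - [2024-03-05T10:52:41] "GET /index HTTP/1.1" 200 981',
    ]
    resp = client.text_structure.find_structure(text_files=lines, lines_to_sample=500)
    # The detected format and the suggested index mappings.
    print(resp["format"])
    print(resp["mappings"])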

+ ``_ @@ -607,9 +613,12 @@ def test_grok_pattern( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Test a Grok pattern. Test a Grok pattern on one or more lines of text. The API - indicates whether the lines match the pattern together with the offsets and lengths - of the matched substrings. + .. raw:: html + +

+          <p>Test a Grok pattern.
+          Test a Grok pattern on one or more lines of text.
+          The API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings.</p>
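A small sketch of exercising this endpoint from the Python client, with an invented pattern and sample lines::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder connection details

    resp = client.text_structure.test_grok_pattern(
        grok_pattern="%{TIMESTAMP_ISO8601:ts} %{LOGLEVEL:level} %{GREEDYDATA:msg}",
        text=[
            "2024-03-05T10:52:36 INFO starting node",
            "2024-03-05T10:52:41 WARN disk watermark exceeded",
        ],
    )
    # Each entry reports whether the line matched and the captured substrings.
    for match in resp["matches"]:
        print(match["matched"], match.get("fields", {}))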

+ ``_ diff --git a/elasticsearch/_sync/client/transform.py b/elasticsearch/_sync/client/transform.py index a94fca7b4..95427115e 100644 --- a/elasticsearch/_sync/client/transform.py +++ b/elasticsearch/_sync/client/transform.py @@ -39,7 +39,11 @@ def delete_transform( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a transform. Deletes a transform. + .. raw:: html + +

+          <p>Delete a transform.
+          Deletes a transform.</p>

+ ``_ @@ -99,7 +103,11 @@ def get_transform( size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ - Get transforms. Retrieves configuration information for transforms. + .. raw:: html + +

+          <p>Get transforms.
+          Retrieves configuration information for transforms.</p>

+ ``_ @@ -168,7 +176,11 @@ def get_transform_stats( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Get transform stats. Retrieves usage information for transforms. + .. raw:: html + +

+          <p>Get transform stats.
+          Retrieves usage information for transforms.</p>

+ ``_ @@ -249,12 +261,14 @@ def preview_transform( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Preview a transform. Generates a preview of the results that you will get when - you create a transform with the same configuration. It returns a maximum of 100 - results. The calculations are based on all the current data in the source index. - It also generates a list of mappings and settings for the destination index. - These values are determined based on the field types of the source index and - the transform aggregations. + .. raw:: html + +

+          <p>Preview a transform.
+          Generates a preview of the results that you will get when you create a transform with the same configuration.</p>
+          <p>It returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also
+          generates a list of mappings and settings for the destination index. These values are determined based on the field
+          types of the source index and the transform aggregations.</p>
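A hedged sketch of previewing a pivot configuration before creating the transform; the source index, group-by field and aggregation are illustrative only::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder connection details

    resp = client.transform.preview_transform(
        source={"index": "kibana_sample_data_ecommerce"},
        pivot={
            "group_by": {"customer_id": {"terms": {"field": "customer_id"}}},
            "aggregations": {"total_spent": {"sum": {"field": "taxful_total_price"}}},
        },
    )
    # Mappings that would be generated for the destination index, plus sample rows.
    print(resp["generated_dest_index"]["mappings"])
    print(resp["preview"][:3])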

+ ``_ @@ -371,27 +385,27 @@ def put_transform( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create a transform. Creates a transform. A transform copies data from source - indices, transforms it, and persists it into an entity-centric destination index. - You can also think of the destination index as a two-dimensional tabular data - structure (known as a data frame). The ID for each document in the data frame - is generated from a hash of the entity, so there is a unique row per entity. - You must choose either the latest or pivot method for your transform; you cannot - use both in a single transform. If you choose to use the pivot method for your - transform, the entities are defined by the set of `group_by` fields in the pivot - object. If you choose to use the latest method, the entities are defined by the - `unique_key` field values in the latest object. You must have `create_index`, - `index`, and `read` privileges on the destination index and `read` and `view_index_metadata` - privileges on the source indices. When Elasticsearch security features are enabled, - the transform remembers which roles the user that created it had at the time - of creation and uses those same roles. If those roles do not have the required - privileges on the source and destination indices, the transform fails when it - attempts unauthorized operations. NOTE: You must use Kibana or this API to create - a transform. Do not add a transform directly into any `.transform-internal*` - indices using the Elasticsearch index API. If Elasticsearch security features - are enabled, do not give users any privileges on `.transform-internal*` indices. - If you used transforms prior to 7.5, also do not give users any privileges on - `.data-frame-internal*` indices. + .. raw:: html + +

+          <p>Create a transform.
+          Creates a transform.</p>
+          <p>A transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as
+          a data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a
+          unique row per entity.</p>
+          <p>You must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If
+          you choose to use the pivot method for your transform, the entities are defined by the set of group_by fields in
+          the pivot object. If you choose to use the latest method, the entities are defined by the unique_key field values
+          in the latest object.</p>
+          <p>You must have create_index, index, and read privileges on the destination index and read and
+          view_index_metadata privileges on the source indices. When Elasticsearch security features are enabled, the
+          transform remembers which roles the user that created it had at the time of creation and uses those same roles. If
+          those roles do not have the required privileges on the source and destination indices, the transform fails when it
+          attempts unauthorized operations.</p>
+          <p>NOTE: You must use Kibana or this API to create a transform. Do not add a transform directly into any
+          .transform-internal* indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do
+          not give users any privileges on .transform-internal* indices. If you used transforms prior to 7.5, also do not
+          give users any privileges on .data-frame-internal* indices.</p>
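A minimal sketch of creating the pivot transform described above through the Python client; every index name, field and interval below is hypothetical::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder connection details

    # One destination row per customer_id, continuously kept in sync.
    client.transform.put_transform(
        transform_id="ecommerce-customer-spend",
        source={"index": "kibana_sample_data_ecommerce"},
        dest={"index": "ecommerce-customer-spend"},
        pivot={
            "group_by": {"customer_id": {"terms": {"field": "customer_id"}}},
            "aggregations": {"total_spent": {"sum": {"field": "taxful_total_price"}}},
        },
        sync={"time": {"field": "order_date", "delay": "60s"}},
        frequency="5m",
    )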

+ ``_ @@ -492,9 +506,13 @@ def reset_transform( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Reset a transform. Resets a transform. Before you can reset it, you must stop - it; alternatively, use the `force` query parameter. If the destination index - was created by the transform, it is deleted. + .. raw:: html + +

+          <p>Reset a transform.
+          Resets a transform.
+          Before you can reset it, you must stop it; alternatively, use the force query parameter.
+          If the destination index was created by the transform, it is deleted.</p>

+ ``_ @@ -546,11 +564,15 @@ def schedule_now_transform( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Schedule a transform to start now. Instantly runs a transform to process data. - If you _schedule_now a transform, it will process the new data instantly, without - waiting for the configured frequency interval. After _schedule_now API is called, - the transform will be processed again at now + frequency unless _schedule_now - API is called again in the meantime. + .. raw:: html + +

+          <p>Schedule a transform to start now.
+          Instantly runs a transform to process data.</p>
+          <p>If you _schedule_now a transform, it will process the new data instantly,
+          without waiting for the configured frequency interval. After _schedule_now API is called,
+          the transform will be processed again at now + frequency unless _schedule_now API
+          is called again in the meantime.</p>

+ ``_ @@ -597,24 +619,24 @@ def start_transform( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Start a transform. Starts a transform. When you start a transform, it creates - the destination index if it does not already exist. The `number_of_shards` is - set to `1` and the `auto_expand_replicas` is set to `0-1`. If it is a pivot transform, - it deduces the mapping definitions for the destination index from the source - indices and the transform aggregations. If fields in the destination index are - derived from scripts (as in the case of `scripted_metric` or `bucket_script` - aggregations), the transform uses dynamic mappings unless an index template exists. - If it is a latest transform, it does not deduce mapping definitions; it uses - dynamic mappings. To use explicit mappings, create the destination index before - you start the transform. Alternatively, you can create an index template, though - it does not affect the deduced mappings in a pivot transform. When the transform - starts, a series of validations occur to ensure its success. If you deferred - validation when you created the transform, they occur when you start the transform—​with - the exception of privilege checks. When Elasticsearch security features are enabled, - the transform remembers which roles the user that created it had at the time - of creation and uses those same roles. If those roles do not have the required - privileges on the source and destination indices, the transform fails when it - attempts unauthorized operations. + .. raw:: html + +

+          <p>Start a transform.
+          Starts a transform.</p>
+          <p>When you start a transform, it creates the destination index if it does not already exist. The number_of_shards is
+          set to 1 and the auto_expand_replicas is set to 0-1. If it is a pivot transform, it deduces the mapping
+          definitions for the destination index from the source indices and the transform aggregations. If fields in the
+          destination index are derived from scripts (as in the case of scripted_metric or bucket_script aggregations),
+          the transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce
+          mapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you
+          start the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings
+          in a pivot transform.</p>
+          <p>When the transform starts, a series of validations occur to ensure its success. If you deferred validation when you
+          created the transform, they occur when you start the transform, with the exception of privilege checks. When
+          Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the
+          time of creation and uses those same roles. If those roles do not have the required privileges on the source and
+          destination indices, the transform fails when it attempts unauthorized operations.</p>
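A short sketch of starting the (hypothetical) transform created above and checking its state afterwards::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder connection details

    # Starting the transform creates the destination index, with deduced or
    # dynamic mappings as described above, if it does not exist yet.
    client.transform.start_transform(transform_id="ecommerce-customer-spend")

    # The stats API reports whether the transform is indexing, started or failed.
    stats = client.transform.get_transform_stats(transform_id="ecommerce-customer-spend")
    print(stats["transforms"][0]["state"])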

+ ``_ @@ -668,7 +690,11 @@ def stop_transform( wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stop transforms. Stops one or more transforms. + .. raw:: html + +

+          <p>Stop transforms.
+          Stops one or more transforms.</p>

+ ``_ @@ -761,14 +787,16 @@ def update_transform( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update a transform. Updates certain properties of a transform. All updated properties - except `description` do not take effect until after the transform starts the - next checkpoint, thus there is data consistency in each checkpoint. To use this - API, you must have `read` and `view_index_metadata` privileges for the source - indices. You must also have `index` and `read` privileges for the destination - index. When Elasticsearch security features are enabled, the transform remembers - which roles the user who updated it had at the time of update and runs with those - privileges. + .. raw:: html + +

+          <p>Update a transform.
+          Updates certain properties of a transform.</p>
+          <p>All updated properties except description do not take effect until after the transform starts the next checkpoint,
+          thus there is data consistency in each checkpoint. To use this API, you must have read and view_index_metadata
+          privileges for the source indices. You must also have index and read privileges for the destination index. When
+          Elasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the
+          time of update and runs with those privileges.</p>

+ ``_ @@ -849,20 +877,21 @@ def upgrade_transforms( timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ - Upgrade all transforms. Transforms are compatible across minor versions and between - supported major versions. However, over time, the format of transform configuration - information may change. This API identifies transforms that have a legacy configuration - format and upgrades them to the latest version. It also cleans up the internal - data structures that store the transform state and checkpoints. The upgrade does - not affect the source and destination indices. The upgrade also does not affect - the roles that transforms use when Elasticsearch security features are enabled; - the role used to read source data and write to the destination index remains - unchanged. If a transform upgrade step fails, the upgrade stops and an error - is returned about the underlying issue. Resolve the issue then re-run the process - again. A summary is returned when the upgrade is finished. To ensure continuous - transforms remain running during a major version upgrade of the cluster – for - example, from 7.16 to 8.0 – it is recommended to upgrade transforms before upgrading - the cluster. You may want to perform a recent cluster backup prior to the upgrade. + .. raw:: html + +

+          <p>Upgrade all transforms.
+          Transforms are compatible across minor versions and between supported major versions.
+          However, over time, the format of transform configuration information may change.
+          This API identifies transforms that have a legacy configuration format and upgrades them to the latest version.
+          It also cleans up the internal data structures that store the transform state and checkpoints.
+          The upgrade does not affect the source and destination indices.
+          The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains unchanged.</p>
+          <p>If a transform upgrade step fails, the upgrade stops and an error is returned about the underlying issue.
+          Resolve the issue then re-run the process again.
+          A summary is returned when the upgrade is finished.</p>
+          <p>To ensure continuous transforms remain running during a major version upgrade of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade transforms before upgrading the cluster.
+          You may want to perform a recent cluster backup prior to the upgrade.</p>
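A hedged sketch of running the upgrade from the Python client, first as a dry run to see what would change::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder connection details

    # Report how many transforms need upgrading without changing anything ...
    print(client.transform.upgrade_transforms(dry_run=True))

    # ... then perform the real upgrade before upgrading the cluster itself.
    print(client.transform.upgrade_transforms(dry_run=False))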

+ ``_ diff --git a/elasticsearch/_sync/client/watcher.py b/elasticsearch/_sync/client/watcher.py index d6025c923..065accc2f 100644 --- a/elasticsearch/_sync/client/watcher.py +++ b/elasticsearch/_sync/client/watcher.py @@ -37,14 +37,16 @@ def ack_watch( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Acknowledge a watch. Acknowledging a watch enables you to manually throttle the - execution of the watch's actions. The acknowledgement state of an action is stored - in the `status.actions..ack.state` structure. IMPORTANT: If the specified - watch is currently being executed, this API will return an error The reason for - this behavior is to prevent overwriting the watch status from a watch execution. - Acknowledging an action throttles further executions of that action until its - `ack.state` is reset to `awaits_successful_execution`. This happens when the - condition of the watch is not met (the condition evaluates to false). + .. raw:: html + +

+          <p>Acknowledge a watch.
+          Acknowledging a watch enables you to manually throttle the execution of the watch's actions.</p>
+          <p>The acknowledgement state of an action is stored in the status.actions.&lt;id&gt;.ack.state structure.</p>
+          <p>IMPORTANT: If the specified watch is currently being executed, this API will return an error
+          The reason for this behavior is to prevent overwriting the watch status from a watch execution.</p>
+          <p>Acknowledging an action throttles further executions of that action until its ack.state is reset to awaits_successful_execution.
+          This happens when the condition of the watch is not met (the condition evaluates to false).</p>
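A small sketch of acknowledging one action of a (hypothetical) watch and reading back the acknowledgement state described above::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder connection details

    # Acknowledge a single action; omit action_id to acknowledge all actions.
    resp = client.watcher.ack_watch(watch_id="cluster_health_watch", action_id="email_admin")

    # The acknowledgement state is reported under status.actions.<id>.ack.state.
    print(resp["status"]["actions"]["email_admin"]["ack"]["state"])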

+ ``_ @@ -96,7 +98,11 @@ def activate_watch( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Activate a watch. A watch can be either active or inactive. + .. raw:: html + +

+          <p>Activate a watch.
+          A watch can be either active or inactive.</p>

+ ``_ @@ -136,7 +142,11 @@ def deactivate_watch( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Deactivate a watch. A watch can be either active or inactive. + .. raw:: html + +

+          <p>Deactivate a watch.
+          A watch can be either active or inactive.</p>

+ ``_ @@ -176,13 +186,15 @@ def delete_watch( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Delete a watch. When the watch is removed, the document representing the watch - in the `.watches` index is gone and it will never be run again. Deleting a watch - does not delete any watch execution records related to this watch from the watch - history. IMPORTANT: Deleting a watch must be done by using only this API. Do - not delete the watch directly from the `.watches` index using the Elasticsearch - delete document API When Elasticsearch security features are enabled, make sure - no write privileges are granted to anyone for the `.watches` index. + .. raw:: html + +

+          <p>Delete a watch.
+          When the watch is removed, the document representing the watch in the .watches index is gone and it will never be run again.</p>
+          <p>Deleting a watch does not delete any watch execution records related to this watch from the watch history.</p>
+          <p>IMPORTANT: Deleting a watch must be done by using only this API.
+          Do not delete the watch directly from the .watches index using the Elasticsearch delete document API
+          When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the .watches index.</p>

+ ``_ @@ -251,21 +263,19 @@ def execute_watch( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Run a watch. This API can be used to force execution of the watch outside of - its triggering logic or to simulate the watch execution for debugging purposes. - For testing and debugging purposes, you also have fine-grained control on how - the watch runs. You can run the watch without running all of its actions or alternatively - by simulating them. You can also force execution by ignoring the watch condition - and control whether a watch record would be written to the watch history after - it runs. You can use the run watch API to run watches that are not yet registered - by specifying the watch definition inline. This serves as great tool for testing - and debugging your watches prior to adding them to Watcher. When Elasticsearch - security features are enabled on your cluster, watches are run with the privileges - of the user that stored the watches. If your user is allowed to read index `a`, - but not index `b`, then the exact same set of rules will apply during execution - of a watch. When using the run watch API, the authorization data of the user - that called the API will be used as a base, instead of the information who stored - the watch. + .. raw:: html + +

+          <p>Run a watch.
+          This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes.</p>
+          <p>For testing and debugging purposes, you also have fine-grained control on how the watch runs.
+          You can run the watch without running all of its actions or alternatively by simulating them.
+          You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs.</p>
+          <p>You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline.
+          This serves as a great tool for testing and debugging your watches prior to adding them to Watcher.</p>
+          <p>When Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches.
+          If your user is allowed to read index a, but not index b, then the exact same set of rules will apply during execution of a watch.</p>
+          <p>When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information who stored the watch.</p>
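A hedged sketch of forcing a (hypothetical) registered watch to run while ignoring its condition and skipping the history record::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder connection details

    resp = client.watcher.execute_watch(
        id="cluster_health_watch",
        ignore_condition=True,    # run even if the condition would not be met
        record_execution=False,   # do not write a record to the watch history
    )
    # The watch record describes what the forced execution did.
    print(resp["watch_record"]["result"]["condition"]["met"])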

+ ``_ @@ -348,9 +358,12 @@ def get_settings( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get Watcher index settings. Get settings for the Watcher internal index (`.watches`). - Only a subset of settings are shown, for example `index.auto_expand_replicas` - and `index.number_of_replicas`. + .. raw:: html + +

+          <p>Get Watcher index settings.
+          Get settings for the Watcher internal index (.watches).
+          Only a subset of settings are shown, for example index.auto_expand_replicas and index.number_of_replicas.</p>

+ ``_ @@ -392,7 +405,10 @@ def get_watch( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get a watch. + .. raw:: html + +

+          <p>Get a watch.</p>

+ ``_ @@ -456,17 +472,18 @@ def put_watch( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Create or update a watch. When a watch is registered, a new document that represents - the watch is added to the `.watches` index and its trigger is immediately registered - with the relevant trigger engine. Typically for the `schedule` trigger, the scheduler - is the trigger engine. IMPORTANT: You must use Kibana or this API to create a - watch. Do not add a watch directly to the `.watches` index by using the Elasticsearch - index API. If Elasticsearch security features are enabled, do not give users - write privileges on the `.watches` index. When you add a watch you can also define - its initial active state by setting the *active* parameter. When Elasticsearch - security features are enabled, your watch can index or search only on indices - for which the user that stored the watch has privileges. If the user is able - to read index `a`, but not index `b`, the same will apply when the watch runs. + .. raw:: html + +

+          <p>Create or update a watch.
+          When a watch is registered, a new document that represents the watch is added to the .watches index and its trigger is immediately registered with the relevant trigger engine.
+          Typically for the schedule trigger, the scheduler is the trigger engine.</p>
+          <p>IMPORTANT: You must use Kibana or this API to create a watch.
+          Do not add a watch directly to the .watches index by using the Elasticsearch index API.
+          If Elasticsearch security features are enabled, do not give users write privileges on the .watches index.</p>
+          <p>When you add a watch you can also define its initial active state by setting the active parameter.</p>
+          <p>When Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges.
+          If the user is able to read index a, but not index b, the same will apply when the watch runs.</p>
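A minimal sketch of registering a watch with an initial active state; the trigger, input, condition and action shown are illustrative only::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder connection details

    # Check cluster health every 10 minutes and log when the status is red.
    client.watcher.put_watch(
        id="cluster_health_watch",
        active=True,
        trigger={"schedule": {"interval": "10m"}},
        input={
            "http": {
                "request": {"host": "localhost", "port": 9200, "path": "/_cluster/health"}
            }
        },
        condition={"compare": {"ctx.payload.status": {"eq": "red"}}},
        actions={"log_error": {"logging": {"text": "Cluster health is RED"}}},
    )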

+ ``_ @@ -574,9 +591,12 @@ def query_watches( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Query watches. Get all registered watches in a paginated manner and optionally - filter watches by a query. Note that only the `_id` and `metadata.*` fields are - queryable or sortable. + .. raw:: html + +

+          <p>Query watches.
+          Get all registered watches in a paginated manner and optionally filter watches by a query.</p>
+          <p>Note that only the _id and metadata.* fields are queryable or sortable.</p>

+ ``_ @@ -647,7 +667,11 @@ def start( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Start the watch service. Start the Watcher service if it is not already running. + .. raw:: html + +

+          <p>Start the watch service.
+          Start the Watcher service if it is not already running.</p>

+ ``_ @@ -708,8 +732,12 @@ def stats( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get Watcher statistics. This API always returns basic metrics. You retrieve more - metrics by using the metric parameter. + .. raw:: html + +

+          <p>Get Watcher statistics.
+          This API always returns basic metrics.
+          You retrieve more metrics by using the metric parameter.</p>

+ ``_ @@ -756,7 +784,11 @@ def stop( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Stop the watch service. Stop the Watcher service if it is running. + .. raw:: html + +

+          <p>Stop the watch service.
+          Stop the Watcher service if it is running.</p>

+ ``_ @@ -808,9 +840,13 @@ def update_settings( body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ - Update Watcher index settings. Update settings for the Watcher internal index - (`.watches`). Only a subset of settings can be modified. This includes `index.auto_expand_replicas` - and `index.number_of_replicas`. + .. raw:: html + +

+          <p>Update Watcher index settings.
+          Update settings for the Watcher internal index (.watches).
+          Only a subset of settings can be modified.
+          This includes index.auto_expand_replicas and index.number_of_replicas.</p>

+ ``_ diff --git a/elasticsearch/_sync/client/xpack.py b/elasticsearch/_sync/client/xpack.py index a2f26ab91..d52c8dd69 100644 --- a/elasticsearch/_sync/client/xpack.py +++ b/elasticsearch/_sync/client/xpack.py @@ -43,10 +43,16 @@ def info( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get information. The information provided by the API includes: * Build information - including the build number and timestamp. * License information about the currently - installed license. * Feature information for the features that are currently - enabled and available under the current license. + .. raw:: html + +

+          <p>Get information.
+          The information provided by the API includes:</p>
+          <ul>
+          <li>Build information including the build number and timestamp.</li>
+          <li>License information about the currently installed license.</li>
+          <li>Feature information for the features that are currently enabled and available under the current license.</li>
+          </ul>
+ ``_ @@ -90,9 +96,12 @@ def usage( pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ - Get usage information. Get information about the features that are currently - enabled and available under the current license. The API also provides some usage - statistics. + .. raw:: html + +

+          <p>Get usage information.
+          Get information about the features that are currently enabled and available under the current license.
+          The API also provides some usage statistics.</p>
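A short sketch of retrieving both the info and usage views described above through the Python client::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder connection details

    # Restrict the info response to the build and license categories.
    info = client.xpack.info(categories=["build", "license"])
    print(info["build"]["date"], info["license"]["type"])

    # The usage variant adds per-feature usage statistics.
    usage = client.xpack.usage()
    print(sorted(usage.keys())[:5])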

+ ``_ From 29a80e8179199fff8e241211197513a030f4a610 Mon Sep 17 00:00:00 2001 From: Quentin Pradet Date: Thu, 23 Jan 2025 18:56:59 +0400 Subject: [PATCH 25/65] Bring 8.17.1 to 8.x (#2763) --- .../00272f75a6afea91f8554ef7cda0c1f2.asciidoc | 2 +- .../004743b9c9f61588926ccf734696b713.asciidoc | 2 +- .../008ed823c89e703c447ac89c6b689833.asciidoc | 2 +- .../00b3b6d76a368ae71277ea24af318693.asciidoc | 2 +- .../00fea15cbca83be9d5f1a024ff2ec708.asciidoc | 2 +- .../015e6e6132b6d6d44bddb06bc3b316ed.asciidoc | 46 ++++++ .../0165d22da5f2fc7678392b31d8eb5566.asciidoc | 18 ++ .../01bc0f2ed30eb3dd23511d01ce0ac6e1.asciidoc | 2 +- .../01cd0ea360282a2c591a366679d7187d.asciidoc | 2 +- .../01dc7bdc223bd651574ed2d3954a5b1c.asciidoc | 2 +- .../020c95db88ef356093f03be84893ddf9.asciidoc | 2 +- .../0246f73cc2ed3dfec577119e8cd15404.asciidoc | 2 +- .../02520ac7816b2c4cf8fb413fd16122f2.asciidoc | 2 +- .../0280247e0cf2e561c548f22c9fb31163.asciidoc | 2 +- .../02f65c6bab8f40bf3ce18160623d1870.asciidoc | 2 +- .../0308cbd85281f95fc458042afe3f587d.asciidoc | 2 +- .../032eac56b798bea29390e102538f4a26.asciidoc | 2 +- .../0350ff5ebb8207c004eb771088339cb4.asciidoc | 2 +- .../03891265df2111a38e0b6b24c1b967e1.asciidoc | 2 +- .../03c4b815bf1e6a8c5cfcc6ddf94bc093.asciidoc | 2 +- .../04412d11783dac25b5fd2ec5407078a3.asciidoc | 2 +- .../0470d7101637568b9d3d1239f06325a7.asciidoc | 2 +- .../048652b6abfe195da8ea8cef10ee01b1.asciidoc | 2 +- .../04d6ce0c903bd468afbecd3aa1c4a78a.asciidoc | 2 +- .../04de2e3a9c00c2056b07bf9cf9e63a99.asciidoc | 2 +- .../04f5dd677c777bcb15d7d5fa63275fc8.asciidoc | 2 +- .../0502284d4685c478eb68761f979f4303.asciidoc | 2 +- .../05284c8ea91769c09c8db47db8a6629a.asciidoc | 2 +- .../05f6049c677a156bdf9b83e71a3b87ed.asciidoc | 2 +- .../0601b5cb5328c9ebff30f4be1b210f93.asciidoc | 2 +- .../06454a8e85e2d3479c90390bb955eb39.asciidoc | 2 +- .../066e0bdcdfa3b8afa5d1e5777f73fccb.asciidoc | 2 +- .../06b5d3d56c4d4e3b61ae42ea26401c40.asciidoc | 2 +- .../0709a38613d2de90d418ce12b36af30e.asciidoc | 2 +- .../0721c8adec544d5ecea3fcc410e45feb.asciidoc | 2 +- .../0722b302b2b3275a988d858044f99d5d.asciidoc | 10 ++ .../073864d3f52f8f79aafdaa85a88ac46a.asciidoc | 2 +- ...074e4602d1ca54412380a40867d078bc.asciidoc} | 6 +- .../0755471d7dce4785d2e7ed0c10182ea3.asciidoc | 2 +- .../07a5fdeb7805cec1d28ba288b28f5ff5.asciidoc | 2 +- .../07ba3eaa931f2cf110052e3544db51f8.asciidoc | 2 +- .../07c07f6d497b1a3012aa4320f830e09e.asciidoc | 2 +- .../082e78c7a2061a7c4a52b494e5ede0e8.asciidoc | 48 ++++++ .../083b92e8ea264e49bf9fd40fc6a3094b.asciidoc | 2 +- .../0881397074d261ccc2db514daf116c31.asciidoc | 2 +- .../08c9af9dd519c011deedd406f3061836.asciidoc | 2 +- .../08e08feb514b24006e13f258d617d873.asciidoc | 2 +- .../0957bbd535f58c97b12ffba90813d64c.asciidoc | 2 +- .../095e3f21941a9cc75f398389a075152d.asciidoc | 2 +- .../09769561f082b50558fb7d8707719963.asciidoc | 2 +- .../099006ab11b52ea99693401dceee8bad.asciidoc | 2 +- .../09e6e06ba562f4b9bac59455e9151a80.asciidoc | 2 +- .../0a46cc8fe93e372909660a63dc52ae3b.asciidoc | 2 +- .../0a650401134f07e40216f0d0d1a66a32.asciidoc | 2 +- .../0a758d9dec74d9e942cf41a06499234f.asciidoc | 2 +- .../0ad8edd10542ec2c4d5d8700d7e2ba97.asciidoc | 2 +- .../0ade87c8cb0e3c188d2e3dce279d5cc2.asciidoc | 2 +- .../0aff04881be21eea45375ec4f4f50e66.asciidoc | 2 +- .../0b615ff4ef5a8847ee8109b2fd11619a.asciidoc | 2 +- .../0bc6155e0c88062a4d8490da49db3aa8.asciidoc | 57 +++++++ .../0bee07a581c5776e068f6f4efad5a399.asciidoc | 11 +- .../0c2d9ac7e3f28d4d802e21cbbbcfeb34.asciidoc | 2 +- .../0c464965126cc09e6812716a145991d4.asciidoc | 2 +- 
...0c52af573c9401a2a687e86a4beb182b.asciidoc} | 4 +- .../0c6f9c9da75293fae69659ac1d6329de.asciidoc | 2 +- .../0c6fc67c2dd1c1771cd866ce471d74e1.asciidoc | 2 +- .../0c892d328b73d38396aaef6d9cbcd36b.asciidoc | 2 +- .../0d30077cd34e93377a3a86f2ebd69415.asciidoc | 2 +- .../0d689ac6e78be5d438f9b5d441be2b44.asciidoc | 60 +++++++ .../0d8063b484a18f8672fb5ed8712c5c97.asciidoc | 2 +- .../0d94d76b7f00d0459d1f8c962c144dcd.asciidoc | 2 +- .../0da747e9d98bae157d3520ff1b489ad4.asciidoc | 2 +- .../0dd30ffe2f900dde86cc9bb601d5e68e.asciidoc | 2 +- .../0ddf705317d9c5095b4a1419a2e3bace.asciidoc | 2 +- .../0dfa9733c94bc43c6f14c7b6984c98fb.asciidoc | 2 +- .../0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc | 8 +- .../0e31b8ad176b31028becf9500989bcbd.asciidoc | 2 +- .../0e5d25c7bb738c42d471020d678e2966.asciidoc | 2 +- .../0e83f140237d75469a428ff403564bb5.asciidoc | 15 -- .../0ea146b178561bc8b9002bed8a35641f.asciidoc | 2 +- .../0ea2167ce7c87d311b20c4f8c698a8d0.asciidoc | 2 +- .../0eae571e9e1c40a40cb4b1c9530a8987.asciidoc | 2 +- .../0f4583c56cfe5bd59eeb35bfba02957c.asciidoc | 2 +- .../0f7aa40ad26d59a9268630b980a3d594.asciidoc | 2 +- .../0fa220ee3fb267020382f74aa70eb1e9.asciidoc | 2 +- .../0fc4b589df5388da784c6d981e769e31.asciidoc | 2 +- .../0fe74ccd098c742619805a7c0bd0fae6.asciidoc | 2 +- .../10796a4efa3c2a5e9e50b6bdeb08bbb9.asciidoc | 2 +- .../109db8ff7b715aca98de8ef1ab7e44ab.asciidoc | 2 +- .../10b924bf6298aa6157ed00ce12f8edc1.asciidoc | 2 +- .../10d9da8a3b7061479be908c8c5c76cfb.asciidoc | 2 +- .../10f7a2c0a952ba3bc3d20b7d5f310f41.asciidoc | 2 +- .../114d470e752efa9672ca68d7290fada8.asciidoc | 2 +- .../115529722ba30b0b0d51a7ff87e59198.asciidoc | 2 +- .../11c395d1649733bcab853fe31ec393b2.asciidoc | 2 +- .../11e772ff5dbb73408ae30a1a367a0d9b.asciidoc | 2 +- .../120fcf9f55128d6a81d5e87a9c235bbd.asciidoc | 21 +++ .../12433d2b637d002e8d5c9a1adce69d3b.asciidoc | 2 +- .../1259a9c151730e42de35bb2d1ba700c6.asciidoc | 2 +- .../128283698535116931dca9d16a16dca2.asciidoc | 2 +- .../1295f51b9e5d4ba9987b02478146b50b.asciidoc | 2 +- .../12d5ff4b8d3d832b32a7e7e2a520d0bb.asciidoc | 2 +- .../135819da3a4bde684357c57a49ad8e85.asciidoc | 2 +- .../136ae86b8d497dda799cf1cb583df929.asciidoc | 2 +- .../137709a0a0dc38d6094291c9fc75b804.asciidoc | 2 +- .../138f7703c47ddf63633fdf5ca9bc7fa4.asciidoc | 2 +- .../13d90ba227131aefbf4fcfd5992e662a.asciidoc | 2 +- .../13df08eefc9ba98e311793bbca74133b.asciidoc | 2 +- .../13ebcb01ebf1b5d2b5c52739db47e30c.asciidoc | 2 +- .../13ecdf99114098c76b050397d9c3d4e6.asciidoc | 2 +- .../141ef0ebaa3b0772892b79b9bb85efb0.asciidoc | 16 ++ .../1420a22aa817c7a996baaed0ad366d6f.asciidoc | 22 --- .../146bd22fd0e7be2345619e8f11d3a4cb.asciidoc | 2 +- .../147d341cb212dcc015c129a9c5dcf9c9.asciidoc | 2 +- .../14a33c364873c2f930ca83d0a3005389.asciidoc | 2 +- .../14b81f96297952970b78a3216e059596.asciidoc | 2 +- .../150b5fee5678bf8cdf0932da73eada80.asciidoc | 2 +- .../1570976f7807b88dc8a046b833be057b.asciidoc | 2 +- .../15a34bfe0ef8ef6333c8c7b55c011e5d.asciidoc | 2 +- .../15f769bbd7b5fddeb3353ae726b71b14.asciidoc | 2 +- .../162b5b693b713f0bfab1209d59443c46.asciidoc | 2 +- .../1637ef51d673b35cc8894ee80cd61c87.asciidoc | 2 +- .../16634cfa7916cf4e8048a1d70e6240f2.asciidoc | 2 +- .../166bcfc6d5d39defec7ad6aa44d0914b.asciidoc | 2 +- .../16985e5b17d2da0955a14fbe02e8dfca.asciidoc | 2 +- .../172155ca4bf6dfcbd489453f50739396.asciidoc | 2 +- .../17266cee5eaaddf08e5534bf580a1910.asciidoc | 2 +- .../17566e23c191f1004a2719f2c4242307.asciidoc | 2 +- .../178c920d5e8ec0071f77290fa059802c.asciidoc | 2 +- 
...17b1647c8509543f2388c886f2584a20.asciidoc} | 4 +- .../17c2b0a6b0305804ff3b7fd3b4a68df3.asciidoc | 2 +- .../18ddb7e7a4bcafd449df956e828ed7a8.asciidoc | 2 +- .../190a21e32db2125ddaea0f634e126a84.asciidoc | 2 +- .../192fa1f6f51dfb640e9e15bb5cd7eebc.asciidoc | 2 +- .../194bbac15e709174ac85b681f3a3d137.asciidoc | 2 +- .../196aed02b11def364bab84e455c1a073.asciidoc | 2 +- .../19c00c6b29bc7dbc5e92b3668da2da93.asciidoc | 7 +- .../1a1f3421717ff744ed83232729289bb0.asciidoc | 2 +- .../1a3897cfb4f974c09d0d847baac8aa6d.asciidoc | 2 +- .../1a3a4b8a4bfee4ab84ddd13d8835f560.asciidoc | 2 +- .../1a8d92e93481c432a91f7c213099800a.asciidoc | 2 +- .../1a9e03ce0355872a7db27fedc783fbec.asciidoc | 2 +- .../1a9efb56adb2cd84faa9825a129381b9.asciidoc | 2 +- .../1aa91d3d48140d6367b6cabca8737b8f.asciidoc | 2 +- .../1b076ceb1ead9f6897c2f351f0e45f74.asciidoc | 2 +- .../1b0b29e5cd7550c648d0892378e93804.asciidoc | 2 +- .../1b0dc9d076bbb58c6a2953ef4323d2fc.asciidoc | 2 +- .../1b2ab75d3c8064fac6ecc63104396c02.asciidoc | 2 +- .../1b3762712c14a19e8c2956b4f530d327.asciidoc | 2 +- .../1b37e2237c9e3aaf84d56cc5c0bdb9ec.asciidoc | 2 +- .../1c3e3c4f2d268f1826a9b417e1868a58.asciidoc | 2 +- .../1c8b6768c4eefc76fcb38708152f561b.asciidoc | 2 +- .../1cecd4d87a92427175157d41859df2af.asciidoc | 2 +- .../1d252d9217c61c2c1cbe7a92f77b078f.asciidoc | 2 +- .../1dadb7efe27b6c0c231eb6535e413bd9.asciidoc | 2 +- .../1db715eb00832686ecddb6603684fc26.asciidoc | 2 +- .../1e08e054c761353f99211cd18e8ca47b.asciidoc | 2 +- .../1e26353d546d733634187b8c3a7837a7.asciidoc | 2 +- .../1e3384bc255729b65a6f0fc8011ff733.asciidoc | 2 +- .../1e3553a73da487017f7a95088b6aa957.asciidoc | 2 +- .../1ed26c7b445ab1c167bd9385e1f0066f.asciidoc | 2 +- .../1f3dd84ab11bae09d3f99b1b3536e239.asciidoc | 2 +- .../1f673e1a0de2970dc648618d5425a994.asciidoc | 2 +- .../1f6a190fa1aade1fb66680388f184ef9.asciidoc | 2 +- .../1ff12523efbd59c213c676937757c460.asciidoc | 2 +- .../20005d8a6555b259b299d862cd218701.asciidoc | 2 +- .../200f6d4cc7b9c300b8962a119e03873f.asciidoc | 2 +- .../20e3b181114e00c943a27a9bbcf85f15.asciidoc | 2 +- .../20f62d0540bf6261549bd286416eae28.asciidoc | 2 +- .../2105f2d1d81977054a93163a175793ce.asciidoc | 2 +- .../218b9009f120e8ad33f710e019179562.asciidoc | 2 +- .../21c1e6ee886140ce0cd67184dd19b981.asciidoc | 2 +- .../22b176a184517cf1b5801f5eb4f17f97.asciidoc | 10 ++ .../22dde5fe7ac5d85d52115641a68b3c55.asciidoc | 2 +- .../23b062c157235246d7c347b9047b2435.asciidoc | 2 +- .../242a26ced0e5706e48dcda19a4003094.asciidoc | 2 +- .../246763219ec06172f7aa57bba28d344a.asciidoc | 109 ++++++++++++ .../249bf48252c8cea47ef872541c8a884c.asciidoc | 2 +- .../24ad3c234f69f55a3fbe2d488e70178a.asciidoc | 2 +- .../24aee6033bf77a68ced74e3fd9d34283.asciidoc | 2 +- .../24d806d1803158dacd4dda73c4204d3e.asciidoc | 2 +- .../2577acb462b95bd4394523cf2f8a661f.asciidoc | 33 ---- .../25a0dad6547d432f5a3d394528f1c138.asciidoc | 2 +- .../25c0e66a433a0cd596e0641b752ff6d7.asciidoc | 2 +- .../25cb9e1da00dfd971065ce182467434d.asciidoc | 2 +- .../261480571394632db40e88fbb6c59c2f.asciidoc | 2 +- .../2623eb122cc0299b42fc9eca6e7f5e56.asciidoc | 2 +- .../2646710ece0c4c843aebeacd370d0396.asciidoc | 2 +- .../26d3ab748a855eb383e992eb1ff79662.asciidoc | 2 +- .../270549e6b062228312c4e7a54a2c2209.asciidoc | 2 +- .../2720e613d520ce352b62e990c2d283f7.asciidoc | 2 +- .../27384266370152add76471dd0332a2f1.asciidoc | 2 +- .../275ec358d5d1e4b9ff06cb4ae7e47650.asciidoc | 2 +- .../2864d04bf99860ed5dbe1458f1ab5f78.asciidoc | 2 +- .../2884eacac3ad05ff794f5296ec7427e7.asciidoc | 2 +- 
.../2932e6f71e247cf52e11d2f38f114ddf.asciidoc | 2 +- .../295b3aaeb223612afdd991744dc9c873.asciidoc | 2 +- .../29824032d7d64512d17458fdd687b1f6.asciidoc | 2 +- .../299900fb08da80fe455cf3f1bb7d62ee.asciidoc | 2 +- .../29d9df958de292cec50daaf31844b573.asciidoc | 2 +- .../2a1eece9a59ac1773edcf0a932c26de0.asciidoc | 7 +- .../2a21674c40f9b182a8944769d20b2357.asciidoc | 34 ++++ .../2a67608dadbf220a2f040f3a79d3677d.asciidoc | 35 ++++ .../2aa548b692fc2fe7b6f0d90eb8b2ae29.asciidoc | 2 +- .../2acf75803494fef29f9ca70671aa6be1.asciidoc | 2 +- .../2afdf0d83724953aa2875b5fb37d60cc.asciidoc | 10 +- .../2b1c560f00d9bcf5caaf56c03f6b5962.asciidoc | 2 +- .../2b5c69778eb3daba9fbd7242bcc2daf9.asciidoc | 2 +- .../2b7687e3d7c06824950e00618c297864.asciidoc | 2 +- .../2bb41b0b4876ce98cd0cd8fb6d337f18.asciidoc | 16 -- .../2c0dbdcf400cde5d36f7c9e6c1101011.asciidoc | 2 +- .../2c1e16e9ac24cfea979af2a69900d3c2.asciidoc | 2 +- .../2c3dff44904d3d73ff47f1afe89c7f86.asciidoc | 2 +- .../2c602b4ee8f22cda2cdf19bad31da0af.asciidoc | 2 +- .../2d0244c020075595acb625aa5ba8f455.asciidoc | 2 +- .../2d60e3bdfee7afbddee149f40450b8b5.asciidoc | 2 +- .../2d8fcb03de417a71e7888bbdd948a692.asciidoc | 2 +- .../2de6885bacb8769b8f22dce253c96b0c.asciidoc | 2 +- .../2e09666d3ad5ad9afc22763ee6e97a2b.asciidoc | 2 +- .../2e36fe22051a47e052e349854d9948b9.asciidoc | 2 +- .../2e796e5ca59768d4426abbf9a049db3e.asciidoc | 2 +- .../2e7f4b9be999422a12abb680572b13c8.asciidoc | 2 +- .../2f07b81fd47ec3b074242a760f0c4e9e.asciidoc | 13 -- .../2f0b2181c434a879a23b4643bdd92575.asciidoc | 2 +- .../2f2580ea420e1836d922fe48fa8ada97.asciidoc | 2 +- .../2f72a63c73dd672ac2dc3997ad15dd41.asciidoc | 23 +++ .../2fc80a2ad1ca8b2dcb13ed1895b8e861.asciidoc | 2 +- .../2fe28d9a91b3081a9ec4601af8fb7b1c.asciidoc | 2 +- .../2fea3e324939cc7e9c396964aeee7111.asciidoc | 2 +- .../2fee452baff92b409cbfc8d71eb5fc0e.asciidoc | 2 +- .../300576666769b78fa6fa26b232837f81.asciidoc | 2 +- .../30abc76a39e551f4b52c65002bb6405d.asciidoc | 2 +- .../30d051f534aeb884176eedb2c11dac85.asciidoc | 23 +++ .../30f3e3b9df46afd12e68bc71f18483b4.asciidoc | 2 +- .../31ab4ec26176857280af630bf84a2823.asciidoc | 2 +- .../31bc93e429ad0de11dd2dd231e8f2c5e.asciidoc | 10 -- .../31f4400716500149cccbc19aa06bff66.asciidoc | 2 +- .../3218f8ccd59c8c90349816e0428e8fb8.asciidoc | 2 +- .../32b7963c5cabbe9cc7d15da62f5edda9.asciidoc | 2 +- ...3312c82f81816bf76629db9582991812.asciidoc} | 5 +- .../3337c817ebd438254505a31e91c91724.asciidoc | 2 +- .../3341d3bbb53052447a37c92a04c14b70.asciidoc | 2 +- .../33610800d9de3c3e6d6b3c611ace7330.asciidoc | 2 +- .../3386fe07e90844dbcdbbe7c07f09e04a.asciidoc | 2 +- .../339c4e5af9f9069ad9912aa574488b59.asciidoc | 2 +- .../33d480fc6812ada75756cf5337bc9092.asciidoc | 2 +- .../342ddf9121aeddd82fea2464665e25da.asciidoc | 2 +- .../3477a89d869b1f7f72d50c2ca86c4679.asciidoc | 2 +- .../34be27141e3a476c138546190101c8bc.asciidoc | 2 +- .../34cdeefb09bbbe5206957a8bc1bd513d.asciidoc | 13 -- .../3541d4a85e27b2c3896a7a7ee98b4b37.asciidoc | 2 +- .../3545261682af72f4bee57f2bac0a9590.asciidoc | 2 +- .../355d0ee2fcb6c1fc403c6267f710e25a.asciidoc | 2 +- .../357edc9d10e98ed776401c7a439a1a55.asciidoc | 2 +- .../35a272df8c919a12d7c3106a18245748.asciidoc | 2 +- .../35be136ba9df7474a5521631e2a385b1.asciidoc | 2 +- .../35f892b475a1770f18328158be7039fd.asciidoc | 2 +- .../3608e4fcd17dd8d5f88ec9a3db2f5d89.asciidoc | 2 +- .../365256ebdfa47b449780771d9beba8d9.asciidoc | 2 +- .../36d229f734adcdab00be266a7ce038b1.asciidoc | 2 +- .../36da9668fef56910370f16bfb772cc40.asciidoc | 2 +- 
.../370b297ed3433577adf53e64f572d89d.asciidoc | 2 +- .../3759ca688c4bd3c838780a9aad63258b.asciidoc | 2 +- .../37983daac3d9c8582583a507b3adb7f2.asciidoc | 2 +- .../37f367ca81a16d3aef4ef7126ec33a2e.asciidoc | 79 +++++++++ .../3819d0a5c2eed635c88e9e7bf2e81584.asciidoc | 2 +- .../386eb7dcd3149db82605bf22c5d851bf.asciidoc | 2 +- .../388d3eda4f792d3fce044777739217e6.asciidoc | 2 +- .../38ba93890494bfa7beece58dffa44f98.asciidoc | 23 --- .../38eed000de433b540116928681c520d3.asciidoc | 2 +- .../38f7739f750f1411bccf511a0abaaea3.asciidoc | 2 +- .../3924ee252581ebb96ac0e60046125ae8.asciidoc | 2 +- .../3951d7fcd7f849fa278daf342872125a.asciidoc | 2 +- .../39760996f94ad34aaceaa16a5cc97993.asciidoc | 2 +- .../397ab5f9ea0b69ae85038bb0b9915180.asciidoc | 2 +- .../398389933901b572a06a752bc780af7c.asciidoc | 2 +- .../39ce44333d28ed2b833722d3e3cb06f3.asciidoc | 2 +- .../39d6f575c9458d9c941364dfd0493fa0.asciidoc | 2 +- .../3a12feb0de224bfaaf518d95b9f516ff.asciidoc | 2 +- .../3a489743e49902df38e3368cae00717a.asciidoc | 2 +- .../3a5f2e2313614ea9693545edee22ac43.asciidoc | 2 +- .../3ac8b5234e9d53859245cf8ab0094ca5.asciidoc | 2 +- .../3b04cc894e6a47d57983484010feac0c.asciidoc | 2 +- .../3b18e9de638ff0b1c7a1f1f6bf1c24f3.asciidoc | 2 +- .../3b1ff884f3bab390ae357e622c0544a9.asciidoc | 2 +- .../3b40db1c5c6b36f087d7a09a4ce285c6.asciidoc | 2 +- .../3b606631284877f9bca15051630995ad.asciidoc | 2 +- .../3b64821fe9db73eb03860c60d775d7ff.asciidoc | 2 +- .../3b9c54604535d97e8368d47148aecc6f.asciidoc | 2 +- .../3bb491db29deba25e1cc82bcaa1aa1a1.asciidoc | 2 +- .../3bfa2362add163802fc2210cc2f37ba2.asciidoc | 2 +- .../3c345feb7c52fd54bcb5d5505fd8bc3b.asciidoc | 2 +- .../3c6abb9885cb1a997fcdd16f7fa4f673.asciidoc | 2 +- .../3c7621a81fa982b79f040a6d2611530e.asciidoc | 2 +- .../3d1ff6097e2359f927c88c2ccdb36252.asciidoc | 2 +- .../3d316bddd8503a6cc10566630a4155d3.asciidoc | 2 +- .../3d6a56dd3d93ece0e3da3fb66b4696d3.asciidoc | 2 +- .../3da35090e093c2d83c3b7d0d83bcb4ae.asciidoc | 2 +- .../3db2b5a6424aa92ecab7a8640c38685a.asciidoc | 2 +- .../3e121b43773cbb6dffa9b483c86a1f8d.asciidoc | 2 +- .../3e33c1a4298ea6a0dec65a3ebf9ba973.asciidoc | 2 +- .../3e8ed6ae016eb823cb00d9035b8ac459.asciidoc | 2 +- .../3ea4c971b3f47735dcc207ee2645fa03.asciidoc | 16 ++ .../3ed79871d956bfb2d6d2721d7272520c.asciidoc | 2 +- .../3f1fe5f5f99b98d0891f38003e10b636.asciidoc | 7 +- .../3f30310cc6d0adae6b0f61705624a695.asciidoc | 2 +- .../3f8dc309b63fa0437898107b0d964217.asciidoc | 2 +- .../3f9dcf2aa42f3ecfb5ebfe48c1774103.asciidoc | 18 ++ .../3fe4264ace04405989141c43aadfff81.asciidoc | 2 +- .../3fe5e6c0d5ea4586aa04f989ae54b72e.asciidoc | 2 +- .../400e89eb46ead8e9c9e40f123fd5e590.asciidoc | 2 +- .../4029af36cb3f8202549017f7378803b4.asciidoc | 2 +- .../405511f7c1f12cc0a227b4563fe7b2e2.asciidoc | 8 +- .../405ac843a9156d3cab374e199cac87fb.asciidoc | 2 +- .../405db6f3a01eceacfaa8b0ed3e4b3ac2.asciidoc | 2 +- .../4061fd5ba7221ca85805ed14d59a6bc5.asciidoc | 2 +- .../408060f0c52300588a6dee774f4fd6a5.asciidoc | 2 +- .../4113c57384aa37c58d11579e20c00760.asciidoc | 2 +- .../41175d304e660da2931764f9a4418fd3.asciidoc | 2 +- .../41195ef13af0465cdee1ae18f6c00fde.asciidoc | 2 +- .../412f8238ab5182678f1d8f6383031b11.asciidoc | 2 +- .../41d24383d29b2808a65258a0a3256e96.asciidoc | 18 ++ .../41dbd79f624b998d01c10921e9a35c4b.asciidoc | 2 +- .../41fd33a293a575bd71a1fac7bcc8b47c.asciidoc | 2 +- .../4207219a892339e8f3abe0df8723dd27.asciidoc | 2 +- .../421e68e2b9789f0e8c08760d9e685d1c.asciidoc | 2 +- .../424fbf082cd4affb84439abfc916b597.asciidoc | 2 +- 
.../425eaaf9c7e3b1e77a3474fbab4183b4.asciidoc | 2 +- .../430705509f8367aef92be413f702520b.asciidoc | 2 +- .../436d50b85fc8f0977d02059eec00719b.asciidoc | 2 +- .../43d9e314431336a6f084cea76dfd6489.asciidoc | 2 +- .../43e86fbaeed068dcc981214338559b5a.asciidoc | 2 +- .../43fe75fa9f3fca846598fdad58fd98cb.asciidoc | 2 +- .../44198781d164a15be633d4469485a544.asciidoc | 2 +- .../44231f7cdd5c3a21025861cdef31e355.asciidoc | 2 +- .../44385b61342e20ea05f254015b2b04d7.asciidoc | 2 +- .../4479e8c63a04fa22207a6a8803eadcad.asciidoc | 2 +- .../4498b9d3b0c77e1b9ef6664ff5963ce2.asciidoc | 2 +- .../44b8a236d7cfb31c43c6d066ae16d8cd.asciidoc | 2 +- .../44da736ce0e1587c1e7c45eee606ead7.asciidoc | 2 +- .../44db41b8465af951e366da97ade63bc1.asciidoc | 2 +- .../458b2228aed7464d915a5d73cb6b98f6.asciidoc | 2 +- .../45954b8aaedfed57012be8b6538b0a24.asciidoc | 48 ++++++ .../46064e81620162a23e75002a7eeb8b10.asciidoc | 2 +- .../46658f00edc4865dfe472a392374cd0f.asciidoc | 2 +- .../468f7ec42cdd8287cdea3ec1cea4a514.asciidoc | 2 +- .../46b1c1f6e0c86528be84c373eeb8d425.asciidoc | 2 +- .../480e531db799c4c909afd8e2a73a8d0b.asciidoc | 2 +- .../483d669ec0768bc4e275a568c6164704.asciidoc | 2 +- .../488f6df1df71972392b670ce557f7ff3.asciidoc | 2 +- ...48e142e6c69014e0509d4c9251749d77.asciidoc} | 5 +- .../4982c547be1ad9455ae836990aea92c5.asciidoc | 2 +- .../49b31e23f8b9667b6a7b2734d55fb6ed.asciidoc | 24 --- .../49c052a748c943180db78fee8e144239.asciidoc | 2 +- .../49d87c2eb7314ed34221c5fb4f21dfcc.asciidoc | 2 +- .../49e8773a34fcbf825de38426cff5509c.asciidoc | 2 +- .../49f4d2a461536d150e16b1e0a3148678.asciidoc | 2 +- .../4a1951844bd39f26961bfc965f3432b1.asciidoc | 2 +- .../4a4b8a406681584a91c0e614c1fa4344.asciidoc | 2 +- .../4ae494d1e62231e832fc0436b04e2014.asciidoc | 2 +- .../4b1044259a6d777d87529eae25675005.asciidoc | 2 +- .../4b5110a21676cc0e26e050a4b4552235.asciidoc | 2 +- .../4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc | 9 +- .../4bc4db44b8c74610b73f21a421099a13.asciidoc | 2 +- .../4be07b34db282044c88d5021c7ea08ee.asciidoc | 2 +- .../4bef98a2dac575a50ee0783c2269f1db.asciidoc | 2 +- .../4bfcb2861f1d572bd0d66acd66deab0b.asciidoc | 2 +- .../4c174e228b6b74497b73ef2be80de7ad.asciidoc | 2 +- .../4c3db8987d7b2d3d3df78ff1e71e7ede.asciidoc | 2 +- .../4c5f0d7af287618062bb627b44ccb23e.asciidoc | 2 +- .../4c712bd5637892a11f16b8975a0a98ed.asciidoc | 2 +- .../4c9350ed09b28f00e297ebe73c3b95a2.asciidoc | 2 +- .../4ca15672fc5ab1d80a127d086b6d2837.asciidoc | 2 +- .../4cb44556b8c699f43489b17b42ddd475.asciidoc | 2 +- .../4cdbd53f08df4bf66e2a47c0f1fcb3f8.asciidoc | 2 +- .../4cdcc3fde5cea165a3a7567962b9bd61.asciidoc | 2 +- .../4d21725453955582ff12b4a1104aa7b6.asciidoc | 2 +- .../4d2e6eb7fea407deeb7a859c267fda62.asciidoc | 2 +- .../4d7c0b52d3c0a084157428624c543c90.asciidoc | 2 +- .../4da0cb8693e9ceceee2ba3b558014bbf.asciidoc | 2 +- .../4dab4c5168047ba596af1beb0e55b845.asciidoc | 10 -- .../4e3414fc712b16311f9e433dd366f49d.asciidoc | 2 +- .../4e931cfac74e46e221cf4a9ab88a182d.asciidoc | 2 +- .../4ed946065faa92f9950f04e402676a97.asciidoc | 2 +- ...4edfb5934d14ad7655bd7e19a112b5c0.asciidoc} | 12 +- .../4ee31fd4ea6d18f32ec28b7fa433441d.asciidoc | 2 +- .../4f08d9e21d9f199acc77abfb83287878.asciidoc | 2 +- .../4f140d8922efdf3420e41b1cb669a289.asciidoc | 2 +- .../4f666d710758578e2582850dac3ad144.asciidoc | 2 +- .../4f6694ef147a73b1163bde3c13779d26.asciidoc | 2 +- .../4f8a4ad49e2bca6784c88ede18a1a709.asciidoc | 2 +- .../4fa9ee04188cbf0b38cfc28f6a56527d.asciidoc | 2 +- .../4fb0629146ca78b85e823edd405497bb.asciidoc | 2 +- .../5093bfd281dbe41bd0dba8ff979e6e47.asciidoc | 
2 +- .../50a9623c153cabe64101efb633e10e6c.asciidoc | 2 +- .../50b5c0332949d2154c72b629b5fa6222.asciidoc | 2 +- .../50d9c0508ddb0fc5ba5a893eec219dd8.asciidoc | 2 +- .../50dc35d3d8705bd62aed20a15209476c.asciidoc | 2 +- .../511e5bb8ab881171b7e8629095e30b85.asciidoc | 2 +- .../5174c3c731fc1703e5b43ae2bae7a80e.asciidoc | 2 +- .../5195a88194f7a139c635a84398d76205.asciidoc | 2 +- .../519e46350316a33162740e5d7968aa2c.asciidoc | 27 +++ .../51b44224feee6e2e5974824334474c77.asciidoc | 2 +- .../51f1a0930362594b231a5bcc17673768.asciidoc | 2 +- .../5275842787967b6db876025f4a1c6942.asciidoc | 2 +- .../52a2d119addb15366a935115518335fd.asciidoc | 2 +- .../52b71aa4ae6563abae78cd20ff06d1e9.asciidoc | 2 +- .../52c2b4c180388f5ae044588ba70b70f0.asciidoc | 2 +- .../52f4c5eb08d39f98e2e2f5527ece9731.asciidoc | 2 +- .../5305bc07c1bf90bab3e8db1de3e31b26.asciidoc | 2 +- .../532f371934b61fb4992d37bedcc085de.asciidoc | 2 +- .../53c6256295111524d5ff2885bdcb99a9.asciidoc | 2 +- .../53d9d2ec9cb8d211772d764e76fe6890.asciidoc | 17 ++ .../5457c94f0039c6b95c7f9f305d0c6b58.asciidoc | 2 +- .../55096381f811388fafd8e244dd2402c8.asciidoc | 2 +- .../558b3f9b987771e9f9f35e51a0d7e062.asciidoc | 2 +- .../55e8ddf643726dec51531ada0bec7143.asciidoc | 2 +- .../55f4a15b84b724b9fbf2efd29a4da120.asciidoc | 2 +- .../5619103306878d58a058bce87c5bd82b.asciidoc | 2 +- .../563dfbf421422c837ee6929ae2ede876.asciidoc | 2 +- .../565386eee0951865a684e41fab53b40c.asciidoc | 2 +- .../56563f91d9f0b74e9e4aae9cb221845b.asciidoc | 2 +- .../568979150ce18739f8d3ea859355aaa3.asciidoc | 2 +- .../56a1aa4f7fa62f2289e20607e3039bf3.asciidoc | 2 +- .../56b6b50b174a935d368301ebd717231d.asciidoc | 2 +- .../56db76c987106a870357854d3068ad98.asciidoc | 2 +- .../578808065fee8691355b8f25c35782cd.asciidoc | 2 +- .../57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc | 7 +- .../57e0bbab98f17d5b564d1ea146a55fe4.asciidoc | 2 +- .../582c4b05401dbc190b19411282d85310.asciidoc | 2 +- .../5836b09198feb1269ed12839b416123d.asciidoc | 16 ++ .../5837d5f50665ac0a26181d3aaeb3f204.asciidoc | 2 +- .../585a34ad79aee16678b37da785933ac8.asciidoc | 2 +- .../58ca855be30049f8f0879e532db51ee2.asciidoc | 2 +- .../58f72be60c25752d7899a35fc60fe6eb.asciidoc | 2 +- .../591c7fb7451069829a14bba593136f1f.asciidoc | 2 +- .../5987afb2c17c73fe3d860937565ef115.asciidoc | 2 +- .../599f693cc7d30b1153f5eeecec8eb23a.asciidoc | 2 +- .../59b8b9555f4aa30bc4613f819e9fc8f0.asciidoc | 2 +- .../59d736a4d064ed2013c7ead8e32e0998.asciidoc | 2 +- .../59f0ad2a6f97200e98e8eb079cdd8334.asciidoc | 2 +- .../5a70db31f587b7ffed5e9bc1445430cb.asciidoc | 22 --- .../5ad365ed9e1a3c26093a0f09666c133a.asciidoc | 2 +- .../5afbd9caed88c32f8a2968c07054f096.asciidoc | 2 +- .../5b0cc9e186a8f765a11141809b8b17b7.asciidoc | 2 +- .../5b191f2dbfa46c774cc9b9b9e8d1d831.asciidoc | 2 +- .../5b281956e35a26e734c482b42b356c0d.asciidoc | 2 +- .../5b2a13366bd4e1ab4b25d04d360570dc.asciidoc | 2 +- .../5b7d6f1db88ca6f42c48fa3dbb4341e8.asciidoc | 2 +- .../5bba213a7f543190139d1a69ab2ed076.asciidoc | 11 +- .../5bbccf103107e505c17ae59863753efd.asciidoc | 2 +- .../5c7ece1f30267adabdb832424871900a.asciidoc | 2 +- .../5ccfd9f4698dcd7cdfbc6bad60081aab.asciidoc | 2 +- .../5ceb734e3affe00e2cdc29af748d95bf.asciidoc | 2 +- .../5cf12cc4f98d98dc79bead7e6556679c.asciidoc | 2 +- .../5d03bb385904d20c5323885706738459.asciidoc | 2 +- .../5deeed427f35cbaee4b8ddc45002a9d7.asciidoc | 2 +- .../5dfb23f6e36ef484f1d3271bae76a8d1.asciidoc | 2 +- .../5e021307d331a4483a5aa2198168451b.asciidoc | 2 +- .../5e099493f135ff7bd614e935c4f2bf5a.asciidoc | 2 +- 
.../5e124875d97c27362ae858160ae1c6d5.asciidoc | 2 +- .../5e415c490a46358643ee2aab554b4876.asciidoc | 2 +- .../5f16358ebb5d14b86f57612d5f92d923.asciidoc | 17 ++ .../5f1ed9cfdc149763b444acfbe10b0e16.asciidoc | 2 +- .../5f8acd1e367b048b5542dbc6079bcc88.asciidoc | 2 +- .../5faa121e00a0582160b2adb2b72fed67.asciidoc | 2 +- .../5fca6671bc8eaddc44ac488d1c3c6909.asciidoc | 2 +- .../5fde0d78e9b2cc0519f8a63848ed344e.asciidoc | 2 +- .../601ad3b0ceccb3fcd282e5ec36748954.asciidoc | 2 +- .../602e04051c092cf77de2f75a563661b8.asciidoc | 2 +- .../604da59fe41160efa10a846a9dacc07a.asciidoc | 2 +- .../6061aadb3b870791278212d1e8f52b39.asciidoc | 2 +- .../6097ae69c64454a92a89ef01b994e9f9.asciidoc | 2 +- .../60d3f9a99cc91b43aaa7524a9a74dba0.asciidoc | 2 +- .../60f889fbed5df3185444f7015b48ed76.asciidoc | 2 +- .../612c2e975f833de9815651135735eae5.asciidoc | 2 +- .../618c9d42284c067891fb57034a4fd834.asciidoc | 2 +- .../61d6b9503459914c436930c3ae87d454.asciidoc | 2 +- .../6244204213f60edf2f23295f9059f2c9.asciidoc | 2 +- .../62ccee6ad356428c2d625742f961ceb7.asciidoc | 2 +- .../62d3c8fccb11471bdc12555c1a7777f2.asciidoc | 2 +- .../62eafc5b3ab75cc67314d5a8567d6077.asciidoc | 2 +- .../63893e7e9479a9b60db71dcddcc79aaf.asciidoc | 2 +- .../63a53fcb0717ae9033a679cbfc932851.asciidoc | 2 +- .../63bf3480627a89b4b4ede4150e1d6bc0.asciidoc | 2 +- .../63cc960215ae83b359c12df3c0993bfa.asciidoc | 2 +- .../63e20883732ec30b5400046be2efb0f1.asciidoc | 2 +- .../63ecdab34940af053acc409164914c32.asciidoc | 2 +- .../640621cea39cdeeb76fbc95bff31a18d.asciidoc | 2 +- .../640dbeecb736bd25f6f2b392b76a7531.asciidoc | 2 +- .../6414b9276ba1c63898c3ff5cbe03c54e.asciidoc | 2 +- .../642161d70dacf7d153767d37d3726838.asciidoc | 2 +- .../643e19c3b6ac1134554dd890e2249c2b.asciidoc | 2 +- .../64622409407316d2d47094e692d9b516.asciidoc | 2 +- .../646d71869f1a18c5bede7759559bfc47.asciidoc | 2 +- .../64a79861225553799b26e118d7851dcc.asciidoc | 2 +- .../64c572abc23394a77b6cca0b5368ee1d.asciidoc | 2 +- .../64d24f4b2a57dba48092dafe3eb68ad1.asciidoc | 2 +- .../654882f545eca8d7047695f867c63072.asciidoc | 2 +- .../65b6185356f16f2f0d84bc5aee2ed0fc.asciidoc | 2 +- .../6606d46685d10377b996b5f20f1229b5.asciidoc | 2 +- .../66539dc6011dd2e0282cf81db1f3df27.asciidoc | 2 +- .../6689aa213884196b47a6f482d4993749.asciidoc | 2 +- .../674bb755111c6fbaa4c5ac759395c122.asciidoc | 2 +- .../67aac8882fa476db8a5878b67ea08eb3.asciidoc | 2 +- .../67bab07fda27ef77e3bc948211051a33.asciidoc | 2 +- .../67ffa135c50c43d6788636c88078c7d1.asciidoc | 2 +- .../681d24c2633f598fc43d6afff8996dbb.asciidoc | 138 ++++++++++++++++ .../68738b4fd0dda177022be45be95b4c84.asciidoc | 2 +- .../68b64313bf89ec3f2c645da61999dbb4.asciidoc | 2 +- ...68d7f7d4d268ee98caead5aef19933d6.asciidoc} | 2 +- .../692606cc6d6462becc321d92961a3bac.asciidoc | 2 +- .../69541f0bb81ab3797926bb2a00607cda.asciidoc | 2 +- .../69a08e7bdcc616f3bdcb8ae842d9e30e.asciidoc | 2 +- .../69c07cfdf8054c301cd6186c5d71aa02.asciidoc | 2 +- .../69f8b0f2a9ba47e11f363d788cee9d6d.asciidoc | 2 +- .../6a350a17701e8c8158407191f2718b66.asciidoc | 2 +- .../6a3a86ff58e5f20950d429cf2832c229.asciidoc | 2 +- .../6aca241c0361d26f134712821e2d09a9.asciidoc | 2 +- .../6b0288acb739c4667d41339e5100c327.asciidoc | 2 +- .../6b1336ff477f91d4a0db0b06db546ff0.asciidoc | 2 +- .../6b3dcde0656d3a96dbcfed1ec814e10a.asciidoc | 2 +- .../6b67c6121efb86ee100d40c2646f77b5.asciidoc | 15 ++ .../6b6f5e0ab4ef523fc9a3a4a655848f64.asciidoc | 2 +- .../6b6fd0a5942dfb9762ad2790cf421a80.asciidoc | 2 +- .../6b8c5c8145c287c4fc535fa57ccf95a7.asciidoc | 2 +- 
.../6ba332596f5eb29660c90ab2d480e7dc.asciidoc | 2 +- .../6c70b022a8a74b887fe46e514feb38c0.asciidoc | 2 +- .../6c72460570307f23478100db04a84c8e.asciidoc | 2 +- .../6c8bf6d4d68b7756f953be4c07655337.asciidoc | 2 +- .../6c927313867647e0ef3cd3a37cb410cc.asciidoc | 2 +- .../6ce6cac9df216c52371c2e77e6e07ba1.asciidoc | 2 +- .../6dd2a107bc64fd6f058fb17c21640649.asciidoc | 2 +- .../6dd4c02fe3d6b800648a04d3e2d29fc1.asciidoc | 2 +- .../6ddd4e657efbf45def430a6419825796.asciidoc | 2 +- .../6e6b78e6b689a5d6aa637271b6d084e2.asciidoc | 2 +- .../6f3b723bf6179b96c3413597ed7f49e1.asciidoc | 7 +- .../6f48ab7cbb8a4a46d0e9272c07166eaf.asciidoc | 2 +- .../6f4cbebfd6d2cee54aa3e7a86a755ef8.asciidoc | 2 +- .../6f855bc92b4cc6e6a63f95bce1cb4441.asciidoc | 2 +- ...6fa02c2ad485bbe91f44b321158250f3.asciidoc} | 9 + .../6fa570ae7039171e2ab722344ec1063f.asciidoc | 2 +- .../6fc778e9a888b16b937c5c2a7a1ec140.asciidoc | 2 +- .../6fe6c095c6995e0f2214f5f3bc85d74e.asciidoc | 2 +- .../701f1fffc65e9e51c96aa60261e2eae3.asciidoc | 2 +- .../708e7ec681be41791f232817a07cda82.asciidoc | 2 +- .../70c736ecb3746dbe839af0e468712805.asciidoc | 2 +- .../70cc66bf4054ebf0ad4955cb99d9ab80.asciidoc | 2 +- .../7106e6317e6368b9863cf64df9c6f0c9.asciidoc | 2 +- .../71c629c44bf3c542a0daacbfc253c4b0.asciidoc | 2 +- .../71fa652ddea811eb3c8bf8c5db21e549.asciidoc | 2 +- .../72bae0252b74ff6fd9f0702ff008d84a.asciidoc | 2 +- .../72beebe779a258c225dee7b023e60c52.asciidoc | 2 +- .../730045fae3743c39b612813a42c330c3.asciidoc | 26 +++ .../73646c12ad33a813ab2280f1dc83500e.asciidoc | 2 +- .../73b07b24ab2c4cd304a57f9cbda8b863.asciidoc | 2 +- .../73d1a6c5ef90b7e35d43a0bfdc1e158d.asciidoc | 2 +- .../73ebc89cb32adb389ae16bb088d7c7e6.asciidoc | 2 +- .../73fa0d6d03cd98ea538fff9e89d99eed.asciidoc | 2 +- .../741180473ba526219578ad0422f4fe81.asciidoc | 2 +- .../7429b16221fe741fd31b0584786dd0b0.asciidoc | 2 +- .../7471e97aaaf21c3a200abdd89f15c3cc.asciidoc | 2 +- .../7478ff69113fb53f41ea07cdf911fa67.asciidoc | 33 ++++ .../74a80c28737a0648db0dfe7f049d12f2.asciidoc | 2 +- .../74b229a6e020113e5749099451979c89.asciidoc | 26 --- .../74da377bccad43da2b0e276c086d26ba.asciidoc | 2 +- .../750ac969f9a05567f5cdf4f93d6244b6.asciidoc | 2 +- .../75957a7d1b67e3d47899c5f18b32cb61.asciidoc | 2 +- .../75aba7b1d3a22dce62f26b8b1e6bee58.asciidoc | 2 +- .../75e6d66e94e61bd8a555beaaee255c36.asciidoc | 2 +- .../76bc87c2592864152768687c2963d1d1.asciidoc | 2 +- .../76c167d8ab305cb43b594f140c902dfe.asciidoc | 2 +- .../76dbdd0b2bd48c3c6b1a8d81e23bafd6.asciidoc | 2 +- .../76e02434835630cb830724beb92df354.asciidoc | 47 ++++++ .../7709a48020a6cefbbe547fb944541cdb.asciidoc | 2 +- .../77447e2966708e92f5e219d43ac3f00d.asciidoc | 2 +- .../774bfde8793dc4927f7cad2dd91c5b5f.asciidoc | 2 +- .../77518e8c6198acfe77c0934fd2fe65cb.asciidoc | 7 +- .../7752b677825523bfb0c38ad9325a6d47.asciidoc | 2 +- .../776b553df0e507c96dbdbaedecaca0cc.asciidoc | 2 +- .../7781b13b0ffff6026d10c4e3ab4a3a51.asciidoc | 2 +- .../77b90f6787195767b6da60d8532714b4.asciidoc | 2 +- .../77c099c97ea6911e2dd6e996da7dcca0.asciidoc | 2 +- .../77c50f982906718ecc59aa708aed728f.asciidoc | 2 +- .../77ca1a3193f75651e0bf9e8fe5227a04.asciidoc | 2 +- ...77cebba946fe648873a1e7375c13df41.asciidoc} | 5 +- .../77e3dcd87d2b2c8e6ec842462b02df1f.asciidoc | 2 +- .../78043831fd32004a82930c8ac8a1d809.asciidoc | 48 ++++++ .../78176cd6f570e1534bb40b19e6e900b6.asciidoc | 2 +- .../78c4035e4fbf6851140660f6ed2a1fa5.asciidoc | 2 +- .../78c96113ae4ed0054e581b17542528a7.asciidoc | 2 +- .../790684b45bef2bb848ea932f0fd0cfbd.asciidoc | 35 ++++ 
...79d206a528be704050a437adce2496dd.asciidoc} | 15 +- .../79f33e05b203eb46eef7958fbc95ef77.asciidoc | 2 +- .../7a0c633a67244e9703344d036e584d95.asciidoc | 2 +- .../7a23a385a63c87cab58fd494870450fd.asciidoc | 2 +- ...7a2fdfd7b0553d63440af7598f9ad867.asciidoc} | 4 +- .../7a3a7fbd81e5050b42e8c1eca26c7c1d.asciidoc | 2 +- .../7a8de5606f283f4ef171b015eef6befa.asciidoc | 2 +- .../7ae434b3667c589a8e70fe560f4ee3f9.asciidoc | 2 +- .../7b5c231526846f2f7b98d78f3656ae6a.asciidoc | 2 +- .../7b864d61767ab283cfd5f9b9ba784b1f.asciidoc | 2 +- .../7ba29f0be2297b54a640b0a17d7ef5ca.asciidoc | 11 ++ ...7bdc283b96c7a965fae23013647b8578.asciidoc} | 10 +- .../7cd23457e220c8b64c5b0041d2acc27a.asciidoc | 2 +- .../7cd3d8388c51a9f6ee3f730cdaddbb89.asciidoc | 2 +- .../7d880157a95f64ad339225d4af71c2de.asciidoc | 2 +- .../7dabae9b37d2cbd724f2a069be9e753b.asciidoc | 2 +- .../7daff6b7e668ab8a762b8ab5dff7a167.asciidoc | 2 +- .../7dd0d9cc6c5982a2c003d301e90feeba.asciidoc | 37 +++++ .../7de7e647c1c9cbe0a1df0d104fc0a947.asciidoc | 2 +- .../7df191cc7f814e410a4ac7261065e6ef.asciidoc | 2 +- .../7e126e2751311db60cfcbb22c9c41caa.asciidoc | 2 +- .../7e484b8b41f9dbc2bcf1f340db197c1d.asciidoc | 2 +- .../7e4cb3de3e3c75646b60f9f81ddc59cc.asciidoc | 2 +- .../7e5faa551f2c95ffd627da352563d450.asciidoc | 2 +- .../7e74d1a54e816e8f40cfdaa01b070788.asciidoc | 2 +- .../7e77509ab646276ff78f58bb38bec8dd.asciidoc | 2 +- .../7ebeb6cf26be5b5ecdfd408bd0fc3215.asciidoc | 2 +- .../7ebfb30b3ece855c1b783d9210939469.asciidoc | 2 +- .../7f2d511cb64743c006225e5933a14bb4.asciidoc | 2 +- .../7f37031fb40b68a61255b7c71d7eed0b.asciidoc | 2 +- .../7f56755fb6c42f7e6203339a6d0cb6e6.asciidoc | 2 +- .../7fb921376cbf66bf9f381bcdd62030ba.asciidoc | 2 +- .../7fd5883564d183603e60b37d286ac7e2.asciidoc | 2 +- .../800861c15bb33ca01a46fb97dde7537a.asciidoc | 2 +- ...80135e8c644e34cc70ce8a4e7915d1a2.asciidoc} | 4 +- .../8051766cadded0892290bc2cc06e145c.asciidoc | 2 +- .../808f4db1e2361be77dd6816c1f818139.asciidoc | 2 +- .../80dd7f5882c59b9c1c90e8351937441f.asciidoc | 7 +- .../80edd2124a822d9f9bf22ecc49d2c2e9.asciidoc | 2 +- .../8141b60ad245ece2ff5e8d0817400ee5.asciidoc | 2 +- .../8141cdaddbe7d794f09f9ee84e46194c.asciidoc | 2 +- .../81612c2537386e031b7eb604f6756a71.asciidoc | 2 +- .../81aad155ff23b1b396833b1182c9d46b.asciidoc | 2 +- .../821422f8a03dc98d024a15fc737fe9eb.asciidoc | 2 +- .../824fded1f9db28906ae7e85ae8de9bd0.asciidoc | 2 +- .../827b7e9308ea288f18aea00a5accc38e.asciidoc | 2 +- .../82844ef45e11c0eece100d3109db3182.asciidoc | 2 +- .../828f0045747fde4888a947bb99e190e3.asciidoc | 2 +- .../82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc | 35 ++++ .../82d6de3081de7b0664f44adf2942675a.asciidoc | 2 +- .../8330b2ea6317769e52d0647ba434b354.asciidoc | 2 +- .../8357aa6099089940589ae3e97e7bcffa.asciidoc | 2 +- .../838a4eabebba4c06100fb37dc30c7722.asciidoc | 2 +- .../839710129a165cf93c6e329abedf9089.asciidoc | 2 +- .../839a4b2930856790e34cc9dfeb983284.asciidoc | 2 +- .../83b94f9e7b3a9abca8e165ea56927714.asciidoc | 2 +- .../841d8b766902c8e3ae85c228a31383ac.asciidoc | 2 +- .../84237aa9da49ab4b4c4e2b21d2548df2.asciidoc | 2 +- .../84c61160ca815e29e9973ba1380219dd.asciidoc | 2 +- .../84c69fb07050f0e89720007a6507a221.asciidoc | 2 +- .../853fc710cea79fb4e1a85fb6d149f9c5.asciidoc | 2 +- .../8582e918a6275472d2eba2e95f1dbe77.asciidoc | 2 +- .../85e2719d9fd6d2c2d47d28d39f2e3f7e.asciidoc | 2 +- .../85f2839beeb71edb66988e5c82188be0.asciidoc | 2 +- .../85f9fc6f98e8573efed9b034e853d5ae.asciidoc | 2 +- .../8619bd17bbfe33490b1f277007f654db.asciidoc | 2 +- .../861f5f61409dc87f3671293b87839ff7.asciidoc 
| 2 +- .../86280dcb49aa89083be4b2644daf1b7c.asciidoc | 2 +- .../8684589e31d96ab229e8c4feb4d704bb.asciidoc | 2 +- .../8699d35269a47ba867fa8cc766287413.asciidoc | 2 +- .../8739fad1fb2323950b673acf0c9f2ff5.asciidoc | 2 +- .../87416e6a1ca2da324dbed6deb05303eb.asciidoc | 2 +- .../87469f8b7e9b965408479d276c3ce8aa.asciidoc | 2 +- .../87733deeea4b441b595d19a0f97346f0.asciidoc | 2 +- .../87b0b496747ad6c1e4ab4b462128fa1c.asciidoc | 2 +- .../87c3e9963400a3e4b296ef8d1c86fae3.asciidoc | 2 +- .../87c42ef733a50954e4d757fc0a08decc.asciidoc | 2 +- .../88554b79dba8fd79991855a692b69ff9.asciidoc | 2 +- .../88cecae3f0363fc186d955dd8616b5d4.asciidoc | 2 +- .../88cf60d3310a56d8ae12704abc05b565.asciidoc | 2 +- .../89b72dd7f747f6297c2b089e8bc807be.asciidoc | 2 +- .../89d2a3748dc14c6d5d4c6f94b9b03938.asciidoc | 2 +- .../89f8eac24f3ec6a7668d580aaf0eeefa.asciidoc | 2 +- ...8a0b5f759de3f27f0801c1176e616117.asciidoc} | 5 +- .../8a12cd824404d74f098d854716a26899.asciidoc | 2 +- .../8a1f6cffa653800282c0ae160ee375bc.asciidoc | 2 +- .../8aa17bd25a3f2d634e5253b4b72fec4c.asciidoc | 2 +- .../8b144b3eb20872595fd7cbc6c245c7c8.asciidoc | 2 +- .../8b301122cbf42be6eafeda714a36559e.asciidoc | 2 +- .../8b3a94495127efd9d56b2cd7f3eecdca.asciidoc | 2 +- .../8b8b6aac2111b2d8b93758ac737e6543.asciidoc | 2 +- .../8bf1e7a6d529547906ba8b1d6501fa0c.asciidoc | 2 +- .../8c5d48252cd6d1ee26a2bb817f89c78e.asciidoc | 2 +- .../8c639d3eef5c2de29e12bd9c6a42d3d4.asciidoc | 39 +++++ .../8c6f3bb8abae9ff1d21e776f16ad1c86.asciidoc | 2 +- .../8c9081dc738d1290fd76071b283fcaec.asciidoc | 2 +- .../8cd00a3aba7c3c158277bc032aac2830.asciidoc | 2 +- .../8d4dda5d988d568f4f4210a6387e026f.asciidoc | 2 +- .../8d9b04f2a97f4229dec9e620126de049.asciidoc | 2 +- .../8de6fed6ba2b94ce6a12ce076be2b4d7.asciidoc | 2 +- .../8e208098a0156c4c92afe0a06960b230.asciidoc | 2 +- .../8e2bbef535fef688d397e60e09aefa7f.asciidoc | 2 +- .../8e43bb5b7946143e69d397bb81d87df0.asciidoc | 2 +- .../8e89fee0be6a436c4e3d7c152659c47e.asciidoc | 2 +- .../8e9e7dc5fad2b2b8e74ab4dc225d9c53.asciidoc | 2 +- .../8f4a7f68f2ca3698abdf20026a2d8c5f.asciidoc | 2 +- .../90083d93e46fad2524755b8d4d1306fc.asciidoc | 2 +- .../9054187cbab5c9e1c4ca2a4dba6a5db0.asciidoc | 2 +- .../90631797c7fbda43902abf2cc0ea8304.asciidoc | 2 +- .../90c087560ea6c0b7405f710971c86ef0.asciidoc | 2 +- .../90e06d5ec5e454832d8fbd2e73ec2248.asciidoc | 2 +- .../9116ee8a5b00cc877291ed5559563f24.asciidoc | 2 +- .../9129dec88d35571b3166c6677297f03b.asciidoc | 2 +- .../9138550002cb26ab64918cce427963b8.asciidoc | 2 +- .../9143be4f137574271953a7a8107e175b.asciidoc | 2 +- .../9169d19a80175ec94f80865d0f9bef4c.asciidoc | 2 +- .../91c01fcad9bf341d039a15dfc593dcd7.asciidoc | 2 +- .../91c925fc71abe0ddfe52457e9130363b.asciidoc | 2 +- .../91e106a2affbc8df32cd940684a779ed.asciidoc | 18 ++ .../9200ed8d5f798a158def4c526e41269e.asciidoc | 2 +- .../9225841fdcddaf83ebdb90c2b0399e20.asciidoc | 2 +- .../9250ac57ec81d5192e8ad4c462438489.asciidoc | 39 +++++ .../926c0134aeaad53bd0f3bdad9c430217.asciidoc | 2 +- .../9270964d35d172ea5b193c5fc7a473dd.asciidoc | 2 +- .../9298aaf8232a819e79b3bf8471245e98.asciidoc | 2 +- .../92fe53019958ba466d1272da0834cf53.asciidoc | 2 +- .../931817b168e055ecf738785c721125dd.asciidoc | 31 ++++ .../934aa38c3adcc4cf74ea40cd8736876c.asciidoc | 2 +- .../935566d5426d44ade486a49ec5289741.asciidoc | 2 +- .../9382f022086c692ba05efb0acae65946.asciidoc | 2 +- .../93fb59d3204f37af952198b331fb6bb7.asciidoc | 2 +- .../940e8c2c7ff92d71f489bdb7183c1ce6.asciidoc | 2 +- .../9410af79177dd1df9b7b16229a581e18.asciidoc | 2 +- 
.../941c8d05486200e835d97642e4ee05d5.asciidoc | 2 +- .../94246f45025ed394cd6415ed8d7a0588.asciidoc | 2 +- .../944806221eb89f5af2298ccdf2902277.asciidoc | 2 +- .../946522c26d02bebf5c527ba28e55c724.asciidoc | 2 +- .../948418e0ef1b7e7cfee2f11be715d7d2.asciidoc | 111 +++++++++++++ .../950f1230536422567f99a205ff4165ec.asciidoc | 2 +- .../95414139c7b1203e3c2d99a354415801.asciidoc | 2 +- .../95c1b376652533c352bbf793c74d1b08.asciidoc | 2 +- .../9608820dbeac261ba53fb89bb9400560.asciidoc | 2 +- .../9684e5fa8c22a07a372feb6fc1f5f7c0.asciidoc | 2 +- .../968fb5b92aa65af09544f7c002b0953e.asciidoc | 2 +- ...96e88611f99e6834bd64b58dc8a282c1.asciidoc} | 6 +- .../971fd23adb81bb5842c7750e0379336a.asciidoc | 2 +- .../973a3ff47fc4ce036ecd9bd363fef9f7.asciidoc | 2 +- .../975b4b92464d52068516aa2f0f955cc1.asciidoc | 2 +- .../97a3216af3d4b4d805d467d9c715cb3e.asciidoc | 2 +- .../97babc8d19ef0866774576716eb6d19e.asciidoc | 2 +- .../97c6c07f46f4177f0565a04bc50924a3.asciidoc | 41 +++++ .../97ea5ab17213cb1faaf6f3ea13607098.asciidoc | 2 +- .../97f5df84efec655f479fad78bc392d4d.asciidoc | 2 +- .../986f892bfa4dfdf1da8455fdf84a4b0c.asciidoc | 2 +- .../98855f4bda8726d5d123aeebf7869e47.asciidoc | 2 +- .../99803d7b111b862c0c82e9908e549b16.asciidoc | 2 +- .../99c1cfe60f3ccf5bf3abd24c31ed9034.asciidoc | 2 +- .../99fb82d49ac477e6a9dfdd71f9465374.asciidoc | 11 ++ .../9a05cc10eea1251e23b82a4549913536.asciidoc | 2 +- .../9a09d33ec11e20b6081cae882282ca60.asciidoc | 2 +- .../9a203aae3e1412d919546276fb52a5ca.asciidoc | 2 +- .../9a49b7572d571e00e20dbebdd30f9368.asciidoc | 2 +- .../9a4d5e41c52c20635d1fd9c6e13f6c7a.asciidoc | 2 +- .../9a743b6575c6fe5acdf46024a7fda8a1.asciidoc | 2 +- .../9ab351893dae65ec97fd8cb6832950fb.asciidoc | 2 +- .../9ad0864bcd665b63551e944653d32423.asciidoc | 2 +- .../9ae268058c0ea32ef8926568e011c728.asciidoc | 2 +- .../9aedc45f83e022732789e8d796f5a43c.asciidoc | 2 +- .../9af44592fb2e78fb17ad3e834bbef7a7.asciidoc | 2 +- .../9afa0844883b7471883aa378a8dd10b4.asciidoc | 9 +- .../9b0f34d122a4b348dc86df7410d6ebb6.asciidoc | 2 +- .../9b30a69fec54cf01f7af1b04a6e15239.asciidoc | 2 +- .../9b345e0bfd45f3a37194585ec9193478.asciidoc | 2 +- .../9bae72e974bdeb56007d9104e73eff92.asciidoc | 2 +- .../9bb24fe09e3d1c73a71d00b994ba8cfb.asciidoc | 2 +- .../9c01db07c9ac395b6370e3b33965c21f.asciidoc | 7 +- .../9c021836acf7c0370e289f611325868d.asciidoc | 2 +- .../9c7c8051592b6af3adb5d7c490849068.asciidoc | 2 +- .../9cbb097e5498a9fde39e3b1d3b62a4d2.asciidoc | 2 +- .../9cc64ab2f60f995f5dbfaca67aa6dd41.asciidoc | 2 +- .../9cc952d4a03264b700136cbc45abc8c6.asciidoc | 38 +++++ .../9cd37d0ccbc66ad47ddb626564b27cc8.asciidoc | 2 +- .../9cf6c7012a4f2bb562bc256aa28c3409.asciidoc | 2 +- .../9d66cb59711f24e6b4ff85608c9b5a1b.asciidoc | 2 +- .../9d79645ab3a9da3f63c54a1516214a5a.asciidoc | 2 +- .../9e563b8d5a7845f644db8d5bbf453eb6.asciidoc | 2 +- .../9e9717d9108ae1425bfacf71c7c44539.asciidoc | 2 +- .../9f99be2d58c48a6bf8e892aa24604197.asciidoc | 2 +- .../9fda516a5dc60ba477b970eaad4429db.asciidoc | 2 +- .../9feff356f302ea4915347ab71cc4887a.asciidoc | 2 +- .../9ff9b2a73419a6c82f17a358b4991499.asciidoc | 2 +- .../a00311843b5f8f3e9f7d511334a828b1.asciidoc | 2 +- .../a0f4e902d18460337684d74ea932fbe9.asciidoc | 2 +- .../a159e1ce0cba7a35ce44db9bebad22f3.asciidoc | 2 +- .../a162eb50853331c80596f5994e9d1c38.asciidoc | 6 +- .../a1d0603b24a5b048f0959975d8057534.asciidoc | 2 +- .../a1dda7e7c01be96a4acf7b725d70385f.asciidoc | 2 +- .../a1f70bc71b763b58206814c40a7440e7.asciidoc | 2 +- .../a2b2ce031120dac49b5120b26eea8758.asciidoc | 2 +- 
.../a2c3e284354e8d49cf51bb8dd5ef3613.asciidoc | 2 +- .../a3646b59da66b9ab68bdbc8dc2e6a9be.asciidoc | 2 +- .../a3a2856ac2338a624a1fa5f31aec4db4.asciidoc | 2 +- .../a3a64d568fe93a22b042a8b31b9905b0.asciidoc | 2 +- .../a3ce0cfe2176f3d8a36959a5916995f0.asciidoc | 2 +- .../a3cfd350c73a104b99a998c6be931408.asciidoc | 2 +- .../a3d943ac9d45b4eff4aa0c679b4eceb3.asciidoc | 2 +- .../a3e79d6c626a490341c5b731acbb4a5d.asciidoc | 2 +- .../a45605347d6438e7aecdf3b37198616d.asciidoc | 2 +- .../a4a3c3cd09efa75168dab90105afb2e9.asciidoc | 2 +- .../a520168c1c8b454a8f102d6a13027c73.asciidoc | 2 +- .../a547bb926c25f670078b98fbe67de3cc.asciidoc | 2 +- .../a5dfcfd1cfb3558e7912456669c92eee.asciidoc | 2 +- .../a5e2b3588258430f2e595abda98e3943.asciidoc | 2 +- .../a5e793d82a4455cf4105dac82a156617.asciidoc | 2 +- .../a5f9eb40087921e67d820775acf71522.asciidoc | 2 +- .../a6204edaa0bcf7b82a89ab4f6bda0914.asciidoc | 2 +- .../a692b4c0ca7825c467880b346841f5a5.asciidoc | 2 +- .../a69b1ce5cc9528fb3639185eaf241ae3.asciidoc | 2 +- .../a6bb306ca250cf651f19cae808b97012.asciidoc | 2 +- .../a6be6c1cb4a556866fdccb0dee2f1dea.asciidoc | 2 +- .../a6fdd0100cd362df54af6c95d1055c96.asciidoc | 2 +- .../a72613de3774571ba24def4b495161b5.asciidoc | 2 +- .../a75765e3fb130421dde6c3c2f12e8acb.asciidoc | 2 +- .../a78dfb844d385405d4b0fb0e09b4a5a4.asciidoc | 2 +- .../a7cf31f4b907e4c00132aca75f55790c.asciidoc | 2 +- .../a811b82ba4632bdd9065829085188bc9.asciidoc | 2 +- .../a861a89f52008610e813b9f073951c58.asciidoc | 2 +- .../a960b43e720b4934edb74ab4b085ca77.asciidoc | 2 +- .../a985e6b7b2ead9c3f30a9bc97d8b598e.asciidoc | 2 +- .../a98692a565904ec0783884d81a7b71fc.asciidoc | 2 +- .../a9c08023354aa9b9023807962df71d13.asciidoc | 2 +- .../a9dd9595e96c307b8c798beaeb571521.asciidoc | 2 +- .../a9f14efc26fdd3c37a71f06c310163d9.asciidoc | 27 +++ .../a9fe70387d9c96a07830e1859c57efbb.asciidoc | 2 +- .../aa6282d4bc92c753c4bd7a5b166abece.asciidoc | 2 +- .../aa676d54a59dee87ecd28bcc1edce59b.asciidoc | 2 +- .../aa7cf5df36b867aee5e3314ac4b4fa68.asciidoc | 2 +- .../aa7f62279b487989440d423c1ed4a1c0.asciidoc | 2 +- .../aa814309ad5f1630886ba75255b444f5.asciidoc | 2 +- .../aab3de5a8a3fefbe012fc2ed50dfe4d6.asciidoc | 2 +- .../aaba346e0becdf12db13658296e0b8a1.asciidoc | 2 +- .../ab8b4537fad80107bc88f633d4039a52.asciidoc | 2 +- .../ab8de34fcfc0277901cb39618ecfc9d5.asciidoc | 2 +- .../abd4fc3ce7784413a56fe2dcfe2809b5.asciidoc | 2 +- .../abdbc81e799e28c833556b1c29f03ba6.asciidoc | 2 +- .../ac22cc2b0f4ad659055feed2852a2d59.asciidoc | 39 +++++ .../ac497917ef707538198a8458ae3d5c6b.asciidoc | 2 +- .../ac85e05c0bf2fd5099fbcb9c492f447e.asciidoc | 2 +- .../acb850c08f51226eadb75be09e336076.asciidoc | 2 +- .../ad2b8aed84c67cdc295917b47a12d3dc.asciidoc | 2 +- .../ad3b159657d4bcb373623fdc61acc3bf.asciidoc | 2 +- .../ad88e46bb06739991498dee248850223.asciidoc | 2 +- .../ad92a1a8bb1b0f26d1536fe8ba4ffd17.asciidoc | 2 +- ...ad9889fd8a4b5930e312a51f3bc996dc.asciidoc} | 4 +- .../adc18ca0c344d81d68ec3b9422b54ff5.asciidoc | 2 +- .../adf36e2d8fc05c3719c91912481c4e19.asciidoc | 2 +- .../ae4aa368617637a390074535df86e64b.asciidoc | 2 +- .../ae82eb17c23cb8e5761cb6240a5ed0a6.asciidoc | 2 +- .../ae9ccfaa146731ab9176df90670db1c2.asciidoc | 2 +- .../aeaa97939a05f5b2f3f2c43b771f35e3.asciidoc | 2 +- .../af18f5c5fb2364ae23c6a14431820aba.asciidoc | 2 +- .../af517b6936fa41d124d68b107b2efdc3.asciidoc | 2 +- .../af607715d0693587dd12962266359a96.asciidoc | 2 +- .../af91019991bee136df5460e2fd4ac72a.asciidoc | 2 +- .../af970eb8b93cdea52209e1256eba9d8c.asciidoc | 2 +- .../afbea723c4ba0d50c67d04ebb73a4101.asciidoc | 2 
+- .../afc0a9cffc0100797a3f093094394763.asciidoc | 2 +- .../afe30f159937b38d74c869570cfcd369.asciidoc | 2 +- .../afe87a2850326e0328fbebbefec2e839.asciidoc | 2 +- .../b02e4907c9936c1adc16ccce9d49900d.asciidoc | 2 +- .../b0bddf2ffaa83049b195829c06b875cd.asciidoc | 6 +- .../b0fa301cd3c6b9db128e34114f0c1e8f.asciidoc | 2 +- .../b11a0675e49df0709be693297ca73a2c.asciidoc | 2 +- .../b16700002af3aa70639f3e88c733bf35.asciidoc | 2 +- .../b176e0d428726705298184ef39ad5cb2.asciidoc | 2 +- .../b1ee1b0b5f7af596e5f81743cfd3755f.asciidoc | 2 +- .../b1efa1c51a34dd5ab5511b71a399f5b1.asciidoc | 2 +- .../b22559a7c319f90bc63a41cac1c39b4c.asciidoc | 2 +- .../b23ed357dce8ec0014708b7b2850a8fb.asciidoc | 2 +- .../b2440b492149b705ef107137fdccb0c2.asciidoc | 2 +- .../b25256ed615cd837461b0bfa590526b7.asciidoc | 2 +- .../b3756e700d0f6c7e8919003bdf26bc8f.asciidoc | 2 +- .../b37919cc438b47477343833b4e522408.asciidoc | 2 +- .../b3a711c3deddcdb8a3f6623184a8b794.asciidoc | 2 +- .../b45a8c6fc746e9c90fd181e69a605fad.asciidoc | 2 +- .../b47945c7db8868dd36ba079b742f2a90.asciidoc | 2 +- .../b4aec2a1d353852507c091bdb629b765.asciidoc | 2 +- .../b515427f8685ca7d79176def672d19fa.asciidoc | 2 +- .../b583bf8d3a2f49d633aa2cfed5606418.asciidoc | 2 +- .../b590241c4296299b836fbb5a95bdd2dc.asciidoc | 18 ++ .../b5bc1bb7278f2f95bc54790c78c928e0.asciidoc | 2 +- .../b61afb7ca29a11243232ffcc8b5a43cf.asciidoc | 2 +- .../b620ef4400d2f660fe2c67835938442c.asciidoc | 2 +- .../b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc | 13 +- .../b66be1daf6c220eb66d94e708b2fae39.asciidoc | 2 +- .../b6d278737d27973e498ac61cda9e5126.asciidoc | 21 +++ .../b6e385760e036e36827f719b540d9c11.asciidoc | 2 +- .../b6f690896001f8f9ad5bf24e1304a552.asciidoc | 2 +- .../b728d6ba226dba719aadcd8b8099cc74.asciidoc | 2 +- .../b7df0848b2dc3093f931976db5b8cfff.asciidoc | 2 +- .../b7f8bd33c22f3c93336ab57c2e091f73.asciidoc | 2 +- .../b80e1f5b26bae4f3c2f8a604b7caaf17.asciidoc | 2 +- .../b839f79a5d58506baed5714f1876ab55.asciidoc | 2 +- .../b85716ba42a57096452665c38995da7d.asciidoc | 2 +- .../b88a2d96da1401d548a4540cca223d27.asciidoc | 2 +- .../ba10b644a4e9a2e7d78744ca607355d0.asciidoc | 2 +- .../ba21a7fbb74180ff138d97032f28ace7.asciidoc | 2 +- .../ba6040de55afb2c8fb9e5b24bb038820.asciidoc | 2 +- .../baadbfffcd0c16f51eb3537f516dc3ed.asciidoc | 2 +- .../bb293e1bdf0c6f6d9069eeb7edc9d399.asciidoc | 2 +- .../bb2ba5d1885f87506f90dbb002e518f4.asciidoc | 49 ++++++ .../bb9e268ec62d19ca2a6366cbb48fae68.asciidoc | 2 +- .../bcae0f00ae1e6f08fa395ca741fe84f9.asciidoc | 2 +- .../bcb572658986d69ae17c28ddd7e4bfd8.asciidoc | 2 +- .../bccd4eb26b1a325d103b12e198a13c08.asciidoc | 12 ++ .../bcdfaa4487747249699a86a0dcd22f5e.asciidoc | 7 +- .../bd23c3a03907b1238dcb07ab9eecae7b.asciidoc | 2 +- .../bd3d710ec50a151453e141691163af72.asciidoc | 2 +- .../bd458073196a19ecdeb24a8016488c20.asciidoc | 2 +- .../bd57976bc93ca64b2d3e001df9f06c82.asciidoc | 2 +- .../bd5bd5d8b3d81241335fe1e5747080ac.asciidoc | 2 +- .../bd68666ca2e0be12f7624016317a62bc.asciidoc | 2 +- .../bd6f30e3caa3632260da42d9ff82c98c.asciidoc | 2 +- .../bd7a1417fc27b5a801334ec44462b376.asciidoc | 2 +- .../bdb30dd52d32f50994008f4f9c0da5f0.asciidoc | 2 +- .../bdc1afd2181154bb78797360f9dbb1a0.asciidoc | 2 +- .../bdc55256fa5f701680631a149dbb75a9.asciidoc | 22 +++ .../bdd28276618235487ac96bd6679bc206.asciidoc | 31 ++++ .../bde74dbbcef8ebf8541cae2c1711255f.asciidoc | 2 +- .../be3a6431d01846950dc1a39a7a6a1faa.asciidoc | 2 +- .../be5b415d7f33d6f0397ac2f8b5c10521.asciidoc | 2 +- .../be5c5a9c25901737585e4fff9195da3c.asciidoc | 2 +- 
.../be5fef0640c3a650ee96f84e3376a1be.asciidoc | 2 +- .../be6b0bfcdce1ef100af89f74da5d4748.asciidoc | 2 +- .../beaf43b274b0f32cf3cf48f59e5cb1f2.asciidoc | 2 +- .../beb0b9ff4f68672273fcff1b7bae706b.asciidoc | 2 +- .../befa73a8a419fcf3b7798548b54a20bf.asciidoc | 2 +- .../bf1de9fa1b825fa875d27fa08821a6d1.asciidoc | 2 +- .../bf3c3bc41c593a80faebef1df353e483.asciidoc | 22 +++ .../bfb8a15cd05b43094ffbce8078bad3e1.asciidoc | 2 +- .../bfdad8a928ea30d7cf60d0a0a6bc6e2e.asciidoc | 2 +- .../c00c9412609832ebceb9e786dd9542df.asciidoc | 2 +- .../c067182d385f59ce5952fb9a716fbf05.asciidoc | 2 +- .../c088ce5291ae28650b6091cdec489398.asciidoc | 2 +- .../c0ddfb2e6315f5bcf0d3ef414b5bbed3.asciidoc | 2 +- .../c0ebaa33e750b87555dc352073f692e8.asciidoc | 2 +- .../c0ff8b3db994c4736f7579dde18097d2.asciidoc | 2 +- .../c12d6e962f083c728f9397932f05202e.asciidoc | 2 +- .../c1409f591a01589638d9b00436ce42c0.asciidoc | 2 +- .../c18100d62ed31bc9e05f62900156e6a8.asciidoc | 2 +- .../c187b52646cedeebe0716327add65642.asciidoc | 2 +- .../c1ac9e53b04f7acee4b4933969d6b574.asciidoc | 2 +- .../c1ad9ff64728a5bfeeb485e60ec694a1.asciidoc | 2 +- .../c208a06212dc0cf6ac413d4f2c154296.asciidoc | 2 +- .../c21aaedb5752a83489476fa3b5e2e9ff.asciidoc | 2 +- .../c21eb4bc30087188241cbba6b6b89999.asciidoc | 2 +- .../c23e32775340d7bc6f46820313014d8a.asciidoc | 2 +- .../c27b7d9836aa4ea756f59e9c42911721.asciidoc | 2 +- .../c38c882c642dd412e8fa4c3eed49d12f.asciidoc | 2 +- .../c47f030216a3c89f92f31787fc4d5df5.asciidoc | 2 +- .../c4a1d03dcfb82913d0724a42b0a89f20.asciidoc | 2 +- .../c4b727723b57052b6504bb74fe09abc6.asciidoc | 2 +- .../c526fca1609b4c3c1d12dfd218d69a50.asciidoc | 2 +- .../c54597143ac86540726f6422fd98b22e.asciidoc | 2 +- .../c580990a70028bb49cca8a6bde86bbf6.asciidoc | 7 +- .../c5ba7c4badb5ef5ca32740106e4aa6b6.asciidoc | 2 +- .../c6339d09f85000a6432304b0ec63b8f6.asciidoc | 2 +- .../c64b61bedb21b9def8fce5092e677af9.asciidoc | 2 +- .../c65b00a285f510dcd2865aa3539b4e03.asciidoc | 2 +- .../c67b0f00c2e690303c0e5af2f51e0fea.asciidoc | 2 +- .../c6b5c695a9b757b5e7325345b206bde5.asciidoc | 2 +- .../c6bdd5c7de79d6d9ac8e33a397b511e8.asciidoc | 2 +- .../c765ce78f3605c0e70d213f22aac8a53.asciidoc | 2 +- .../c87038b96ab06d9a741a130f94de4f02.asciidoc | 2 +- .../c873f9cd093e26515148f052e28c7805.asciidoc | 2 +- .../c8e2109b19d50467ab83a40006462e9f.asciidoc | 2 +- .../c956bf1f0829a5f0357c0494ed8b6ca3.asciidoc | 2 +- .../c97fd95ebdcf56cc973582e37f732ed2.asciidoc | 2 +- .../c9c396b94bb88098477e2b08b55a12ee.asciidoc | 2 +- .../c9ce07a7d3d8a317f08535bdd3aa69a3.asciidoc | 2 +- .../ca3bcd6278510ebced5f74484033cb36.asciidoc | 2 +- .../ca5ae0eb7709f3807bc6239cd4bd9141.asciidoc | 2 +- .../ca5dda98e977125d40a7fe1e178e213f.asciidoc | 2 +- .../ca98afbd6a90f63e02f62239d225313b.asciidoc | 2 +- .../caaafef1a76c2bec677704c2dc233218.asciidoc | 2 +- .../caab99520d3fe41f6154d74a7f696057.asciidoc | 2 +- .../cafed0e2c2b1d1574eb4a5ecd514a97a.asciidoc | 2 +- .../cb0c3223fd45148497df73adfba2e9ce.asciidoc | 2 +- .../cb2f70601cb004b9ece9b0b43a9dc21a.asciidoc | 2 +- .../cb71332115c92cfb89375abd30b8bbbb.asciidoc | 2 +- .../cba3462a307e2483c14e3e198f6960e3.asciidoc | 2 +- .../cbfd6f23f8283e64ec3157c65bb722c4.asciidoc | 2 +- .../cc56be758d5d75febbd975786187c861.asciidoc | 2 +- .../cc90639f2e65bd89cb73296cac6135cf.asciidoc | 2 +- .../cc9dac8db7a1482e2fbe3235197c3de1.asciidoc | 2 +- .../ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc | 7 +- .../ccec66fb20d5ede6c691e0890cfe402a.asciidoc | 2 +- .../cd373a6eb1ef4748616500b26fab3006.asciidoc | 2 +- .../cd38c601ab293a6ec0e2df71d0c96b58.asciidoc | 2 +- 
.../cd67ad2c09fafef2d441c3502d0bb3d7.asciidoc | 2 +- .../cd8006165ac64f1ef99af48e5a35a25b.asciidoc | 2 +- .../cd93919e13f656ad2e6629f45c579b93.asciidoc | 2 +- .../cdb68b3f565df7c85e52a55864b37d40.asciidoc | 2 +- .../cdb7613b445e6ed6e8b473f9cae1af90.asciidoc | 35 ++++ .../cdce7bc083dfb36e6f1d465a5c9d5049.asciidoc | 2 +- .../cdd29b01e730b3996de68a2788050021.asciidoc | 2 +- .../cdf400299acd1c7b1b7bb42e284e3d08.asciidoc | 2 +- .../cdfd4fef983c1c0fe8d7417f67d01eae.asciidoc | 2 +- .../ce0c3d7330727f7673cf68fc9a1cfb86.asciidoc | 2 +- .../ce2c2e8f5a2e4daf051b6e10122e5aae.asciidoc | 2 +- .../ce3c391c2b1915cfc44a2917bca71d19.asciidoc | 2 +- .../ce8471d31e5d60309e142feb040fd2f8.asciidoc | 2 +- .../ce8eebfb810335803630abe83278bee7.asciidoc | 2 +- ...cecfaa659af6646b3b67d7b311586fa0.asciidoc} | 4 +- .../cedb56a71cc743d80263ce352bb21720.asciidoc | 2 +- .../cee491dd0a8d10ed0cb11a2faa0c99f0.asciidoc | 2 +- .../cee591c1fc70d4f180c623a3a6d07755.asciidoc | 2 +- .../cf8ca470156698dbf47fdc822d0a714f.asciidoc | 2 +- .../cfad3631be0634ee49c424f9ccec62d9.asciidoc | 2 +- .../cffce059425d3d21e7f9571500d63524.asciidoc | 2 +- .../d01a590fa9ea8a0cb34ed8dda502296c.asciidoc | 11 -- .../d01d309b0257d6fbca6d0941adeb3256.asciidoc | 2 +- .../d03139a851888db53f8b7affd85eb495.asciidoc | 2 +- .../d095b422d9803c02b62c01adffc85376.asciidoc | 2 +- .../d14fe5838fc02224f4b5ade2626d6026.asciidoc | 2 +- .../d1b53bc9794e8609bd6f2245624bf977.asciidoc | 2 +- .../d1d8b6e642db1a7c70dbbf0fe6d8e92d.asciidoc | 2 +- .../d1e0fee64389e7c8d4c092030626b61f.asciidoc | 2 +- .../d1fde25de1980b7e84fa878289fd0bcb.asciidoc | 2 +- .../d29031409016b2b798148ef173a196ae.asciidoc | 24 +++ .../d3263afc69b6f969b9bbd8738cd07b97.asciidoc | 2 +- .../d35c8cf7a98b3f112e1de8797ec6689d.asciidoc | 7 +- .../d3672a87a857ddb87519788236e57497.asciidoc | 28 ++++ .../d4158d486e7fee2702a14068b69e3b33.asciidoc | 156 ++++++++++++++++++ .../d4323be84152fa91abd76e966d4751dc.asciidoc | 2 +- .../d46e9739bbf25eb2f7225f58ab08b2a7.asciidoc | 2 +- .../d48b274a4b6098ffef0c016c6c945fb9.asciidoc | 2 +- .../d4b405ef0302227e050ac8f0e39068e1.asciidoc | 2 +- .../d4d450f536d747d5ef5050d2d8c66f09.asciidoc | 2 +- .../d4df39f72d3a3b80cd4042f6a21c3f19.asciidoc | 16 ++ .../d4ef6ac034c4d42cb75d830ec69146e6.asciidoc | 2 +- .../d50b030edfe6d1128eb76aa5ba9d4e27.asciidoc | 2 +- ...d5242b1ab0213f25e5e0742032274ce6.asciidoc} | 2 +- .../d524db57be9f16abac5396895b9a2a59.asciidoc | 2 +- .../d5533f08f5cc0479f07a46c761f0786b.asciidoc | 2 +- .../d56a9d89282df56adbbc34b91390ac17.asciidoc | 2 +- .../d5d0ecf75843ddb5f92cfebd089e53e9.asciidoc | 2 +- .../d5ead6aacbfbedc8396f87bb34acc880.asciidoc | 2 +- .../d603e76ab70131f7ec6b08758f95a0e3.asciidoc | 2 +- .../d681508a745b2bc777d47ba606d24224.asciidoc | 2 +- .../d6a21afa4a94b9baa734eac430940bcf.asciidoc | 2 +- .../d7141bd4d0db964f5cc4a872ad79dce9.asciidoc | 2 +- .../d775836a0d7abecc6637aa988f204c30.asciidoc | 2 +- .../d7898526d239d2aea83727fb982f8f77.asciidoc | 2 +- .../d7ae456f119246e95f2f4c37e7544b8c.asciidoc | 2 +- .../d7fe687201ac87b307cd06ed015dd317.asciidoc | 2 +- .../d80ac403d8d936ca9dec185c7da13f2f.asciidoc | 2 +- .../d870d5bd1f97fc75872a298fcddec513.asciidoc | 2 +- .../d87175daed2327565d4325528c6d8b38.asciidoc | 2 +- .../d89d36741d906a71eca6c144e8d83889.asciidoc | 2 +- .../d8a82511cb94f49b4fe4828fee3ba074.asciidoc | 2 +- .../d8c053ee26c1533ce936ec81101d8e1b.asciidoc | 11 ++ .../da0fe1316e5b8fd68e2a8525bcd8b0f6.asciidoc | 2 +- .../da18bae37cda566c0254b30c15221b01.asciidoc | 2 +- .../da3f280bc65b581fb3097be768061bee.asciidoc | 2 +- 
.../daae2e6acebc84e537764f4ba07f2e6e.asciidoc | 2 +- .../dac8ec8547bc446637fd97d9fa872f4f.asciidoc | 2 +- .../db19cc7a26ca80106d86d688f4be67a8.asciidoc | 2 +- .../db773f690edf659ac9b044dc854c77eb.asciidoc | 2 +- .../db8710a9793ae0817a45892d33468160.asciidoc | 2 +- .../dbc50b8c934171e94604575a8b36f349.asciidoc | 2 +- .../dbdd58cdeac9ef20b42ff73e4864e697.asciidoc | 2 +- .../dbf93d02ab86a09929a21232b19709cc.asciidoc | 2 +- .../dbf9abc37899352751dab0ede62af2fd.asciidoc | 2 +- .../dc468865da947b4a9136a5b92878d918.asciidoc | 2 +- .../dc8c94c9bef1f879282caea5c406f36e.asciidoc | 2 +- .../dcc02ad69da0a5aa10c4e53b34be8ec0.asciidoc | 2 +- .../dcf82f3aacae49c0bb4ccbc673f13e9f.asciidoc | 2 +- .../dcfa7f479a33f459a2d222a92e651451.asciidoc | 2 +- .../dd4f051ab62f0507e3b6e3d6f333e85f.asciidoc | 2 +- .../dd71b0c9f9197684ff29c61062c55660.asciidoc | 7 +- .../dda949d20d07a9edbe64cefc623df945.asciidoc | 2 +- .../ddaadd91b7743a1c7e946ce1b593cd1b.asciidoc | 14 -- .../dde283eab92608e7bfbfa09c6482a12e.asciidoc | 2 +- .../ddf375e4b6175d830fa4097ea0b41536.asciidoc | 2 +- .../ddf56782ecc7eaeb3115e150c4830013.asciidoc | 2 +- .../de2f59887737de3a27716177b60393a2.asciidoc | 2 +- .../de876505acc75d371d1f6f484c449197.asciidoc | 2 +- .../de90249caeac6f1601a7e7e9f98f1bec.asciidoc | 2 +- .../df0d27d3abd286b75aef7ddcf0e6c66c.asciidoc | 2 +- .../df7dbac966b67404b8bfa9cdda5ef480.asciidoc | 2 +- .../df7ed126d8c92ddd3655c59ce4f305c9.asciidoc | 2 +- ...df81b88a2192dd6f9912e0c948a44487.asciidoc} | 2 +- .../dfcdcd3ea6753dcc391a4a52cf640527.asciidoc | 2 +- .../dfce1be1d035aff0b8fdf4a8839f7795.asciidoc | 2 +- .../dff61a76d5ef9ca8cbe59a416269a84b.asciidoc | 2 +- .../dffbbdc4025e5777c647d8818847b960.asciidoc | 2 +- .../e017c2de6f93a8dd97f5c6e002dd5c4f.asciidoc | 2 +- .../e095fc96504efecc588f97673912e3d3.asciidoc | 2 +- .../e0a7c730ef0f22e3edffe9a254bc56e7.asciidoc | 2 +- .../e0b2f56c34e33ff52f8f9658be2f7ca1.asciidoc | 2 +- .../e0bbfb368eae307e9508ab8d6e9cf23c.asciidoc | 2 +- .../e0d4a800de2d8f4062e69433586c38db.asciidoc | 2 +- .../e1337c6b76defd5a46d05220f9d9c9fc.asciidoc | 2 +- .../e14a5a5a1c880031486bfff43031fa3a.asciidoc | 2 +- .../e20037f66bf54bcac7d10f536f031f34.asciidoc | 13 -- .../e22a1da3c622611be6855e534c0709ae.asciidoc | 2 +- .../e26c96978096ccc592849cca9db67ffc.asciidoc | 2 +- .../e2a22c6fd58cc0becf4c383134a08f8b.asciidoc | 2 +- .../e2a753029b450942a3228e3003a55a7d.asciidoc | 2 +- .../e2bcc8f4ed2b4de82729e7a5a7c8f634.asciidoc | 2 +- .../e2ec9e867f7141b304b53ebc59098f2a.asciidoc | 2 +- .../e3019fd5f23458ae49ad9854c97d321c.asciidoc | 7 +- .../e35abc9403e4aef7d538ab29ccc363b3.asciidoc | 2 +- .../e375c7da666276c4df6664c6821cd5f4.asciidoc | 37 +++++ .../e3fe842951dc873d7d00c8f6a010c53f.asciidoc | 2 +- .../e48e7da65c2b32d724fd7e3bfa175c6f.asciidoc | 2 +- .../e4b38973c74037335378d8480f1ce894.asciidoc | 7 +- .../e4b6a6a921c97b4c0bbe97bd89f4cf33.asciidoc | 2 +- .../e4be53736bcc02b03068fd72fdbfe271.asciidoc | 2 +- .../e4d1f01c025fb797a1d87f372760eabf.asciidoc | 2 +- .../e51a86b666f447cda5f634547a8e1a4a.asciidoc | 2 +- .../e58833449d01379df20ad06dc28144d8.asciidoc | 2 +- .../e5901f48eb8a419b878fc2cb815d8691.asciidoc | 2 +- .../e5f50b31f165462d883ecbff45f74985.asciidoc | 2 +- .../e5f89a04f50df707a0a53ec0f2eecbbd.asciidoc | 2 +- .../e60b7f75ca806f2c74927c3d9409a986.asciidoc | 2 +- .../e61b5abe85000cc954a42e2cd74f3a26.asciidoc | 2 +- .../e63775a2ff22b945ab9d5f630b80c506.asciidoc | 2 +- .../e650d73c57ab313e686fec01e3b0c90f.asciidoc | 2 +- .../e6dcc2911d2416a65eaec9846b956e15.asciidoc | 2 +- .../e6f6d3aeea7ecea47cfd5c3d727f7004.asciidoc | 50 
++++++ .../e71d300cd87f09a9527cf45395dd7eb1.asciidoc | 2 +- ...e77c2f41a7eca765b0c5f734a66d919f.asciidoc} | 4 +- .../e784fc00894635470adfd78a0c46b427.asciidoc | 2 +- .../e7d819634d765cde269e2669e2dc677f.asciidoc | 2 +- .../e8211247c280a3fbbbdd32850b743b7b.asciidoc | 2 +- .../e821d27a8b810821707ba860e31f8b78.asciidoc | 2 +- .../e827a9040e137410d62d10bb3b3cbb71.asciidoc | 2 +- .../e84e23232c7ecc8d6377ec2c16a60269.asciidoc | 2 +- .../e8c348cabe15dfe58ab4c3cc13a963fe.asciidoc | 2 +- .../e8f1c9ee003d115ec8f55e57990df6e4.asciidoc | 2 +- .../e9625da419bff6470ffd9927c59ca159.asciidoc | 2 +- .../e9fc47015922d51c2b05e502ce9c622e.asciidoc | 2 +- .../ea5391267ced860c00214c096e08c8d4.asciidoc | 2 +- .../ea68e3428cc2ca3455bf312d09451489.asciidoc | 2 +- ...ea8c4229afa6dd4f1321355542be9912.asciidoc} | 4 +- .../eafdabe80b21b90495555fa6d9089412.asciidoc | 2 +- .../eb14cedd3bdda9ffef3c118f3d528dcd.asciidoc | 2 +- .../eb4e43b47867b54214a8630172dd0e21.asciidoc | 2 +- .../eb54506fbc71a7d250e86b22d0600114.asciidoc | 2 +- .../eb964d8d7f27c057a4542448ba5b74e4.asciidoc | 2 +- .../eb96d7dd5f3116a50f7a86b729f1a934.asciidoc | 2 +- .../eb9a41f7fc8bdf5559bb9db822ae3a65.asciidoc | 2 +- .../ebb1c7554e91adb4552599f3e5de1865.asciidoc | 2 +- .../ebd76a45e153c4656c5871e23b7b5508.asciidoc | 2 +- .../ec0e50f78390b8622cef4e0b0cd45967.asciidoc | 2 +- .../ec4b43c3ebd8816799fa004596b2f0cb.asciidoc | 14 ++ .../ecfd0d94dd14ef05dfa861f22544b388.asciidoc | 2 +- .../ed09432c6069e41409f0a5e0d1d3842a.asciidoc | 2 +- .../ed3bdf4d6799b43526851e92b6a60c55.asciidoc | 2 +- .../ed5bfa68d01e079aac94de78dc5caddf.asciidoc | 2 +- .../edb25dc0162b039d477cb06aed2d6275.asciidoc | 2 +- .../ee223e604bb695cad2517d28ae63ac34.asciidoc | 2 +- .../ee577c4c7cc723e99569ea2d1137adba.asciidoc | 2 +- .../ee90d1fb22b59d30da339d825303b912.asciidoc | 2 +- .../eec051555c8050d017d3fe38ea59e3a0.asciidoc | 2 +- .../eee6110831c08b9c1b3f56b24656e95b.asciidoc | 2 +- .../ef22234b97cc06d7dd620b4ce7c97b31.asciidoc | 2 +- .../ef33b3b373f7040b874146599db5d557.asciidoc | 2 +- .../ef9c29759459904fef162acd223462c4.asciidoc | 2 +- .../eff8ecaed1ed084909c64450fc363a20.asciidoc | 2 +- .../f04e1284d09ceb4443d67b2ef9c7f476.asciidoc | 2 +- .../f0816beb8ac21cb0940858b72f6b1946.asciidoc | 2 +- .../f128a9dff5051b47efe2c53c4454a68f.asciidoc | 2 +- .../f160561efab38e40c2feebf5a2542ab5.asciidoc | 2 +- .../f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc | 2 +- .../f1bf0c03581b79c3324cfa3246a60e4d.asciidoc | 2 +- .../f1e2af6dbb30fc5335e7d0b5507a2a93.asciidoc | 2 +- .../f2175feadc2abe545899889e6d4ffcad.asciidoc | 2 +- .../f27c28ddbf4c266b5f42d14da837b8de.asciidoc | 2 +- .../f298c4eb50ea97b34c57f8756eb350d3.asciidoc | 2 +- .../f29b2674299ddf51a25ed87619025ede.asciidoc | 2 +- .../f2a5f77f929cc7b893b80f4bd5b1a192.asciidoc | 2 +- .../f2e854b6c99659ccc1824e86c096e433.asciidoc | 2 +- .../f2ec53c0ef5025de8890d0ff8ec287a0.asciidoc | 2 +- .../f2f1cae094855a45fd8f73478bec8e70.asciidoc | 2 +- .../f3594de7ef39ab09b0bb12c1e76bfe6b.asciidoc | 2 +- .../f3697682a886ab129530f3e5c1b30632.asciidoc | 2 +- .../f37173a75cd1b0d683c6f67819dd1de3.asciidoc | 2 +- .../f388e571224dd6850f8c9f9f08fca3da.asciidoc | 2 +- .../f3b185131f40687c25d2f85e1231d8bd.asciidoc | 2 +- .../f3fb3cba44988b6e9fee93316138b2cf.asciidoc | 2 +- .../f43ec4041e3b72bbde063452990bfc4b.asciidoc | 2 +- .../f44d287c6937785eb09b91353c1deb1e.asciidoc | 2 +- .../f454e3f8ad5f5bd82a4a25af7dee9ca1.asciidoc | 2 +- .../f49ac80f0130cae8d0ea6f4472a149dd.asciidoc | 2 +- .../f4b9baed3c6a82be3672cbc8999c2368.asciidoc | 2 +- .../f4dc1286d0a2f8d1fde64fbf12fd9f8d.asciidoc | 
2 +- .../f4f557716049b23f8840d58d71e748f0.asciidoc | 2 +- .../f4fdfe52ecba65eec6beb30d8deb8bbf.asciidoc | 2 +- .../f5140f08f56c64b5789357539f8b9ba8.asciidoc | 2 +- .../f57ce7de0946e9416ddb9150e95f4b74.asciidoc | 2 +- .../f58fd031597e2c3df78bf0efd07206e3.asciidoc | 2 +- .../f63f6343e74bd5c844854272e746de14.asciidoc | 2 +- .../f656c1e64268293ecc8ebd8065628faa.asciidoc | 2 +- .../f66643c54999426c5afa6d5a87435d4e.asciidoc | 2 +- .../f6982ff80b9a64cd5fcac5b20908c906.asciidoc | 2 +- .../f6d493650b4344f17297b568016fb445.asciidoc | 2 +- .../f6df4acf3c7a4f85706ff314b21ebcb2.asciidoc | 2 +- .../f6ead39c5505045543b9225deca7367d.asciidoc | 2 +- .../f6eff830fb0fad200ebfb1e3e46f6f0e.asciidoc | 2 +- .../f6f647eb644a2d236637ff05f833cb73.asciidoc | 5 +- .../f70a54cd9a9f4811bf962e469f2ca2ea.asciidoc | 2 +- .../f70ff57c80cdbce3f1e7c63ee307c92d.asciidoc | 2 +- .../f785b5d17eb59f8d2a353c2dee66eb5b.asciidoc | 2 +- .../f823e4b87ed181b27f73ebc51351f0ee.asciidoc | 2 +- .../f8a0010753b1ff563dc42d703902d2fa.asciidoc | 2 +- .../f8cb1a04c2e487ff006b5ae0e1a7afbd.asciidoc | 2 +- .../f92d2f5018a8843ffbb56ade15f84406.asciidoc | 2 +- .../f95a4d7ab02bf400246c8822f0245f02.asciidoc | 2 +- .../f96d4614f2fc294339fef325b794355f.asciidoc | 2 +- .../f96d8131e8a592fbf6dfd686173940a9.asciidoc | 2 +- .../f978088f5117d4addd55c11ee3777312.asciidoc | 2 +- .../f97aa2efabbf11a534073041eb2658c9.asciidoc | 2 +- .../f9c8245cc13770dff052b6759a749efa.asciidoc | 2 +- .../fa42ae3bf6a300420cd0f77ba006458a.asciidoc | 2 +- .../fa82d86a046d67366cfe9ce65535e433.asciidoc | 2 +- .../fa88f6f5a7d728ec4f1d05244228cb09.asciidoc | 2 +- .../fab702851e90e945c1b62dec0bb6a205.asciidoc | 2 +- .../fabe14480624a99e8ee42c7338672058.asciidoc | 2 +- .../fad26f4fb5a1bc9c38db33394e877d94.asciidoc | 2 +- .../faf7d8b9827cf5c0db5c177f01dc31c4.asciidoc | 2 +- .../fb4799d2fe4011bf6084f89d97d9a4a5.asciidoc | 2 +- .../fc190fbbf71949331266dcb3f46a1198.asciidoc | 2 +- .../fc49437ce2e7916facf58128308c2aa3.asciidoc | 2 +- .../fccbddfba9f975de7e321732874dfb78.asciidoc | 2 +- .../fce5c03a388c893cb11a6696e068543f.asciidoc | 2 +- .../fd2d289e6b725fcc3cbe8fe7ffe02ea0.asciidoc | 2 +- .../fd352b472d44d197022a46fce90b6ecb.asciidoc | 2 +- .../fd60b4092c6552164862cec287359676.asciidoc | 2 +- .../fd620f09dbce62c6f0f603a366623607.asciidoc | 2 +- .../fd9b668eeb1f117950bd4991c7c03fb1.asciidoc | 2 +- .../fdada036a875d7995d5d7aba9c06361e.asciidoc | 2 +- .../fe6e35839f7d7381f8ec535c8f21959b.asciidoc | 2 +- .../febb71d774e0a1fc67454213d7448c53.asciidoc | 2 +- .../ff09e13391cecb2e8b9dd440b37e065f.asciidoc | 2 +- .../ff56ded50c65998c70f3c5691ddc6f86.asciidoc | 2 +- .../ff63ae39c34925dbfa54282ec9989124.asciidoc | 2 +- .../ff776c0fccf93e1c7050f7cb7efbae0b.asciidoc | 2 +- .../ff7b81fa96c3b994efa3dee230512291.asciidoc | 2 +- .../ffd63dd186ab81b893faec3b3358fa09.asciidoc | 2 +- .../fff86117c47f974074284644e8a97a99.asciidoc | 18 ++ docs/guide/release-notes.asciidoc | 47 ++++++ elasticsearch/_version.py | 2 +- 1247 files changed, 3520 insertions(+), 1506 deletions(-) create mode 100644 docs/examples/015e6e6132b6d6d44bddb06bc3b316ed.asciidoc create mode 100644 docs/examples/0165d22da5f2fc7678392b31d8eb5566.asciidoc create mode 100644 docs/examples/0722b302b2b3275a988d858044f99d5d.asciidoc rename docs/examples/{844928da2ff9a1394af5347a5e2e4f78.asciidoc => 074e4602d1ca54412380a40867d078bc.asciidoc} (69%) create mode 100644 docs/examples/082e78c7a2061a7c4a52b494e5ede0e8.asciidoc create mode 100644 docs/examples/0bc6155e0c88062a4d8490da49db3aa8.asciidoc rename 
docs/examples/{160986f49758f4e8345d183a842f6351.asciidoc => 0c52af573c9401a2a687e86a4beb182b.asciidoc} (78%) create mode 100644 docs/examples/0d689ac6e78be5d438f9b5d441be2b44.asciidoc delete mode 100644 docs/examples/0e83f140237d75469a428ff403564bb5.asciidoc create mode 100644 docs/examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc create mode 100644 docs/examples/141ef0ebaa3b0772892b79b9bb85efb0.asciidoc delete mode 100644 docs/examples/1420a22aa817c7a996baaed0ad366d6f.asciidoc rename docs/examples/{c02c2916b97b6fa7db82dbc7f0378310.asciidoc => 17b1647c8509543f2388c886f2584a20.asciidoc} (86%) create mode 100644 docs/examples/22b176a184517cf1b5801f5eb4f17f97.asciidoc create mode 100644 docs/examples/246763219ec06172f7aa57bba28d344a.asciidoc delete mode 100644 docs/examples/2577acb462b95bd4394523cf2f8a661f.asciidoc create mode 100644 docs/examples/2a21674c40f9b182a8944769d20b2357.asciidoc create mode 100644 docs/examples/2a67608dadbf220a2f040f3a79d3677d.asciidoc delete mode 100644 docs/examples/2bb41b0b4876ce98cd0cd8fb6d337f18.asciidoc delete mode 100644 docs/examples/2f07b81fd47ec3b074242a760f0c4e9e.asciidoc create mode 100644 docs/examples/2f72a63c73dd672ac2dc3997ad15dd41.asciidoc create mode 100644 docs/examples/30d051f534aeb884176eedb2c11dac85.asciidoc delete mode 100644 docs/examples/31bc93e429ad0de11dd2dd231e8f2c5e.asciidoc rename docs/examples/{23af230e824f48b9cd56a4cf973d788c.asciidoc => 3312c82f81816bf76629db9582991812.asciidoc} (79%) delete mode 100644 docs/examples/34cdeefb09bbbe5206957a8bc1bd513d.asciidoc create mode 100644 docs/examples/37f367ca81a16d3aef4ef7126ec33a2e.asciidoc delete mode 100644 docs/examples/38ba93890494bfa7beece58dffa44f98.asciidoc create mode 100644 docs/examples/3ea4c971b3f47735dcc207ee2645fa03.asciidoc create mode 100644 docs/examples/3f9dcf2aa42f3ecfb5ebfe48c1774103.asciidoc create mode 100644 docs/examples/41d24383d29b2808a65258a0a3256e96.asciidoc create mode 100644 docs/examples/45954b8aaedfed57012be8b6538b0a24.asciidoc rename docs/examples/{9f16fca9813304e398ee052aa857dbcd.asciidoc => 48e142e6c69014e0509d4c9251749d77.asciidoc} (72%) delete mode 100644 docs/examples/49b31e23f8b9667b6a7b2734d55fb6ed.asciidoc delete mode 100644 docs/examples/4dab4c5168047ba596af1beb0e55b845.asciidoc rename docs/examples/{191074b2eebd5f74e628c2ada4b6d2e4.asciidoc => 4edfb5934d14ad7655bd7e19a112b5c0.asciidoc} (92%) create mode 100644 docs/examples/519e46350316a33162740e5d7968aa2c.asciidoc create mode 100644 docs/examples/53d9d2ec9cb8d211772d764e76fe6890.asciidoc create mode 100644 docs/examples/5836b09198feb1269ed12839b416123d.asciidoc delete mode 100644 docs/examples/5a70db31f587b7ffed5e9bc1445430cb.asciidoc create mode 100644 docs/examples/5f16358ebb5d14b86f57612d5f92d923.asciidoc create mode 100644 docs/examples/681d24c2633f598fc43d6afff8996dbb.asciidoc rename docs/examples/{2fd458d37aab509fe2d970c0b6e2a10f.asciidoc => 68d7f7d4d268ee98caead5aef19933d6.asciidoc} (98%) create mode 100644 docs/examples/6b67c6121efb86ee100d40c2646f77b5.asciidoc rename docs/examples/{f9ee5d55a73f4c1fe7d507609047aefd.asciidoc => 6fa02c2ad485bbe91f44b321158250f3.asciidoc} (69%) create mode 100644 docs/examples/730045fae3743c39b612813a42c330c3.asciidoc create mode 100644 docs/examples/7478ff69113fb53f41ea07cdf911fa67.asciidoc delete mode 100644 docs/examples/74b229a6e020113e5749099451979c89.asciidoc create mode 100644 docs/examples/76e02434835630cb830724beb92df354.asciidoc rename docs/examples/{9d47f02a063444da9f098858a1830d28.asciidoc => 77cebba946fe648873a1e7375c13df41.asciidoc} (60%) create 
mode 100644 docs/examples/78043831fd32004a82930c8ac8a1d809.asciidoc create mode 100644 docs/examples/790684b45bef2bb848ea932f0fd0cfbd.asciidoc rename docs/examples/{8cad5d95a0e7c103f08be53d0b172558.asciidoc => 79d206a528be704050a437adce2496dd.asciidoc} (53%) rename docs/examples/{58f6b72009512851843c7b7a20e9504a.asciidoc => 7a2fdfd7b0553d63440af7598f9ad867.asciidoc} (84%) create mode 100644 docs/examples/7ba29f0be2297b54a640b0a17d7ef5ca.asciidoc rename docs/examples/{8593715fcc70315a0816b435551258e0.asciidoc => 7bdc283b96c7a965fae23013647b8578.asciidoc} (79%) create mode 100644 docs/examples/7dd0d9cc6c5982a2c003d301e90feeba.asciidoc rename docs/examples/{f4d0ef2e0f76babee83d999fe35127f2.asciidoc => 80135e8c644e34cc70ce8a4e7915d1a2.asciidoc} (89%) create mode 100644 docs/examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc rename docs/examples/{c8fa8d7e029792d539464fede18ce258.asciidoc => 8a0b5f759de3f27f0801c1176e616117.asciidoc} (75%) create mode 100644 docs/examples/8c639d3eef5c2de29e12bd9c6a42d3d4.asciidoc create mode 100644 docs/examples/91e106a2affbc8df32cd940684a779ed.asciidoc create mode 100644 docs/examples/9250ac57ec81d5192e8ad4c462438489.asciidoc create mode 100644 docs/examples/931817b168e055ecf738785c721125dd.asciidoc create mode 100644 docs/examples/948418e0ef1b7e7cfee2f11be715d7d2.asciidoc rename docs/examples/{19f1f9f25933f8e7aba59a10881c648b.asciidoc => 96e88611f99e6834bd64b58dc8a282c1.asciidoc} (67%) create mode 100644 docs/examples/97c6c07f46f4177f0565a04bc50924a3.asciidoc create mode 100644 docs/examples/99fb82d49ac477e6a9dfdd71f9465374.asciidoc create mode 100644 docs/examples/9cc952d4a03264b700136cbc45abc8c6.asciidoc create mode 100644 docs/examples/a9f14efc26fdd3c37a71f06c310163d9.asciidoc create mode 100644 docs/examples/ac22cc2b0f4ad659055feed2852a2d59.asciidoc rename docs/examples/{7b9691bd34a02dd859562eb927f175e0.asciidoc => ad9889fd8a4b5930e312a51f3bc996dc.asciidoc} (83%) create mode 100644 docs/examples/b590241c4296299b836fbb5a95bdd2dc.asciidoc create mode 100644 docs/examples/b6d278737d27973e498ac61cda9e5126.asciidoc create mode 100644 docs/examples/bb2ba5d1885f87506f90dbb002e518f4.asciidoc create mode 100644 docs/examples/bccd4eb26b1a325d103b12e198a13c08.asciidoc create mode 100644 docs/examples/bdc55256fa5f701680631a149dbb75a9.asciidoc create mode 100644 docs/examples/bdd28276618235487ac96bd6679bc206.asciidoc create mode 100644 docs/examples/bf3c3bc41c593a80faebef1df353e483.asciidoc create mode 100644 docs/examples/cdb7613b445e6ed6e8b473f9cae1af90.asciidoc rename docs/examples/{b26b5574438e4eaf146b2428bf537c51.asciidoc => cecfaa659af6646b3b67d7b311586fa0.asciidoc} (91%) delete mode 100644 docs/examples/d01a590fa9ea8a0cb34ed8dda502296c.asciidoc create mode 100644 docs/examples/d29031409016b2b798148ef173a196ae.asciidoc create mode 100644 docs/examples/d3672a87a857ddb87519788236e57497.asciidoc create mode 100644 docs/examples/d4158d486e7fee2702a14068b69e3b33.asciidoc create mode 100644 docs/examples/d4df39f72d3a3b80cd4042f6a21c3f19.asciidoc rename docs/examples/{35fd9549350926f8d57dc1765e2f40d3.asciidoc => d5242b1ab0213f25e5e0742032274ce6.asciidoc} (94%) create mode 100644 docs/examples/d8c053ee26c1533ce936ec81101d8e1b.asciidoc delete mode 100644 docs/examples/ddaadd91b7743a1c7e946ce1b593cd1b.asciidoc rename docs/examples/{a225fc8c134cb21a85bc6025dac9368b.asciidoc => df81b88a2192dd6f9912e0c948a44487.asciidoc} (91%) delete mode 100644 docs/examples/e20037f66bf54bcac7d10f536f031f34.asciidoc create mode 100644 docs/examples/e375c7da666276c4df6664c6821cd5f4.asciidoc 
create mode 100644 docs/examples/e6f6d3aeea7ecea47cfd5c3d727f7004.asciidoc rename docs/examples/{5ba32ebaa7ee28a339c7693696d305ca.asciidoc => e77c2f41a7eca765b0c5f734a66d919f.asciidoc} (83%) rename docs/examples/{bb5a1319c496acc862c670cc7224e59a.asciidoc => ea8c4229afa6dd4f1321355542be9912.asciidoc} (89%) create mode 100644 docs/examples/ec4b43c3ebd8816799fa004596b2f0cb.asciidoc create mode 100644 docs/examples/fff86117c47f974074284644e8a97a99.asciidoc diff --git a/docs/examples/00272f75a6afea91f8554ef7cda0c1f2.asciidoc b/docs/examples/00272f75a6afea91f8554ef7cda0c1f2.asciidoc index dc87b0549..6c562c4cb 100644 --- a/docs/examples/00272f75a6afea91f8554ef7cda0c1f2.asciidoc +++ b/docs/examples/00272f75a6afea91f8554ef7cda0c1f2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/clear-cache.asciidoc:69 +// rest-api/security/clear-cache.asciidoc:75 [source, python] ---- diff --git a/docs/examples/004743b9c9f61588926ccf734696b713.asciidoc b/docs/examples/004743b9c9f61588926ccf734696b713.asciidoc index 9e5b1713a..b45ec61fd 100644 --- a/docs/examples/004743b9c9f61588926ccf734696b713.asciidoc +++ b/docs/examples/004743b9c9f61588926ccf734696b713.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/forcemerge.asciidoc:210 +// indices/forcemerge.asciidoc:216 [source, python] ---- diff --git a/docs/examples/008ed823c89e703c447ac89c6b689833.asciidoc b/docs/examples/008ed823c89e703c447ac89c6b689833.asciidoc index 97fa31508..ba5031dda 100644 --- a/docs/examples/008ed823c89e703c447ac89c6b689833.asciidoc +++ b/docs/examples/008ed823c89e703c447ac89c6b689833.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// migration/apis/feature-migration.asciidoc:152 +// migration/apis/feature-migration.asciidoc:158 [source, python] ---- diff --git a/docs/examples/00b3b6d76a368ae71277ea24af318693.asciidoc b/docs/examples/00b3b6d76a368ae71277ea24af318693.asciidoc index 41250b193..83c7b6585 100644 --- a/docs/examples/00b3b6d76a368ae71277ea24af318693.asciidoc +++ b/docs/examples/00b3b6d76a368ae71277ea24af318693.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/shard-stores.asciidoc:134 +// indices/shard-stores.asciidoc:140 [source, python] ---- diff --git a/docs/examples/00fea15cbca83be9d5f1a024ff2ec708.asciidoc b/docs/examples/00fea15cbca83be9d5f1a024ff2ec708.asciidoc index 820bb804e..c9322a6a6 100644 --- a/docs/examples/00fea15cbca83be9d5f1a024ff2ec708.asciidoc +++ b/docs/examples/00fea15cbca83be9d5f1a024ff2ec708.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-elasticsearch.asciidoc:163 +// inference/service-elasticsearch.asciidoc:204 [source, python] ---- diff --git a/docs/examples/015e6e6132b6d6d44bddb06bc3b316ed.asciidoc b/docs/examples/015e6e6132b6d6d44bddb06bc3b316ed.asciidoc new file mode 100644 index 000000000..78636bc7a --- /dev/null +++ b/docs/examples/015e6e6132b6d6d44bddb06bc3b316ed.asciidoc @@ -0,0 +1,46 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/retrievers-examples.asciidoc:801 + +[source, python] +---- +resp = client.search( + index="retrievers_example", + retriever={ + "rrf": { + "retrievers": [ + { + "standard": { + "query": { + "range": { + "year": { + "gt": 2023 + } + } + } + } + }, + { + "standard": { + "query": { + "term": { + "topic": "elastic" + } + } + } + } + ], + "rank_window_size": 10, + "rank_constant": 1 + } + }, + source=False, + aggs={ + "topics": { + "terms": { + "field": "topic" + } + } + }, +) +print(resp) 
+---- diff --git a/docs/examples/0165d22da5f2fc7678392b31d8eb5566.asciidoc b/docs/examples/0165d22da5f2fc7678392b31d8eb5566.asciidoc new file mode 100644 index 000000000..749e89a02 --- /dev/null +++ b/docs/examples/0165d22da5f2fc7678392b31d8eb5566.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/retrievers-examples.asciidoc:1113 + +[source, python] +---- +resp = client.inference.put( + task_type="rerank", + inference_id="my-rerank-model", + inference_config={ + "service": "cohere", + "service_settings": { + "model_id": "rerank-english-v3.0", + "api_key": "{{COHERE_API_KEY}}" + } + }, +) +print(resp) +---- diff --git a/docs/examples/01bc0f2ed30eb3dd23511d01ce0ac6e1.asciidoc b/docs/examples/01bc0f2ed30eb3dd23511d01ce0ac6e1.asciidoc index f3c753df4..b10bf0ef1 100644 --- a/docs/examples/01bc0f2ed30eb3dd23511d01ce0ac6e1.asciidoc +++ b/docs/examples/01bc0f2ed30eb3dd23511d01ce0ac6e1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// transform/apis/start-transform.asciidoc:79 +// transform/apis/start-transform.asciidoc:85 [source, python] ---- diff --git a/docs/examples/01cd0ea360282a2c591a366679d7187d.asciidoc b/docs/examples/01cd0ea360282a2c591a366679d7187d.asciidoc index 79f5d0185..d52222ef9 100644 --- a/docs/examples/01cd0ea360282a2c591a366679d7187d.asciidoc +++ b/docs/examples/01cd0ea360282a2c591a366679d7187d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/task-queue-backlog.asciidoc:60 +// troubleshooting/common-issues/task-queue-backlog.asciidoc:83 [source, python] ---- diff --git a/docs/examples/01dc7bdc223bd651574ed2d3954a5b1c.asciidoc b/docs/examples/01dc7bdc223bd651574ed2d3954a5b1c.asciidoc index 81e0a9db9..8edde87f0 100644 --- a/docs/examples/01dc7bdc223bd651574ed2d3954a5b1c.asciidoc +++ b/docs/examples/01dc7bdc223bd651574ed2d3954a5b1c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/watcher/execute-watch.asciidoc:147 +// rest-api/watcher/execute-watch.asciidoc:153 [source, python] ---- diff --git a/docs/examples/020c95db88ef356093f03be84893ddf9.asciidoc b/docs/examples/020c95db88ef356093f03be84893ddf9.asciidoc index f179d62cb..491db9b18 100644 --- a/docs/examples/020c95db88ef356093f03be84893ddf9.asciidoc +++ b/docs/examples/020c95db88ef356093f03be84893ddf9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ccr/apis/follow/get-follow-stats.asciidoc:35 +// ccr/apis/follow/get-follow-stats.asciidoc:41 [source, python] ---- diff --git a/docs/examples/0246f73cc2ed3dfec577119e8cd15404.asciidoc b/docs/examples/0246f73cc2ed3dfec577119e8cd15404.asciidoc index 71415a0a1..670c9daa3 100644 --- a/docs/examples/0246f73cc2ed3dfec577119e8cd15404.asciidoc +++ b/docs/examples/0246f73cc2ed3dfec577119e8cd15404.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:177 +// indices/put-mapping.asciidoc:183 [source, python] ---- diff --git a/docs/examples/02520ac7816b2c4cf8fb413fd16122f2.asciidoc b/docs/examples/02520ac7816b2c4cf8fb413fd16122f2.asciidoc index 788c4344b..4192c617a 100644 --- a/docs/examples/02520ac7816b2c4cf8fb413fd16122f2.asciidoc +++ b/docs/examples/02520ac7816b2c4cf8fb413fd16122f2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/flush-job.asciidoc:75 +// ml/anomaly-detection/apis/flush-job.asciidoc:81 [source, python] ---- diff --git a/docs/examples/0280247e0cf2e561c548f22c9fb31163.asciidoc 
b/docs/examples/0280247e0cf2e561c548f22c9fb31163.asciidoc index 10c5d0ce4..113679b3d 100644 --- a/docs/examples/0280247e0cf2e561c548f22c9fb31163.asciidoc +++ b/docs/examples/0280247e0cf2e561c548f22c9fb31163.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/invalidate-tokens.asciidoc:199 +// rest-api/security/invalidate-tokens.asciidoc:205 [source, python] ---- diff --git a/docs/examples/02f65c6bab8f40bf3ce18160623d1870.asciidoc b/docs/examples/02f65c6bab8f40bf3ce18160623d1870.asciidoc index c34253d87..d36267cb5 100644 --- a/docs/examples/02f65c6bab8f40bf3ce18160623d1870.asciidoc +++ b/docs/examples/02f65c6bab8f40bf3ce18160623d1870.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/get-index-template-v1.asciidoc:35 +// indices/get-index-template-v1.asciidoc:41 [source, python] ---- diff --git a/docs/examples/0308cbd85281f95fc458042afe3f587d.asciidoc b/docs/examples/0308cbd85281f95fc458042afe3f587d.asciidoc index 7ca4d69ff..43b6a859e 100644 --- a/docs/examples/0308cbd85281f95fc458042afe3f587d.asciidoc +++ b/docs/examples/0308cbd85281f95fc458042afe3f587d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/get.asciidoc:79 +// docs/get.asciidoc:85 [source, python] ---- diff --git a/docs/examples/032eac56b798bea29390e102538f4a26.asciidoc b/docs/examples/032eac56b798bea29390e102538f4a26.asciidoc index 8f4b0e3ed..36dfb2a73 100644 --- a/docs/examples/032eac56b798bea29390e102538f4a26.asciidoc +++ b/docs/examples/032eac56b798bea29390e102538f4a26.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/refresh.asciidoc:103 +// indices/refresh.asciidoc:109 [source, python] ---- diff --git a/docs/examples/0350ff5ebb8207c004eb771088339cb4.asciidoc b/docs/examples/0350ff5ebb8207c004eb771088339cb4.asciidoc index c815c5834..4cd810a13 100644 --- a/docs/examples/0350ff5ebb8207c004eb771088339cb4.asciidoc +++ b/docs/examples/0350ff5ebb8207c004eb771088339cb4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/rrf.asciidoc:121 +// search/rrf.asciidoc:127 [source, python] ---- diff --git a/docs/examples/03891265df2111a38e0b6b24c1b967e1.asciidoc b/docs/examples/03891265df2111a38e0b6b24c1b967e1.asciidoc index 4c265f7ad..cf65de095 100644 --- a/docs/examples/03891265df2111a38e0b6b24c1b967e1.asciidoc +++ b/docs/examples/03891265df2111a38e0b6b24c1b967e1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-service-accounts.asciidoc:299 +// rest-api/security/get-service-accounts.asciidoc:320 [source, python] ---- diff --git a/docs/examples/03c4b815bf1e6a8c5cfcc6ddf94bc093.asciidoc b/docs/examples/03c4b815bf1e6a8c5cfcc6ddf94bc093.asciidoc index b8fb76686..985ac93d1 100644 --- a/docs/examples/03c4b815bf1e6a8c5cfcc6ddf94bc093.asciidoc +++ b/docs/examples/03c4b815bf1e6a8c5cfcc6ddf94bc093.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// sql/apis/sql-search-api.asciidoc:11 +// sql/apis/sql-search-api.asciidoc:17 [source, python] ---- diff --git a/docs/examples/04412d11783dac25b5fd2ec5407078a3.asciidoc b/docs/examples/04412d11783dac25b5fd2ec5407078a3.asciidoc index 223f90baf..613984b02 100644 --- a/docs/examples/04412d11783dac25b5fd2ec5407078a3.asciidoc +++ b/docs/examples/04412d11783dac25b5fd2ec5407078a3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-api-key-id-api.asciidoc:87 +// connector/apis/update-connector-api-key-id-api.asciidoc:93 [source, python] ---- diff 
--git a/docs/examples/0470d7101637568b9d3d1239f06325a7.asciidoc b/docs/examples/0470d7101637568b9d3d1239f06325a7.asciidoc index b8b157197..b4f0da774 100644 --- a/docs/examples/0470d7101637568b9d3d1239f06325a7.asciidoc +++ b/docs/examples/0470d7101637568b9d3d1239f06325a7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/update-desired-nodes.asciidoc:15 +// cluster/update-desired-nodes.asciidoc:21 [source, python] ---- diff --git a/docs/examples/048652b6abfe195da8ea8cef10ee01b1.asciidoc b/docs/examples/048652b6abfe195da8ea8cef10ee01b1.asciidoc index 26a2e9379..f295c9528 100644 --- a/docs/examples/048652b6abfe195da8ea8cef10ee01b1.asciidoc +++ b/docs/examples/048652b6abfe195da8ea8cef10ee01b1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// transform/apis/reset-transform.asciidoc:61 +// transform/apis/reset-transform.asciidoc:67 [source, python] ---- diff --git a/docs/examples/04d6ce0c903bd468afbecd3aa1c4a78a.asciidoc b/docs/examples/04d6ce0c903bd468afbecd3aa1c4a78a.asciidoc index 1d0483c0f..b2d34865d 100644 --- a/docs/examples/04d6ce0c903bd468afbecd3aa1c4a78a.asciidoc +++ b/docs/examples/04d6ce0c903bd468afbecd3aa1c4a78a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/apis/put-pipeline.asciidoc:120 +// ingest/apis/put-pipeline.asciidoc:126 [source, python] ---- diff --git a/docs/examples/04de2e3a9c00c2056b07bf9cf9e63a99.asciidoc b/docs/examples/04de2e3a9c00c2056b07bf9cf9e63a99.asciidoc index 9e4fffd1f..c3086c6d5 100644 --- a/docs/examples/04de2e3a9c00c2056b07bf9cf9e63a99.asciidoc +++ b/docs/examples/04de2e3a9c00c2056b07bf9cf9e63a99.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-google-vertex-ai.asciidoc:127 +// inference/service-google-vertex-ai.asciidoc:133 [source, python] ---- diff --git a/docs/examples/04f5dd677c777bcb15d7d5fa63275fc8.asciidoc b/docs/examples/04f5dd677c777bcb15d7d5fa63275fc8.asciidoc index 115f443da..1191184c0 100644 --- a/docs/examples/04f5dd677c777bcb15d7d5fa63275fc8.asciidoc +++ b/docs/examples/04f5dd677c777bcb15d7d5fa63275fc8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/health.asciidoc:42 +// cluster/health.asciidoc:48 [source, python] ---- diff --git a/docs/examples/0502284d4685c478eb68761f979f4303.asciidoc b/docs/examples/0502284d4685c478eb68761f979f4303.asciidoc index 7a6344c79..4e87cee50 100644 --- a/docs/examples/0502284d4685c478eb68761f979f4303.asciidoc +++ b/docs/examples/0502284d4685c478eb68761f979f4303.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/df-analytics/apis/evaluate-dfanalytics.asciidoc:315 +// ml/df-analytics/apis/evaluate-dfanalytics.asciidoc:321 [source, python] ---- diff --git a/docs/examples/05284c8ea91769c09c8db47db8a6629a.asciidoc b/docs/examples/05284c8ea91769c09c8db47db8a6629a.asciidoc index 03dfbf18d..93c297023 100644 --- a/docs/examples/05284c8ea91769c09c8db47db8a6629a.asciidoc +++ b/docs/examples/05284c8ea91769c09c8db47db8a6629a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/repositories.asciidoc:51 +// cat/repositories.asciidoc:57 [source, python] ---- diff --git a/docs/examples/05f6049c677a156bdf9b83e71a3b87ed.asciidoc b/docs/examples/05f6049c677a156bdf9b83e71a3b87ed.asciidoc index b5222abb1..54ed3cff4 100644 --- a/docs/examples/05f6049c677a156bdf9b83e71a3b87ed.asciidoc +++ b/docs/examples/05f6049c677a156bdf9b83e71a3b87ed.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// 
rest-api/security/ssl.asciidoc:84 +// rest-api/security/ssl.asciidoc:90 [source, python] ---- diff --git a/docs/examples/0601b5cb5328c9ebff30f4be1b210f93.asciidoc b/docs/examples/0601b5cb5328c9ebff30f4be1b210f93.asciidoc index aac12b70e..004b0ae50 100644 --- a/docs/examples/0601b5cb5328c9ebff30f4be1b210f93.asciidoc +++ b/docs/examples/0601b5cb5328c9ebff30f4be1b210f93.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/apis/get-snapshot-status-api.asciidoc:327 +// snapshot-restore/apis/get-snapshot-status-api.asciidoc:333 [source, python] ---- diff --git a/docs/examples/06454a8e85e2d3479c90390bb955eb39.asciidoc b/docs/examples/06454a8e85e2d3479c90390bb955eb39.asciidoc index cc543366b..3ecb496e2 100644 --- a/docs/examples/06454a8e85e2d3479c90390bb955eb39.asciidoc +++ b/docs/examples/06454a8e85e2d3479c90390bb955eb39.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/apis/get-snapshot-api.asciidoc:583 +// snapshot-restore/apis/get-snapshot-api.asciidoc:589 [source, python] ---- diff --git a/docs/examples/066e0bdcdfa3b8afa5d1e5777f73fccb.asciidoc b/docs/examples/066e0bdcdfa3b8afa5d1e5777f73fccb.asciidoc index 944e2ba95..4de440663 100644 --- a/docs/examples/066e0bdcdfa3b8afa5d1e5777f73fccb.asciidoc +++ b/docs/examples/066e0bdcdfa3b8afa5d1e5777f73fccb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/rollover-index.asciidoc:327 +// indices/rollover-index.asciidoc:333 [source, python] ---- diff --git a/docs/examples/06b5d3d56c4d4e3b61ae42ea26401c40.asciidoc b/docs/examples/06b5d3d56c4d4e3b61ae42ea26401c40.asciidoc index f4be82bcb..5e26ee403 100644 --- a/docs/examples/06b5d3d56c4d4e3b61ae42ea26401c40.asciidoc +++ b/docs/examples/06b5d3d56c4d4e3b61ae42ea26401c40.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/multi-search.asciidoc:10 +// search/multi-search.asciidoc:16 [source, python] ---- diff --git a/docs/examples/0709a38613d2de90d418ce12b36af30e.asciidoc b/docs/examples/0709a38613d2de90d418ce12b36af30e.asciidoc index 719d89389..340a95e60 100644 --- a/docs/examples/0709a38613d2de90d418ce12b36af30e.asciidoc +++ b/docs/examples/0709a38613d2de90d418ce12b36af30e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:109 +// troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:113 [source, python] ---- diff --git a/docs/examples/0721c8adec544d5ecea3fcc410e45feb.asciidoc b/docs/examples/0721c8adec544d5ecea3fcc410e45feb.asciidoc index 84d5c2622..27e6809d9 100644 --- a/docs/examples/0721c8adec544d5ecea3fcc410e45feb.asciidoc +++ b/docs/examples/0721c8adec544d5ecea3fcc410e45feb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/activate-user-profile.asciidoc:98 +// rest-api/security/activate-user-profile.asciidoc:104 [source, python] ---- diff --git a/docs/examples/0722b302b2b3275a988d858044f99d5d.asciidoc b/docs/examples/0722b302b2b3275a988d858044f99d5d.asciidoc new file mode 100644 index 000000000..abbb65005 --- /dev/null +++ b/docs/examples/0722b302b2b3275a988d858044f99d5d.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// quickstart/aggs-tutorial.asciidoc:45 + +[source, python] +---- +resp = client.indices.get_mapping( + index="kibana_sample_data_ecommerce", +) +print(resp) +---- diff --git a/docs/examples/073864d3f52f8f79aafdaa85a88ac46a.asciidoc b/docs/examples/073864d3f52f8f79aafdaa85a88ac46a.asciidoc index 
bb4580ebf..c89d52af8 100644 --- a/docs/examples/073864d3f52f8f79aafdaa85a88ac46a.asciidoc +++ b/docs/examples/073864d3f52f8f79aafdaa85a88ac46a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/clear-cache.asciidoc:76 +// rest-api/security/clear-cache.asciidoc:82 [source, python] ---- diff --git a/docs/examples/844928da2ff9a1394af5347a5e2e4f78.asciidoc b/docs/examples/074e4602d1ca54412380a40867d078bc.asciidoc similarity index 69% rename from docs/examples/844928da2ff9a1394af5347a5e2e4f78.asciidoc rename to docs/examples/074e4602d1ca54412380a40867d078bc.asciidoc index 9b5028b37..8eab49b0b 100644 --- a/docs/examples/844928da2ff9a1394af5347a5e2e4f78.asciidoc +++ b/docs/examples/074e4602d1ca54412380a40867d078bc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// index-modules/slowlog.asciidoc:133 +// index-modules/slowlog.asciidoc:180 [source, python] ---- @@ -10,7 +10,9 @@ resp = client.indices.put_settings( "index.indexing.slowlog.threshold.index.info": "5s", "index.indexing.slowlog.threshold.index.debug": "2s", "index.indexing.slowlog.threshold.index.trace": "500ms", - "index.indexing.slowlog.source": "1000" + "index.indexing.slowlog.source": "1000", + "index.indexing.slowlog.reformat": True, + "index.indexing.slowlog.include.user": True }, ) print(resp) diff --git a/docs/examples/0755471d7dce4785d2e7ed0c10182ea3.asciidoc b/docs/examples/0755471d7dce4785d2e7ed0c10182ea3.asciidoc index 3e665950b..1b267037f 100644 --- a/docs/examples/0755471d7dce4785d2e7ed0c10182ea3.asciidoc +++ b/docs/examples/0755471d7dce4785d2e7ed0c10182ea3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// transform/apis/get-transform-stats.asciidoc:330 +// transform/apis/get-transform-stats.asciidoc:336 [source, python] ---- diff --git a/docs/examples/07a5fdeb7805cec1d28ba288b28f5ff5.asciidoc b/docs/examples/07a5fdeb7805cec1d28ba288b28f5ff5.asciidoc index 29406d787..99ffe9440 100644 --- a/docs/examples/07a5fdeb7805cec1d28ba288b28f5ff5.asciidoc +++ b/docs/examples/07a5fdeb7805cec1d28ba288b28f5ff5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/stop-job.asciidoc:75 +// rollup/apis/stop-job.asciidoc:81 [source, python] ---- diff --git a/docs/examples/07ba3eaa931f2cf110052e3544db51f8.asciidoc b/docs/examples/07ba3eaa931f2cf110052e3544db51f8.asciidoc index dd35d3388..12cec5a60 100644 --- a/docs/examples/07ba3eaa931f2cf110052e3544db51f8.asciidoc +++ b/docs/examples/07ba3eaa931f2cf110052e3544db51f8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/reindex.asciidoc:878 +// docs/reindex.asciidoc:884 [source, python] ---- diff --git a/docs/examples/07c07f6d497b1a3012aa4320f830e09e.asciidoc b/docs/examples/07c07f6d497b1a3012aa4320f830e09e.asciidoc index 9e8d9d9d0..7792f2e29 100644 --- a/docs/examples/07c07f6d497b1a3012aa4320f830e09e.asciidoc +++ b/docs/examples/07c07f6d497b1a3012aa4320f830e09e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ccr/apis/follow/post-forget-follower.asciidoc:133 +// ccr/apis/follow/post-forget-follower.asciidoc:139 [source, python] ---- diff --git a/docs/examples/082e78c7a2061a7c4a52b494e5ede0e8.asciidoc b/docs/examples/082e78c7a2061a7c4a52b494e5ede0e8.asciidoc new file mode 100644 index 000000000..33ec4dfcc --- /dev/null +++ b/docs/examples/082e78c7a2061a7c4a52b494e5ede0e8.asciidoc @@ -0,0 +1,48 @@ +// This file is autogenerated, DO NOT EDIT +// mapping/types/rank-vectors.asciidoc:64 + +[source, python] +---- +resp = client.indices.create( 
+ index="my-rank-vectors-bit", + mappings={ + "properties": { + "my_vector": { + "type": "rank_vectors", + "element_type": "bit" + } + } + }, +) +print(resp) + +resp1 = client.bulk( + index="my-rank-vectors-bit", + refresh=True, + operations=[ + { + "index": { + "_id": "1" + } + }, + { + "my_vector": [ + 127, + -127, + 0, + 1, + 42 + ] + }, + { + "index": { + "_id": "2" + } + }, + { + "my_vector": "8100012a7f" + } + ], +) +print(resp1) +---- diff --git a/docs/examples/083b92e8ea264e49bf9fd40fc6a3094b.asciidoc b/docs/examples/083b92e8ea264e49bf9fd40fc6a3094b.asciidoc index 1407a4b6c..6c94b6da2 100644 --- a/docs/examples/083b92e8ea264e49bf9fd40fc6a3094b.asciidoc +++ b/docs/examples/083b92e8ea264e49bf9fd40fc6a3094b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-elasticsearch.asciidoc:223 +// inference/service-elasticsearch.asciidoc:264 [source, python] ---- diff --git a/docs/examples/0881397074d261ccc2db514daf116c31.asciidoc b/docs/examples/0881397074d261ccc2db514daf116c31.asciidoc index 269216db0..c4ffb3c12 100644 --- a/docs/examples/0881397074d261ccc2db514daf116c31.asciidoc +++ b/docs/examples/0881397074d261ccc2db514daf116c31.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-api-keys.asciidoc:122 +// rest-api/security/get-api-keys.asciidoc:128 [source, python] ---- diff --git a/docs/examples/08c9af9dd519c011deedd406f3061836.asciidoc b/docs/examples/08c9af9dd519c011deedd406f3061836.asciidoc index 43eafcf42..9c456b203 100644 --- a/docs/examples/08c9af9dd519c011deedd406f3061836.asciidoc +++ b/docs/examples/08c9af9dd519c011deedd406f3061836.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/preview-datafeed.asciidoc:151 +// ml/anomaly-detection/apis/preview-datafeed.asciidoc:157 [source, python] ---- diff --git a/docs/examples/08e08feb514b24006e13f258d617d873.asciidoc b/docs/examples/08e08feb514b24006e13f258d617d873.asciidoc index e72e593f0..0659ee723 100644 --- a/docs/examples/08e08feb514b24006e13f258d617d873.asciidoc +++ b/docs/examples/08e08feb514b24006e13f258d617d873.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// scripting/using.asciidoc:229 +// scripting/using.asciidoc:234 [source, python] ---- diff --git a/docs/examples/0957bbd535f58c97b12ffba90813d64c.asciidoc b/docs/examples/0957bbd535f58c97b12ffba90813d64c.asciidoc index b508719c5..adf5b2f2e 100644 --- a/docs/examples/0957bbd535f58c97b12ffba90813d64c.asciidoc +++ b/docs/examples/0957bbd535f58c97b12ffba90813d64c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/analyze.asciidoc:361 +// indices/analyze.asciidoc:367 [source, python] ---- diff --git a/docs/examples/095e3f21941a9cc75f398389a075152d.asciidoc b/docs/examples/095e3f21941a9cc75f398389a075152d.asciidoc index 00532f55c..af9f1554d 100644 --- a/docs/examples/095e3f21941a9cc75f398389a075152d.asciidoc +++ b/docs/examples/095e3f21941a9cc75f398389a075152d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/infer-trained-model.asciidoc:1144 +// ml/trained-models/apis/infer-trained-model.asciidoc:1150 [source, python] ---- diff --git a/docs/examples/09769561f082b50558fb7d8707719963.asciidoc b/docs/examples/09769561f082b50558fb7d8707719963.asciidoc index 9aa9cba6e..e931567b9 100644 --- a/docs/examples/09769561f082b50558fb7d8707719963.asciidoc +++ b/docs/examples/09769561f082b50558fb7d8707719963.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT 
-// cluster/nodes-stats.asciidoc:2582 +// cluster/nodes-stats.asciidoc:2588 [source, python] ---- diff --git a/docs/examples/099006ab11b52ea99693401dceee8bad.asciidoc b/docs/examples/099006ab11b52ea99693401dceee8bad.asciidoc index 75518ba6e..06f1008ce 100644 --- a/docs/examples/099006ab11b52ea99693401dceee8bad.asciidoc +++ b/docs/examples/099006ab11b52ea99693401dceee8bad.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// scripting/using.asciidoc:215 +// scripting/using.asciidoc:220 [source, python] ---- diff --git a/docs/examples/09e6e06ba562f4b9bac59455e9151a80.asciidoc b/docs/examples/09e6e06ba562f4b9bac59455e9151a80.asciidoc index 64f28519d..20e4b07b4 100644 --- a/docs/examples/09e6e06ba562f4b9bac59455e9151a80.asciidoc +++ b/docs/examples/09e6e06ba562f4b9bac59455e9151a80.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/df-analytics/apis/evaluate-dfanalytics.asciidoc:517 +// ml/df-analytics/apis/evaluate-dfanalytics.asciidoc:523 [source, python] ---- diff --git a/docs/examples/0a46cc8fe93e372909660a63dc52ae3b.asciidoc b/docs/examples/0a46cc8fe93e372909660a63dc52ae3b.asciidoc index c08ace7f6..bbe7b7ca8 100644 --- a/docs/examples/0a46cc8fe93e372909660a63dc52ae3b.asciidoc +++ b/docs/examples/0a46cc8fe93e372909660a63dc52ae3b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/rollover-index.asciidoc:309 +// indices/rollover-index.asciidoc:315 [source, python] ---- diff --git a/docs/examples/0a650401134f07e40216f0d0d1a66a32.asciidoc b/docs/examples/0a650401134f07e40216f0d0d1a66a32.asciidoc index c9a221494..7dfc1b717 100644 --- a/docs/examples/0a650401134f07e40216f0d0d1a66a32.asciidoc +++ b/docs/examples/0a650401134f07e40216f0d0d1a66a32.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/allocation.asciidoc:120 +// cat/allocation.asciidoc:126 [source, python] ---- diff --git a/docs/examples/0a758d9dec74d9e942cf41a06499234f.asciidoc b/docs/examples/0a758d9dec74d9e942cf41a06499234f.asciidoc index b44d34496..615755b86 100644 --- a/docs/examples/0a758d9dec74d9e942cf41a06499234f.asciidoc +++ b/docs/examples/0a758d9dec74d9e942cf41a06499234f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// scripting/using.asciidoc:282 +// scripting/using.asciidoc:287 [source, python] ---- diff --git a/docs/examples/0ad8edd10542ec2c4d5d8700d7e2ba97.asciidoc b/docs/examples/0ad8edd10542ec2c4d5d8700d7e2ba97.asciidoc index 0941c559f..842ce3b94 100644 --- a/docs/examples/0ad8edd10542ec2c4d5d8700d7e2ba97.asciidoc +++ b/docs/examples/0ad8edd10542ec2c4d5d8700d7e2ba97.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-amazon-bedrock.asciidoc:156 +// inference/service-amazon-bedrock.asciidoc:162 [source, python] ---- diff --git a/docs/examples/0ade87c8cb0e3c188d2e3dce279d5cc2.asciidoc b/docs/examples/0ade87c8cb0e3c188d2e3dce279d5cc2.asciidoc index d92db47de..058dd2421 100644 --- a/docs/examples/0ade87c8cb0e3c188d2e3dce279d5cc2.asciidoc +++ b/docs/examples/0ade87c8cb0e3c188d2e3dce279d5cc2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-filtering-api.asciidoc:116 +// connector/apis/update-connector-filtering-api.asciidoc:122 [source, python] ---- diff --git a/docs/examples/0aff04881be21eea45375ec4f4f50e66.asciidoc b/docs/examples/0aff04881be21eea45375ec4f4f50e66.asciidoc index c1a67eca1..ce55cdeaf 100644 --- a/docs/examples/0aff04881be21eea45375ec4f4f50e66.asciidoc +++ 
b/docs/examples/0aff04881be21eea45375ec4f4f50e66.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/invalidate-api-keys.asciidoc:83 +// rest-api/security/invalidate-api-keys.asciidoc:89 [source, python] ---- diff --git a/docs/examples/0b615ff4ef5a8847ee8109b2fd11619a.asciidoc b/docs/examples/0b615ff4ef5a8847ee8109b2fd11619a.asciidoc index 573e75fe5..41910d39b 100644 --- a/docs/examples/0b615ff4ef5a8847ee8109b2fd11619a.asciidoc +++ b/docs/examples/0b615ff4ef5a8847ee8109b2fd11619a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// scripting/using.asciidoc:238 +// scripting/using.asciidoc:243 [source, python] ---- diff --git a/docs/examples/0bc6155e0c88062a4d8490da49db3aa8.asciidoc b/docs/examples/0bc6155e0c88062a4d8490da49db3aa8.asciidoc new file mode 100644 index 000000000..2479ec8bb --- /dev/null +++ b/docs/examples/0bc6155e0c88062a4d8490da49db3aa8.asciidoc @@ -0,0 +1,57 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/retrievers-examples.asciidoc:562 + +[source, python] +---- +resp = client.search( + index="retrievers_example_nested", + retriever={ + "rrf": { + "retrievers": [ + { + "standard": { + "query": { + "nested": { + "path": "nested_field", + "inner_hits": { + "name": "nested_vector", + "_source": False, + "fields": [ + "nested_field.paragraph_id" + ] + }, + "query": { + "knn": { + "field": "nested_field.nested_vector", + "query_vector": [ + 1, + 0, + 0.5 + ], + "k": 10 + } + } + } + } + } + }, + { + "standard": { + "query": { + "term": { + "topic": "ai" + } + } + } + } + ], + "rank_window_size": 10, + "rank_constant": 1 + } + }, + source=[ + "topic" + ], +) +print(resp) +---- diff --git a/docs/examples/0bee07a581c5776e068f6f4efad5a399.asciidoc b/docs/examples/0bee07a581c5776e068f6f4efad5a399.asciidoc index 3e564fc3a..3cc5e8efb 100644 --- a/docs/examples/0bee07a581c5776e068f6f4efad5a399.asciidoc +++ b/docs/examples/0bee07a581c5776e068f6f4efad5a399.asciidoc @@ -1,10 +1,15 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-across-clusters.asciidoc:197 +// esql/esql-across-clusters.asciidoc:192 [source, python] ---- -resp = client.esql.async_query( - format="json", +resp = client.perform_request( + "POST", + "/_query/async", + params={ + "format": "json" + }, + headers={"Content-Type": "application/json"}, body={ "query": "\n FROM my-index-000001,cluster_one:my-index-000001,cluster_two:my-index*\n | STATS COUNT(http.response.status_code) BY user.id\n | LIMIT 2\n ", "include_ccs_metadata": True diff --git a/docs/examples/0c2d9ac7e3f28d4d802e21cbbbcfeb34.asciidoc b/docs/examples/0c2d9ac7e3f28d4d802e21cbbbcfeb34.asciidoc index 0a3e4fe55..1ccb32524 100644 --- a/docs/examples/0c2d9ac7e3f28d4d802e21cbbbcfeb34.asciidoc +++ b/docs/examples/0c2d9ac7e3f28d4d802e21cbbbcfeb34.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/recovery.asciidoc:112 +// cat/recovery.asciidoc:118 [source, python] ---- diff --git a/docs/examples/0c464965126cc09e6812716a145991d4.asciidoc b/docs/examples/0c464965126cc09e6812716a145991d4.asciidoc index 0f5465ce3..e3c34eb90 100644 --- a/docs/examples/0c464965126cc09e6812716a145991d4.asciidoc +++ b/docs/examples/0c464965126cc09e6812716a145991d4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/nodes-info.asciidoc:301 +// cluster/nodes-info.asciidoc:306 [source, python] ---- diff --git a/docs/examples/160986f49758f4e8345d183a842f6351.asciidoc b/docs/examples/0c52af573c9401a2a687e86a4beb182b.asciidoc similarity index 78% 
rename from docs/examples/160986f49758f4e8345d183a842f6351.asciidoc rename to docs/examples/0c52af573c9401a2a687e86a4beb182b.asciidoc index 7b460b136..8a7a866b5 100644 --- a/docs/examples/160986f49758f4e8345d183a842f6351.asciidoc +++ b/docs/examples/0c52af573c9401a2a687e86a4beb182b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/processors/attachment.asciidoc:165 +// ingest/processors/attachment.asciidoc:214 [source, python] ---- @@ -10,7 +10,7 @@ resp = client.ingest.put_pipeline( { "attachment": { "field": "data", - "remove_binary": False + "remove_binary": True } } ], diff --git a/docs/examples/0c6f9c9da75293fae69659ac1d6329de.asciidoc b/docs/examples/0c6f9c9da75293fae69659ac1d6329de.asciidoc index 53ac89709..739b8288c 100644 --- a/docs/examples/0c6f9c9da75293fae69659ac1d6329de.asciidoc +++ b/docs/examples/0c6f9c9da75293fae69659ac1d6329de.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/invalidate-tokens.asciidoc:175 +// rest-api/security/invalidate-tokens.asciidoc:181 [source, python] ---- diff --git a/docs/examples/0c6fc67c2dd1c1771cd866ce471d74e1.asciidoc b/docs/examples/0c6fc67c2dd1c1771cd866ce471d74e1.asciidoc index 3852a2102..a8056bc77 100644 --- a/docs/examples/0c6fc67c2dd1c1771cd866ce471d74e1.asciidoc +++ b/docs/examples/0c6fc67c2dd1c1771cd866ce471d74e1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/create-role-mappings.asciidoc:206 +// rest-api/security/create-role-mappings.asciidoc:212 [source, python] ---- diff --git a/docs/examples/0c892d328b73d38396aaef6d9cbcd36b.asciidoc b/docs/examples/0c892d328b73d38396aaef6d9cbcd36b.asciidoc index fc37a12ca..157a9b740 100644 --- a/docs/examples/0c892d328b73d38396aaef6d9cbcd36b.asciidoc +++ b/docs/examples/0c892d328b73d38396aaef6d9cbcd36b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/delete.asciidoc:82 +// docs/delete.asciidoc:88 [source, python] ---- diff --git a/docs/examples/0d30077cd34e93377a3a86f2ebd69415.asciidoc b/docs/examples/0d30077cd34e93377a3a86f2ebd69415.asciidoc index 1d9eb5f3c..abca0eadf 100644 --- a/docs/examples/0d30077cd34e93377a3a86f2ebd69415.asciidoc +++ b/docs/examples/0d30077cd34e93377a3a86f2ebd69415.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/create-connector-api.asciidoc:112 +// connector/apis/create-connector-api.asciidoc:118 [source, python] ---- diff --git a/docs/examples/0d689ac6e78be5d438f9b5d441be2b44.asciidoc b/docs/examples/0d689ac6e78be5d438f9b5d441be2b44.asciidoc new file mode 100644 index 000000000..816c0d5bb --- /dev/null +++ b/docs/examples/0d689ac6e78be5d438f9b5d441be2b44.asciidoc @@ -0,0 +1,60 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/retrievers-examples.asciidoc:941 + +[source, python] +---- +resp = client.search( + index="retrievers_example", + retriever={ + "rrf": { + "retrievers": [ + { + "standard": { + "query": { + "term": { + "topic": "elastic" + } + } + } + }, + { + "rrf": { + "retrievers": [ + { + "standard": { + "query": { + "query_string": { + "query": "(information retrieval) OR (artificial intelligence)", + "default_field": "text" + } + } + } + }, + { + "knn": { + "field": "vector", + "query_vector": [ + 0.23, + 0.67, + 0.89 + ], + "k": 3, + "num_candidates": 5 + } + } + ], + "rank_window_size": 10, + "rank_constant": 1 + } + } + ], + "rank_window_size": 10, + "rank_constant": 1 + } + }, + source=False, + size=1, + explain=True, +) +print(resp) +---- diff --git 
a/docs/examples/0d8063b484a18f8672fb5ed8712c5c97.asciidoc b/docs/examples/0d8063b484a18f8672fb5ed8712c5c97.asciidoc index a0fcce54b..882dc89ab 100644 --- a/docs/examples/0d8063b484a18f8672fb5ed8712c5c97.asciidoc +++ b/docs/examples/0d8063b484a18f8672fb5ed8712c5c97.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-index-template.asciidoc:299 +// indices/put-index-template.asciidoc:305 [source, python] ---- diff --git a/docs/examples/0d94d76b7f00d0459d1f8c962c144dcd.asciidoc b/docs/examples/0d94d76b7f00d0459d1f8c962c144dcd.asciidoc index 916b6c7dd..e5bbbd493 100644 --- a/docs/examples/0d94d76b7f00d0459d1f8c962c144dcd.asciidoc +++ b/docs/examples/0d94d76b7f00d0459d1f8c962c144dcd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/create-role-mappings.asciidoc:308 +// rest-api/security/create-role-mappings.asciidoc:314 [source, python] ---- diff --git a/docs/examples/0da747e9d98bae157d3520ff1b489ad4.asciidoc b/docs/examples/0da747e9d98bae157d3520ff1b489ad4.asciidoc index 259041fa3..95b8034eb 100644 --- a/docs/examples/0da747e9d98bae157d3520ff1b489ad4.asciidoc +++ b/docs/examples/0da747e9d98bae157d3520ff1b489ad4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/repository-s3.asciidoc:41 +// snapshot-restore/repository-s3.asciidoc:45 [source, python] ---- diff --git a/docs/examples/0dd30ffe2f900dde86cc9bb601d5e68e.asciidoc b/docs/examples/0dd30ffe2f900dde86cc9bb601d5e68e.asciidoc index bffcc9942..73b37ee80 100644 --- a/docs/examples/0dd30ffe2f900dde86cc9bb601d5e68e.asciidoc +++ b/docs/examples/0dd30ffe2f900dde86cc9bb601d5e68e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/nodes.asciidoc:378 +// cat/nodes.asciidoc:387 [source, python] ---- diff --git a/docs/examples/0ddf705317d9c5095b4a1419a2e3bace.asciidoc b/docs/examples/0ddf705317d9c5095b4a1419a2e3bace.asciidoc index 24bf20afd..9b024dea4 100644 --- a/docs/examples/0ddf705317d9c5095b4a1419a2e3bace.asciidoc +++ b/docs/examples/0ddf705317d9c5095b4a1419a2e3bace.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-app-privileges.asciidoc:95 +// rest-api/security/get-app-privileges.asciidoc:101 [source, python] ---- diff --git a/docs/examples/0dfa9733c94bc43c6f14c7b6984c98fb.asciidoc b/docs/examples/0dfa9733c94bc43c6f14c7b6984c98fb.asciidoc index 41ae4d4dd..1501cad9e 100644 --- a/docs/examples/0dfa9733c94bc43c6f14c7b6984c98fb.asciidoc +++ b/docs/examples/0dfa9733c94bc43c6f14c7b6984c98fb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/component-templates.asciidoc:107 +// cat/component-templates.asciidoc:113 [source, python] ---- diff --git a/docs/examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc b/docs/examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc index fd7eec55f..285b99308 100644 --- a/docs/examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc +++ b/docs/examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc @@ -1,10 +1,12 @@ // This file is autogenerated, DO NOT EDIT -// search-application/apis/search-application-render-query.asciidoc:114 +// search-application/apis/search-application-render-query.asciidoc:119 [source, python] ---- -resp = client.search_application.render_query( - name="my-app", +resp = client.perform_request( + "POST", + "/_application/search_application/my-app/_render_query", + headers={"Content-Type": "application/json"}, body={ "params": { "query_string": "my first query", diff --git 
a/docs/examples/0e31b8ad176b31028becf9500989bcbd.asciidoc b/docs/examples/0e31b8ad176b31028becf9500989bcbd.asciidoc index 5965ade94..fed70ff60 100644 --- a/docs/examples/0e31b8ad176b31028becf9500989bcbd.asciidoc +++ b/docs/examples/0e31b8ad176b31028becf9500989bcbd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-watsonx-ai.asciidoc:96 +// inference/service-watsonx-ai.asciidoc:102 [source, python] ---- diff --git a/docs/examples/0e5d25c7bb738c42d471020d678e2966.asciidoc b/docs/examples/0e5d25c7bb738c42d471020d678e2966.asciidoc index 3baa81cf2..15393e78e 100644 --- a/docs/examples/0e5d25c7bb738c42d471020d678e2966.asciidoc +++ b/docs/examples/0e5d25c7bb738c42d471020d678e2966.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/start-trained-model-deployment.asciidoc:200 +// ml/trained-models/apis/start-trained-model-deployment.asciidoc:206 [source, python] ---- diff --git a/docs/examples/0e83f140237d75469a428ff403564bb5.asciidoc b/docs/examples/0e83f140237d75469a428ff403564bb5.asciidoc deleted file mode 100644 index 32d3f186a..000000000 --- a/docs/examples/0e83f140237d75469a428ff403564bb5.asciidoc +++ /dev/null @@ -1,15 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// modules/cluster/disk_allocator.asciidoc:162 - -[source, python] ----- -resp = client.cluster.put_settings( - persistent={ - "cluster.routing.allocation.disk.watermark.low": "100gb", - "cluster.routing.allocation.disk.watermark.high": "50gb", - "cluster.routing.allocation.disk.watermark.flood_stage": "10gb", - "cluster.info.update.interval": "1m" - }, -) -print(resp) ----- diff --git a/docs/examples/0ea146b178561bc8b9002bed8a35641f.asciidoc b/docs/examples/0ea146b178561bc8b9002bed8a35641f.asciidoc index ffc82f814..c038b9ec9 100644 --- a/docs/examples/0ea146b178561bc8b9002bed8a35641f.asciidoc +++ b/docs/examples/0ea146b178561bc8b9002bed8a35641f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// autoscaling/apis/get-autoscaling-policy.asciidoc:68 +// autoscaling/apis/get-autoscaling-policy.asciidoc:75 [source, python] ---- diff --git a/docs/examples/0ea2167ce7c87d311b20c4f8c698a8d0.asciidoc b/docs/examples/0ea2167ce7c87d311b20c4f8c698a8d0.asciidoc index bbccc1dc4..b62b18e0c 100644 --- a/docs/examples/0ea2167ce7c87d311b20c4f8c698a8d0.asciidoc +++ b/docs/examples/0ea2167ce7c87d311b20c4f8c698a8d0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/point-in-time-api.asciidoc:190 +// search/point-in-time-api.asciidoc:196 [source, python] ---- diff --git a/docs/examples/0eae571e9e1c40a40cb4b1c9530a8987.asciidoc b/docs/examples/0eae571e9e1c40a40cb4b1c9530a8987.asciidoc index 3741e5dca..bb8bed4b5 100644 --- a/docs/examples/0eae571e9e1c40a40cb4b1c9530a8987.asciidoc +++ b/docs/examples/0eae571e9e1c40a40cb4b1c9530a8987.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ilm/apis/migrate-to-data-tiers.asciidoc:154 +// ilm/apis/migrate-to-data-tiers.asciidoc:160 [source, python] ---- diff --git a/docs/examples/0f4583c56cfe5bd59eeb35bfba02957c.asciidoc b/docs/examples/0f4583c56cfe5bd59eeb35bfba02957c.asciidoc index b69dcbe47..aaeada4ee 100644 --- a/docs/examples/0f4583c56cfe5bd59eeb35bfba02957c.asciidoc +++ b/docs/examples/0f4583c56cfe5bd59eeb35bfba02957c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/rank-eval.asciidoc:312 +// search/rank-eval.asciidoc:318 [source, python] ---- diff --git a/docs/examples/0f7aa40ad26d59a9268630b980a3d594.asciidoc 
b/docs/examples/0f7aa40ad26d59a9268630b980a3d594.asciidoc index 1efc4dc25..d76d2022a 100644 --- a/docs/examples/0f7aa40ad26d59a9268630b980a3d594.asciidoc +++ b/docs/examples/0f7aa40ad26d59a9268630b980a3d594.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/simulate-template.asciidoc:55 +// indices/simulate-template.asciidoc:61 [source, python] ---- diff --git a/docs/examples/0fa220ee3fb267020382f74aa70eb1e9.asciidoc b/docs/examples/0fa220ee3fb267020382f74aa70eb1e9.asciidoc index c9dab1646..4f9bbfa21 100644 --- a/docs/examples/0fa220ee3fb267020382f74aa70eb1e9.asciidoc +++ b/docs/examples/0fa220ee3fb267020382f74aa70eb1e9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/state.asciidoc:151 +// cluster/state.asciidoc:157 [source, python] ---- diff --git a/docs/examples/0fc4b589df5388da784c6d981e769e31.asciidoc b/docs/examples/0fc4b589df5388da784c6d981e769e31.asciidoc index 938f9a3ec..e499f3289 100644 --- a/docs/examples/0fc4b589df5388da784c6d981e769e31.asciidoc +++ b/docs/examples/0fc4b589df5388da784c6d981e769e31.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-index-template-v1.asciidoc:149 +// indices/put-index-template-v1.asciidoc:155 [source, python] ---- diff --git a/docs/examples/0fe74ccd098c742619805a7c0bd0fae6.asciidoc b/docs/examples/0fe74ccd098c742619805a7c0bd0fae6.asciidoc index 1d1721228..ff294eab9 100644 --- a/docs/examples/0fe74ccd098c742619805a7c0bd0fae6.asciidoc +++ b/docs/examples/0fe74ccd098c742619805a7c0bd0fae6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// transform/apis/schedule-now-transform.asciidoc:52 +// transform/apis/schedule-now-transform.asciidoc:58 [source, python] ---- diff --git a/docs/examples/10796a4efa3c2a5e9e50b6bdeb08bbb9.asciidoc b/docs/examples/10796a4efa3c2a5e9e50b6bdeb08bbb9.asciidoc index 0b6368d46..c77e550d8 100644 --- a/docs/examples/10796a4efa3c2a5e9e50b6bdeb08bbb9.asciidoc +++ b/docs/examples/10796a4efa3c2a5e9e50b6bdeb08bbb9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/update-desired-nodes.asciidoc:74 +// cluster/update-desired-nodes.asciidoc:80 [source, python] ---- diff --git a/docs/examples/109db8ff7b715aca98de8ef1ab7e44ab.asciidoc b/docs/examples/109db8ff7b715aca98de8ef1ab7e44ab.asciidoc index 0c41f9430..433cdd913 100644 --- a/docs/examples/109db8ff7b715aca98de8ef1ab7e44ab.asciidoc +++ b/docs/examples/109db8ff7b715aca98de8ef1ab7e44ab.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ccr/apis/follow/post-resume-follow.asciidoc:37 +// ccr/apis/follow/post-resume-follow.asciidoc:43 [source, python] ---- diff --git a/docs/examples/10b924bf6298aa6157ed00ce12f8edc1.asciidoc b/docs/examples/10b924bf6298aa6157ed00ce12f8edc1.asciidoc index 0d05f28bd..236094851 100644 --- a/docs/examples/10b924bf6298aa6157ed00ce12f8edc1.asciidoc +++ b/docs/examples/10b924bf6298aa6157ed00ce12f8edc1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/watcher/execute-watch.asciidoc:363 +// rest-api/watcher/execute-watch.asciidoc:369 [source, python] ---- diff --git a/docs/examples/10d9da8a3b7061479be908c8c5c76cfb.asciidoc b/docs/examples/10d9da8a3b7061479be908c8c5c76cfb.asciidoc index c1ee092f8..f0d3a5e77 100644 --- a/docs/examples/10d9da8a3b7061479be908c8c5c76cfb.asciidoc +++ b/docs/examples/10d9da8a3b7061479be908c8c5c76cfb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-api-keys.asciidoc:217 +// 
rest-api/security/get-api-keys.asciidoc:223 [source, python] ---- diff --git a/docs/examples/10f7a2c0a952ba3bc3d20b7d5f310f41.asciidoc b/docs/examples/10f7a2c0a952ba3bc3d20b7d5f310f41.asciidoc index 076157426..c1bfee71f 100644 --- a/docs/examples/10f7a2c0a952ba3bc3d20b7d5f310f41.asciidoc +++ b/docs/examples/10f7a2c0a952ba3bc3d20b7d5f310f41.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search-application/apis/list-search-applications.asciidoc:94 +// search-application/apis/list-search-applications.asciidoc:99 [source, python] ---- diff --git a/docs/examples/114d470e752efa9672ca68d7290fada8.asciidoc b/docs/examples/114d470e752efa9672ca68d7290fada8.asciidoc index 5c3bcaac2..c37479ac3 100644 --- a/docs/examples/114d470e752efa9672ca68d7290fada8.asciidoc +++ b/docs/examples/114d470e752efa9672ca68d7290fada8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/add-alias.asciidoc:10 +// indices/add-alias.asciidoc:16 [source, python] ---- diff --git a/docs/examples/115529722ba30b0b0d51a7ff87e59198.asciidoc b/docs/examples/115529722ba30b0b0d51a7ff87e59198.asciidoc index f80eab225..b0e58c04f 100644 --- a/docs/examples/115529722ba30b0b0d51a7ff87e59198.asciidoc +++ b/docs/examples/115529722ba30b0b0d51a7ff87e59198.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-roles.asciidoc:58 +// rest-api/security/get-roles.asciidoc:64 [source, python] ---- diff --git a/docs/examples/11c395d1649733bcab853fe31ec393b2.asciidoc b/docs/examples/11c395d1649733bcab853fe31ec393b2.asciidoc index 0d33f1a71..e1b7fe6cf 100644 --- a/docs/examples/11c395d1649733bcab853fe31ec393b2.asciidoc +++ b/docs/examples/11c395d1649733bcab853fe31ec393b2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// licensing/get-license.asciidoc:56 +// licensing/get-license.asciidoc:62 [source, python] ---- diff --git a/docs/examples/11e772ff5dbb73408ae30a1a367a0d9b.asciidoc b/docs/examples/11e772ff5dbb73408ae30a1a367a0d9b.asciidoc index a3009b49d..3bea63ae5 100644 --- a/docs/examples/11e772ff5dbb73408ae30a1a367a0d9b.asciidoc +++ b/docs/examples/11e772ff5dbb73408ae30a1a367a0d9b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/apis/delete-pipeline.asciidoc:91 +// ingest/apis/delete-pipeline.asciidoc:97 [source, python] ---- diff --git a/docs/examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc b/docs/examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc new file mode 100644 index 000000000..5fb759d64 --- /dev/null +++ b/docs/examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// inference/chat-completion-inference.asciidoc:301 + +[source, python] +---- +resp = client.perform_request( + "POST", + "/_inference/chat_completion/openai-completion/_stream", + headers={"Content-Type": "application/json"}, + body={ + "model": "gpt-4o", + "messages": [ + { + "role": "user", + "content": "What is Elastic?" 
+ } + ] + }, +) +print(resp) +---- diff --git a/docs/examples/12433d2b637d002e8d5c9a1adce69d3b.asciidoc b/docs/examples/12433d2b637d002e8d5c9a1adce69d3b.asciidoc index 210b477f5..3d588dde2 100644 --- a/docs/examples/12433d2b637d002e8d5c9a1adce69d3b.asciidoc +++ b/docs/examples/12433d2b637d002e8d5c9a1adce69d3b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:100 +// indices/put-mapping.asciidoc:106 [source, python] ---- diff --git a/docs/examples/1259a9c151730e42de35bb2d1ba700c6.asciidoc b/docs/examples/1259a9c151730e42de35bb2d1ba700c6.asciidoc index b9f36704e..71588e2ce 100644 --- a/docs/examples/1259a9c151730e42de35bb2d1ba700c6.asciidoc +++ b/docs/examples/1259a9c151730e42de35bb2d1ba700c6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/get-mapping.asciidoc:70 +// indices/get-mapping.asciidoc:76 [source, python] ---- diff --git a/docs/examples/128283698535116931dca9d16a16dca2.asciidoc b/docs/examples/128283698535116931dca9d16a16dca2.asciidoc index 70028cd42..905f74f10 100644 --- a/docs/examples/128283698535116931dca9d16a16dca2.asciidoc +++ b/docs/examples/128283698535116931dca9d16a16dca2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-roles.asciidoc:93 +// rest-api/security/get-roles.asciidoc:99 [source, python] ---- diff --git a/docs/examples/1295f51b9e5d4ba9987b02478146b50b.asciidoc b/docs/examples/1295f51b9e5d4ba9987b02478146b50b.asciidoc index 69cab898b..7c4537814 100644 --- a/docs/examples/1295f51b9e5d4ba9987b02478146b50b.asciidoc +++ b/docs/examples/1295f51b9e5d4ba9987b02478146b50b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/high-jvm-memory-pressure.asciidoc:72 +// troubleshooting/common-issues/high-jvm-memory-pressure.asciidoc:76 [source, python] ---- diff --git a/docs/examples/12d5ff4b8d3d832b32a7e7e2a520d0bb.asciidoc b/docs/examples/12d5ff4b8d3d832b32a7e7e2a520d0bb.asciidoc index 8b8fe870b..dd56d383e 100644 --- a/docs/examples/12d5ff4b8d3d832b32a7e7e2a520d0bb.asciidoc +++ b/docs/examples/12d5ff4b8d3d832b32a7e7e2a520d0bb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/get-calendar-event.asciidoc:156 +// ml/anomaly-detection/apis/get-calendar-event.asciidoc:162 [source, python] ---- diff --git a/docs/examples/135819da3a4bde684357c57a49ad8e85.asciidoc b/docs/examples/135819da3a4bde684357c57a49ad8e85.asciidoc index 90a789649..dae5a72e5 100644 --- a/docs/examples/135819da3a4bde684357c57a49ad8e85.asciidoc +++ b/docs/examples/135819da3a4bde684357c57a49ad8e85.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// migration/apis/deprecation.asciidoc:61 +// migration/apis/deprecation.asciidoc:67 [source, python] ---- diff --git a/docs/examples/136ae86b8d497dda799cf1cb583df929.asciidoc b/docs/examples/136ae86b8d497dda799cf1cb583df929.asciidoc index fbcdd4845..b49a8add3 100644 --- a/docs/examples/136ae86b8d497dda799cf1cb583df929.asciidoc +++ b/docs/examples/136ae86b8d497dda799cf1cb583df929.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/get-field-mapping.asciidoc:74 +// indices/get-field-mapping.asciidoc:80 [source, python] ---- diff --git a/docs/examples/137709a0a0dc38d6094291c9fc75b804.asciidoc b/docs/examples/137709a0a0dc38d6094291c9fc75b804.asciidoc index 3219eef08..9efbb0b9b 100644 --- a/docs/examples/137709a0a0dc38d6094291c9fc75b804.asciidoc +++ b/docs/examples/137709a0a0dc38d6094291c9fc75b804.asciidoc @@ 
-1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/get.asciidoc:342 +// docs/get.asciidoc:348 [source, python] ---- diff --git a/docs/examples/138f7703c47ddf63633fdf5ca9bc7fa4.asciidoc b/docs/examples/138f7703c47ddf63633fdf5ca9bc7fa4.asciidoc index 7790b2fca..7c7526192 100644 --- a/docs/examples/138f7703c47ddf63633fdf5ca9bc7fa4.asciidoc +++ b/docs/examples/138f7703c47ddf63633fdf5ca9bc7fa4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/get.asciidoc:385 +// docs/get.asciidoc:391 [source, python] ---- diff --git a/docs/examples/13d90ba227131aefbf4fcfd5992e662a.asciidoc b/docs/examples/13d90ba227131aefbf4fcfd5992e662a.asciidoc index 4a7ac32aa..fc457fe4e 100644 --- a/docs/examples/13d90ba227131aefbf4fcfd5992e662a.asciidoc +++ b/docs/examples/13d90ba227131aefbf4fcfd5992e662a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/bool-query.asciidoc:156 +// query-dsl/bool-query.asciidoc:159 [source, python] ---- diff --git a/docs/examples/13df08eefc9ba98e311793bbca74133b.asciidoc b/docs/examples/13df08eefc9ba98e311793bbca74133b.asciidoc index 4a3854b00..352868b73 100644 --- a/docs/examples/13df08eefc9ba98e311793bbca74133b.asciidoc +++ b/docs/examples/13df08eefc9ba98e311793bbca74133b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-user-profile.asciidoc:109 +// rest-api/security/get-user-profile.asciidoc:115 [source, python] ---- diff --git a/docs/examples/13ebcb01ebf1b5d2b5c52739db47e30c.asciidoc b/docs/examples/13ebcb01ebf1b5d2b5c52739db47e30c.asciidoc index 29086570d..c2870a215 100644 --- a/docs/examples/13ebcb01ebf1b5d2b5c52739db47e30c.asciidoc +++ b/docs/examples/13ebcb01ebf1b5d2b5c52739db47e30c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/recovery.asciidoc:179 +// indices/recovery.asciidoc:185 [source, python] ---- diff --git a/docs/examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc b/docs/examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc index 67a855b1c..442cbc631 100644 --- a/docs/examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc +++ b/docs/examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/post-inference.asciidoc:196 +// inference/post-inference.asciidoc:202 [source, python] ---- diff --git a/docs/examples/141ef0ebaa3b0772892b79b9bb85efb0.asciidoc b/docs/examples/141ef0ebaa3b0772892b79b9bb85efb0.asciidoc new file mode 100644 index 000000000..11d594a0f --- /dev/null +++ b/docs/examples/141ef0ebaa3b0772892b79b9bb85efb0.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// inference/update-inference.asciidoc:83 + +[source, python] +---- +resp = client.inference.put( + task_type="my-inference-endpoint", + inference_id="_update", + inference_config={ + "service_settings": { + "api_key": "" + } + }, +) +print(resp) +---- diff --git a/docs/examples/1420a22aa817c7a996baaed0ad366d6f.asciidoc b/docs/examples/1420a22aa817c7a996baaed0ad366d6f.asciidoc deleted file mode 100644 index e6437237a..000000000 --- a/docs/examples/1420a22aa817c7a996baaed0ad366d6f.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// query-dsl/semantic-query.asciidoc:143 - -[source, python] ----- -resp = client.search( - index="test-index", - query={ - "nested": { - "path": "inference_field.inference.chunks", - "query": { - "sparse_vector": { - "field": "inference_field.inference.chunks.embeddings", - "inference_id": "my-inference-id", - "query": 
"mountain lake" - } - } - } - }, -) -print(resp) ----- diff --git a/docs/examples/146bd22fd0e7be2345619e8f11d3a4cb.asciidoc b/docs/examples/146bd22fd0e7be2345619e8f11d3a4cb.asciidoc index 4e1e563b6..9ad89f39c 100644 --- a/docs/examples/146bd22fd0e7be2345619e8f11d3a4cb.asciidoc +++ b/docs/examples/146bd22fd0e7be2345619e8f11d3a4cb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/hotspotting.asciidoc:249 +// troubleshooting/common-issues/hotspotting.asciidoc:253 [source, python] ---- diff --git a/docs/examples/147d341cb212dcc015c129a9c5dcf9c9.asciidoc b/docs/examples/147d341cb212dcc015c129a9c5dcf9c9.asciidoc index b4f01a0ca..832d1d5d8 100644 --- a/docs/examples/147d341cb212dcc015c129a9c5dcf9c9.asciidoc +++ b/docs/examples/147d341cb212dcc015c129a9c5dcf9c9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/put-trained-models-aliases.asciidoc:82 +// ml/trained-models/apis/put-trained-models-aliases.asciidoc:87 [source, python] ---- diff --git a/docs/examples/14a33c364873c2f930ca83d0a3005389.asciidoc b/docs/examples/14a33c364873c2f930ca83d0a3005389.asciidoc index a609fa01a..140091a2a 100644 --- a/docs/examples/14a33c364873c2f930ca83d0a3005389.asciidoc +++ b/docs/examples/14a33c364873c2f930ca83d0a3005389.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/disk-usage-exceeded.asciidoc:42 +// troubleshooting/common-issues/disk-usage-exceeded.asciidoc:46 [source, python] ---- diff --git a/docs/examples/14b81f96297952970b78a3216e059596.asciidoc b/docs/examples/14b81f96297952970b78a3216e059596.asciidoc index 11c9b50c0..fbcc88b3c 100644 --- a/docs/examples/14b81f96297952970b78a3216e059596.asciidoc +++ b/docs/examples/14b81f96297952970b78a3216e059596.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/async-search.asciidoc:153 +// search/async-search.asciidoc:159 [source, python] ---- diff --git a/docs/examples/150b5fee5678bf8cdf0932da73eada80.asciidoc b/docs/examples/150b5fee5678bf8cdf0932da73eada80.asciidoc index f5004f878..034357a7b 100644 --- a/docs/examples/150b5fee5678bf8cdf0932da73eada80.asciidoc +++ b/docs/examples/150b5fee5678bf8cdf0932da73eada80.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/nodes-stats.asciidoc:2550 +// cluster/nodes-stats.asciidoc:2556 [source, python] ---- diff --git a/docs/examples/1570976f7807b88dc8a046b833be057b.asciidoc b/docs/examples/1570976f7807b88dc8a046b833be057b.asciidoc index 293680f4f..fe84afc8a 100644 --- a/docs/examples/1570976f7807b88dc8a046b833be057b.asciidoc +++ b/docs/examples/1570976f7807b88dc8a046b833be057b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/hotspotting.asciidoc:30 +// troubleshooting/common-issues/hotspotting.asciidoc:34 [source, python] ---- diff --git a/docs/examples/15a34bfe0ef8ef6333c8c7b55c011e5d.asciidoc b/docs/examples/15a34bfe0ef8ef6333c8c7b55c011e5d.asciidoc index d6f040464..f5aa74945 100644 --- a/docs/examples/15a34bfe0ef8ef6333c8c7b55c011e5d.asciidoc +++ b/docs/examples/15a34bfe0ef8ef6333c8c7b55c011e5d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/analyze.asciidoc:269 +// indices/analyze.asciidoc:275 [source, python] ---- diff --git a/docs/examples/15f769bbd7b5fddeb3353ae726b71b14.asciidoc b/docs/examples/15f769bbd7b5fddeb3353ae726b71b14.asciidoc index 9478424ba..998ee76a1 100644 --- a/docs/examples/15f769bbd7b5fddeb3353ae726b71b14.asciidoc 
+++ b/docs/examples/15f769bbd7b5fddeb3353ae726b71b14.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// vectors/vector-functions.asciidoc:401 +// vectors/vector-functions.asciidoc:405 [source, python] ---- diff --git a/docs/examples/162b5b693b713f0bfab1209d59443c46.asciidoc b/docs/examples/162b5b693b713f0bfab1209d59443c46.asciidoc index 2f6bf0c47..044c538d9 100644 --- a/docs/examples/162b5b693b713f0bfab1209d59443c46.asciidoc +++ b/docs/examples/162b5b693b713f0bfab1209d59443c46.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/bool-query.asciidoc:130 +// query-dsl/bool-query.asciidoc:133 [source, python] ---- diff --git a/docs/examples/1637ef51d673b35cc8894ee80cd61c87.asciidoc b/docs/examples/1637ef51d673b35cc8894ee80cd61c87.asciidoc index 3701d222a..b56c30f38 100644 --- a/docs/examples/1637ef51d673b35cc8894ee80cd61c87.asciidoc +++ b/docs/examples/1637ef51d673b35cc8894ee80cd61c87.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/high-cpu-usage.asciidoc:24 +// troubleshooting/common-issues/high-cpu-usage.asciidoc:28 [source, python] ---- diff --git a/docs/examples/16634cfa7916cf4e8048a1d70e6240f2.asciidoc b/docs/examples/16634cfa7916cf4e8048a1d70e6240f2.asciidoc index 4cd3c344a..52b0a6e97 100644 --- a/docs/examples/16634cfa7916cf4e8048a1d70e6240f2.asciidoc +++ b/docs/examples/16634cfa7916cf4e8048a1d70e6240f2.asciidoc @@ -12,7 +12,7 @@ resp = client.search_application.put( "template": { "script": { "lang": "mustache", - "source": "\n {\n \"query\": {\n \"bool\": {\n \"must\": [\n {{#query}}\n \n {{/query}}\n ],\n \"filter\": {{#toJson}}_es_filters{{/toJson}}\n }\n },\n \"_source\": {\n \"includes\": [\"title\", \"plot\"]\n },\n \"highlight\": {\n \"fields\": {\n \"title\": { \"fragment_size\": 0 },\n \"plot\": { \"fragment_size\": 200 }\n }\n },\n \"aggs\": {{#toJson}}_es_aggs{{/toJson}},\n \"from\": {{from}},\n \"size\": {{size}},\n \"sort\": {{#toJson}}_es_sort_fields{{/toJson}}\n }\n ", + "source": "\n {\n \"query\": {\n \"bool\": {\n \"must\": [\n {{#query}}\n {{/query}}\n ],\n \"filter\": {{#toJson}}_es_filters{{/toJson}}\n }\n },\n \"_source\": {\n \"includes\": [\"title\", \"plot\"]\n },\n \"highlight\": {\n \"fields\": {\n \"title\": { \"fragment_size\": 0 },\n \"plot\": { \"fragment_size\": 200 }\n }\n },\n \"aggs\": {{#toJson}}_es_aggs{{/toJson}},\n \"from\": {{from}},\n \"size\": {{size}},\n \"sort\": {{#toJson}}_es_sort_fields{{/toJson}}\n }\n ", "params": { "query": "", "_es_filters": {}, diff --git a/docs/examples/166bcfc6d5d39defec7ad6aa44d0914b.asciidoc b/docs/examples/166bcfc6d5d39defec7ad6aa44d0914b.asciidoc index e6e17b4a9..b0fb68c8f 100644 --- a/docs/examples/166bcfc6d5d39defec7ad6aa44d0914b.asciidoc +++ b/docs/examples/166bcfc6d5d39defec7ad6aa44d0914b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/tasks.asciidoc:74 +// cluster/tasks.asciidoc:80 [source, python] ---- diff --git a/docs/examples/16985e5b17d2da0955a14fbe02e8dfca.asciidoc b/docs/examples/16985e5b17d2da0955a14fbe02e8dfca.asciidoc index 896661d47..e4a014c33 100644 --- a/docs/examples/16985e5b17d2da0955a14fbe02e8dfca.asciidoc +++ b/docs/examples/16985e5b17d2da0955a14fbe02e8dfca.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/termvectors.asciidoc:237 +// docs/termvectors.asciidoc:243 [source, python] ---- diff --git a/docs/examples/172155ca4bf6dfcbd489453f50739396.asciidoc b/docs/examples/172155ca4bf6dfcbd489453f50739396.asciidoc index 
dcf168990..872e66960 100644 --- a/docs/examples/172155ca4bf6dfcbd489453f50739396.asciidoc +++ b/docs/examples/172155ca4bf6dfcbd489453f50739396.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/apis/get-snapshot-api.asciidoc:402 +// snapshot-restore/apis/get-snapshot-api.asciidoc:408 [source, python] ---- diff --git a/docs/examples/17266cee5eaaddf08e5534bf580a1910.asciidoc b/docs/examples/17266cee5eaaddf08e5534bf580a1910.asciidoc index 3cc4afa13..595478934 100644 --- a/docs/examples/17266cee5eaaddf08e5534bf580a1910.asciidoc +++ b/docs/examples/17266cee5eaaddf08e5534bf580a1910.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/watcher/stats.asciidoc:84 +// rest-api/watcher/stats.asciidoc:90 [source, python] ---- diff --git a/docs/examples/17566e23c191f1004a2719f2c4242307.asciidoc b/docs/examples/17566e23c191f1004a2719f2c4242307.asciidoc index 0c0a031e1..28428b6a7 100644 --- a/docs/examples/17566e23c191f1004a2719f2c4242307.asciidoc +++ b/docs/examples/17566e23c191f1004a2719f2c4242307.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// autoscaling/apis/get-autoscaling-capacity.asciidoc:262 +// autoscaling/apis/get-autoscaling-capacity.asciidoc:268 [source, python] ---- diff --git a/docs/examples/178c920d5e8ec0071f77290fa059802c.asciidoc b/docs/examples/178c920d5e8ec0071f77290fa059802c.asciidoc index b95beb841..4f7a123ae 100644 --- a/docs/examples/178c920d5e8ec0071f77290fa059802c.asciidoc +++ b/docs/examples/178c920d5e8ec0071f77290fa059802c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/update-settings.asciidoc:132 +// indices/update-settings.asciidoc:138 [source, python] ---- diff --git a/docs/examples/c02c2916b97b6fa7db82dbc7f0378310.asciidoc b/docs/examples/17b1647c8509543f2388c886f2584a20.asciidoc similarity index 86% rename from docs/examples/c02c2916b97b6fa7db82dbc7f0378310.asciidoc rename to docs/examples/17b1647c8509543f2388c886f2584a20.asciidoc index d50b44d64..bd99a92db 100644 --- a/docs/examples/c02c2916b97b6fa7db82dbc7f0378310.asciidoc +++ b/docs/examples/17b1647c8509543f2388c886f2584a20.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// reranking/semantic-reranking.asciidoc:105 +// reranking/semantic-reranking.asciidoc:107 [source, python] ---- @@ -16,7 +16,7 @@ resp = client.search( } }, "field": "text", - "inference_id": "my-cohere-rerank-model", + "inference_id": "elastic-rerank", "inference_text": "How often does the moon hide the sun?", "rank_window_size": 100, "min_score": 0.5 diff --git a/docs/examples/17c2b0a6b0305804ff3b7fd3b4a68df3.asciidoc b/docs/examples/17c2b0a6b0305804ff3b7fd3b4a68df3.asciidoc index 06dbd0899..94f19586c 100644 --- a/docs/examples/17c2b0a6b0305804ff3b7fd3b4a68df3.asciidoc +++ b/docs/examples/17c2b0a6b0305804ff3b7fd3b4a68df3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/apis/simulate-pipeline.asciidoc:217 +// ingest/apis/simulate-pipeline.asciidoc:223 [source, python] ---- diff --git a/docs/examples/18ddb7e7a4bcafd449df956e828ed7a8.asciidoc b/docs/examples/18ddb7e7a4bcafd449df956e828ed7a8.asciidoc index 34fce2eb6..b64153dde 100644 --- a/docs/examples/18ddb7e7a4bcafd449df956e828ed7a8.asciidoc +++ b/docs/examples/18ddb7e7a4bcafd449df956e828ed7a8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update-by-query.asciidoc:546 +// docs/update-by-query.asciidoc:552 [source, python] ---- diff --git a/docs/examples/190a21e32db2125ddaea0f634e126a84.asciidoc 
b/docs/examples/190a21e32db2125ddaea0f634e126a84.asciidoc index be5b2b5c9..97747b9ec 100644 --- a/docs/examples/190a21e32db2125ddaea0f634e126a84.asciidoc +++ b/docs/examples/190a21e32db2125ddaea0f634e126a84.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/clone-index.asciidoc:91 +// indices/clone-index.asciidoc:97 [source, python] ---- diff --git a/docs/examples/192fa1f6f51dfb640e9e15bb5cd7eebc.asciidoc b/docs/examples/192fa1f6f51dfb640e9e15bb5cd7eebc.asciidoc index 16e0c0f3b..e53c1cce6 100644 --- a/docs/examples/192fa1f6f51dfb640e9e15bb5cd7eebc.asciidoc +++ b/docs/examples/192fa1f6f51dfb640e9e15bb5cd7eebc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ilm/error-handling.asciidoc:147 +// ilm/error-handling.asciidoc:148 [source, python] ---- diff --git a/docs/examples/194bbac15e709174ac85b681f3a3d137.asciidoc b/docs/examples/194bbac15e709174ac85b681f3a3d137.asciidoc index 9ace291db..6c8c37a22 100644 --- a/docs/examples/194bbac15e709174ac85b681f3a3d137.asciidoc +++ b/docs/examples/194bbac15e709174ac85b681f3a3d137.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-index-template.asciidoc:189 +// indices/put-index-template.asciidoc:195 [source, python] ---- diff --git a/docs/examples/196aed02b11def364bab84e455c1a073.asciidoc b/docs/examples/196aed02b11def364bab84e455c1a073.asciidoc index 8eeb99faf..bdc7e4d76 100644 --- a/docs/examples/196aed02b11def364bab84e455c1a073.asciidoc +++ b/docs/examples/196aed02b11def364bab84e455c1a073.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-index-template.asciidoc:327 +// indices/put-index-template.asciidoc:333 [source, python] ---- diff --git a/docs/examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc b/docs/examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc index 565dfeef0..3f54b088b 100644 --- a/docs/examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc +++ b/docs/examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc @@ -1,9 +1,12 @@ // This file is autogenerated, DO NOT EDIT -// ingest/apis/simulate-ingest.asciidoc:273 +// ingest/apis/simulate-ingest.asciidoc:279 [source, python] ---- -resp = client.simulate.ingest( +resp = client.perform_request( + "POST", + "/_ingest/_simulate", + headers={"Content-Type": "application/json"}, body={ "docs": [ { diff --git a/docs/examples/1a1f3421717ff744ed83232729289bb0.asciidoc b/docs/examples/1a1f3421717ff744ed83232729289bb0.asciidoc index 03648bd41..09e201a7b 100644 --- a/docs/examples/1a1f3421717ff744ed83232729289bb0.asciidoc +++ b/docs/examples/1a1f3421717ff744ed83232729289bb0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// slm/apis/slm-delete.asciidoc:65 +// slm/apis/slm-delete.asciidoc:71 [source, python] ---- diff --git a/docs/examples/1a3897cfb4f974c09d0d847baac8aa6d.asciidoc b/docs/examples/1a3897cfb4f974c09d0d847baac8aa6d.asciidoc index 3935ff8bc..d122e4ea3 100644 --- a/docs/examples/1a3897cfb4f974c09d0d847baac8aa6d.asciidoc +++ b/docs/examples/1a3897cfb4f974c09d0d847baac8aa6d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/hotspotting.asciidoc:192 +// troubleshooting/common-issues/hotspotting.asciidoc:196 [source, python] ---- diff --git a/docs/examples/1a3a4b8a4bfee4ab84ddd13d8835f560.asciidoc b/docs/examples/1a3a4b8a4bfee4ab84ddd13d8835f560.asciidoc index 94c618bb8..1c3d79093 100644 --- a/docs/examples/1a3a4b8a4bfee4ab84ddd13d8835f560.asciidoc +++ b/docs/examples/1a3a4b8a4bfee4ab84ddd13d8835f560.asciidoc @@ -1,5 
+1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/df-analytics/apis/start-dfanalytics.asciidoc:82 +// ml/df-analytics/apis/start-dfanalytics.asciidoc:88 [source, python] ---- diff --git a/docs/examples/1a8d92e93481c432a91f7c213099800a.asciidoc b/docs/examples/1a8d92e93481c432a91f7c213099800a.asciidoc index 8f5bbf405..6a6e71c2d 100644 --- a/docs/examples/1a8d92e93481c432a91f7c213099800a.asciidoc +++ b/docs/examples/1a8d92e93481c432a91f7c213099800a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/query-api-key.asciidoc:290 +// rest-api/security/query-api-key.asciidoc:295 [source, python] ---- diff --git a/docs/examples/1a9e03ce0355872a7db27fedc783fbec.asciidoc b/docs/examples/1a9e03ce0355872a7db27fedc783fbec.asciidoc index c5c8f0d23..8de0fcb9a 100644 --- a/docs/examples/1a9e03ce0355872a7db27fedc783fbec.asciidoc +++ b/docs/examples/1a9e03ce0355872a7db27fedc783fbec.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-google-vertex-ai.asciidoc:145 +// inference/service-google-vertex-ai.asciidoc:151 [source, python] ---- diff --git a/docs/examples/1a9efb56adb2cd84faa9825a129381b9.asciidoc b/docs/examples/1a9efb56adb2cd84faa9825a129381b9.asciidoc index 590880dfc..870811976 100644 --- a/docs/examples/1a9efb56adb2cd84faa9825a129381b9.asciidoc +++ b/docs/examples/1a9efb56adb2cd84faa9825a129381b9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/rollup-search.asciidoc:213 +// rollup/apis/rollup-search.asciidoc:219 [source, python] ---- diff --git a/docs/examples/1aa91d3d48140d6367b6cabca8737b8f.asciidoc b/docs/examples/1aa91d3d48140d6367b6cabca8737b8f.asciidoc index fc3519cf1..17dc48f5e 100644 --- a/docs/examples/1aa91d3d48140d6367b6cabca8737b8f.asciidoc +++ b/docs/examples/1aa91d3d48140d6367b6cabca8737b8f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/bulk.asciidoc:632 +// docs/bulk.asciidoc:642 [source, python] ---- diff --git a/docs/examples/1b076ceb1ead9f6897c2f351f0e45f74.asciidoc b/docs/examples/1b076ceb1ead9f6897c2f351f0e45f74.asciidoc index 937867c30..d2cf9d555 100644 --- a/docs/examples/1b076ceb1ead9f6897c2f351f0e45f74.asciidoc +++ b/docs/examples/1b076ceb1ead9f6897c2f351f0e45f74.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/create-api-keys.asciidoc:220 +// rest-api/security/create-api-keys.asciidoc:226 [source, python] ---- diff --git a/docs/examples/1b0b29e5cd7550c648d0892378e93804.asciidoc b/docs/examples/1b0b29e5cd7550c648d0892378e93804.asciidoc index f5f77f18a..f6c38af80 100644 --- a/docs/examples/1b0b29e5cd7550c648d0892378e93804.asciidoc +++ b/docs/examples/1b0b29e5cd7550c648d0892378e93804.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/delete-calendar-job.asciidoc:36 +// ml/anomaly-detection/apis/delete-calendar-job.asciidoc:42 [source, python] ---- diff --git a/docs/examples/1b0dc9d076bbb58c6a2953ef4323d2fc.asciidoc b/docs/examples/1b0dc9d076bbb58c6a2953ef4323d2fc.asciidoc index b6018227b..080538e48 100644 --- a/docs/examples/1b0dc9d076bbb58c6a2953ef4323d2fc.asciidoc +++ b/docs/examples/1b0dc9d076bbb58c6a2953ef4323d2fc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/watcher/ack-watch.asciidoc:190 +// rest-api/watcher/ack-watch.asciidoc:196 [source, python] ---- diff --git a/docs/examples/1b2ab75d3c8064fac6ecc63104396c02.asciidoc b/docs/examples/1b2ab75d3c8064fac6ecc63104396c02.asciidoc index f06ba7169..1bb8b3564 
100644 --- a/docs/examples/1b2ab75d3c8064fac6ecc63104396c02.asciidoc +++ b/docs/examples/1b2ab75d3c8064fac6ecc63104396c02.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/put-calendar-job.asciidoc:36 +// ml/anomaly-detection/apis/put-calendar-job.asciidoc:42 [source, python] ---- diff --git a/docs/examples/1b3762712c14a19e8c2956b4f530d327.asciidoc b/docs/examples/1b3762712c14a19e8c2956b4f530d327.asciidoc index 9756e7301..a93efc331 100644 --- a/docs/examples/1b3762712c14a19e8c2956b4f530d327.asciidoc +++ b/docs/examples/1b3762712c14a19e8c2956b4f530d327.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ccr/apis/follow/put-follow.asciidoc:108 +// ccr/apis/follow/put-follow.asciidoc:114 [source, python] ---- diff --git a/docs/examples/1b37e2237c9e3aaf84d56cc5c0bdb9ec.asciidoc b/docs/examples/1b37e2237c9e3aaf84d56cc5c0bdb9ec.asciidoc index 6e8bc60ae..ebcd04ea5 100644 --- a/docs/examples/1b37e2237c9e3aaf84d56cc5c0bdb9ec.asciidoc +++ b/docs/examples/1b37e2237c9e3aaf84d56cc5c0bdb9ec.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ilm/error-handling.asciidoc:18 +// ilm/error-handling.asciidoc:19 [source, python] ---- diff --git a/docs/examples/1c3e3c4f2d268f1826a9b417e1868a58.asciidoc b/docs/examples/1c3e3c4f2d268f1826a9b417e1868a58.asciidoc index 059651182..6134ccb03 100644 --- a/docs/examples/1c3e3c4f2d268f1826a9b417e1868a58.asciidoc +++ b/docs/examples/1c3e3c4f2d268f1826a9b417e1868a58.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// scripting/using.asciidoc:312 +// scripting/using.asciidoc:317 [source, python] ---- diff --git a/docs/examples/1c8b6768c4eefc76fcb38708152f561b.asciidoc b/docs/examples/1c8b6768c4eefc76fcb38708152f561b.asciidoc index aa1420b78..1d350ebc2 100644 --- a/docs/examples/1c8b6768c4eefc76fcb38708152f561b.asciidoc +++ b/docs/examples/1c8b6768c4eefc76fcb38708152f561b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/df-analytics/apis/delete-dfanalytics.asciidoc:51 +// ml/df-analytics/apis/delete-dfanalytics.asciidoc:57 [source, python] ---- diff --git a/docs/examples/1cecd4d87a92427175157d41859df2af.asciidoc b/docs/examples/1cecd4d87a92427175157d41859df2af.asciidoc index 5e73037d0..593c0dd4e 100644 --- a/docs/examples/1cecd4d87a92427175157d41859df2af.asciidoc +++ b/docs/examples/1cecd4d87a92427175157d41859df2af.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/allocation-explain.asciidoc:10 +// cluster/allocation-explain.asciidoc:16 [source, python] ---- diff --git a/docs/examples/1d252d9217c61c2c1cbe7a92f77b078f.asciidoc b/docs/examples/1d252d9217c61c2c1cbe7a92f77b078f.asciidoc index 442eae70f..2e758afa8 100644 --- a/docs/examples/1d252d9217c61c2c1cbe7a92f77b078f.asciidoc +++ b/docs/examples/1d252d9217c61c2c1cbe7a92f77b078f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/query-api-key.asciidoc:608 +// rest-api/security/query-api-key.asciidoc:613 [source, python] ---- diff --git a/docs/examples/1dadb7efe27b6c0c231eb6535e413bd9.asciidoc b/docs/examples/1dadb7efe27b6c0c231eb6535e413bd9.asciidoc index 631cc34b0..753a4b9d6 100644 --- a/docs/examples/1dadb7efe27b6c0c231eb6535e413bd9.asciidoc +++ b/docs/examples/1dadb7efe27b6c0c231eb6535e413bd9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-azure-ai-studio.asciidoc:162 +// inference/service-azure-ai-studio.asciidoc:168 [source, python] ---- diff --git 
a/docs/examples/1db715eb00832686ecddb6603684fc26.asciidoc b/docs/examples/1db715eb00832686ecddb6603684fc26.asciidoc index 9e6c2b2c5..7c5a300e6 100644 --- a/docs/examples/1db715eb00832686ecddb6603684fc26.asciidoc +++ b/docs/examples/1db715eb00832686ecddb6603684fc26.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/enroll-kibana.asciidoc:28 +// rest-api/security/enroll-kibana.asciidoc:34 [source, python] ---- diff --git a/docs/examples/1e08e054c761353f99211cd18e8ca47b.asciidoc b/docs/examples/1e08e054c761353f99211cd18e8ca47b.asciidoc index c4ff7e042..abd862bc3 100644 --- a/docs/examples/1e08e054c761353f99211cd18e8ca47b.asciidoc +++ b/docs/examples/1e08e054c761353f99211cd18e8ca47b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/delete-snapshot.asciidoc:43 +// ml/anomaly-detection/apis/delete-snapshot.asciidoc:49 [source, python] ---- diff --git a/docs/examples/1e26353d546d733634187b8c3a7837a7.asciidoc b/docs/examples/1e26353d546d733634187b8c3a7837a7.asciidoc index e5f2463f7..37cc447f6 100644 --- a/docs/examples/1e26353d546d733634187b8c3a7837a7.asciidoc +++ b/docs/examples/1e26353d546d733634187b8c3a7837a7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/list-connectors-api.asciidoc:101 +// connector/apis/list-connectors-api.asciidoc:110 [source, python] ---- diff --git a/docs/examples/1e3384bc255729b65a6f0fc8011ff733.asciidoc b/docs/examples/1e3384bc255729b65a6f0fc8011ff733.asciidoc index e0a2d183c..f37ae1db0 100644 --- a/docs/examples/1e3384bc255729b65a6f0fc8011ff733.asciidoc +++ b/docs/examples/1e3384bc255729b65a6f0fc8011ff733.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/segments.asciidoc:12 +// indices/segments.asciidoc:18 [source, python] ---- diff --git a/docs/examples/1e3553a73da487017f7a95088b6aa957.asciidoc b/docs/examples/1e3553a73da487017f7a95088b6aa957.asciidoc index 2b259046e..f481e9086 100644 --- a/docs/examples/1e3553a73da487017f7a95088b6aa957.asciidoc +++ b/docs/examples/1e3553a73da487017f7a95088b6aa957.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/clear-roles-cache.asciidoc:56 +// rest-api/security/clear-roles-cache.asciidoc:62 [source, python] ---- diff --git a/docs/examples/1ed26c7b445ab1c167bd9385e1f0066f.asciidoc b/docs/examples/1ed26c7b445ab1c167bd9385e1f0066f.asciidoc index fcf34c209..cd92b0ba4 100644 --- a/docs/examples/1ed26c7b445ab1c167bd9385e1f0066f.asciidoc +++ b/docs/examples/1ed26c7b445ab1c167bd9385e1f0066f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// sql/apis/delete-async-sql-search-api.asciidoc:12 +// sql/apis/delete-async-sql-search-api.asciidoc:18 [source, python] ---- diff --git a/docs/examples/1f3dd84ab11bae09d3f99b1b3536e239.asciidoc b/docs/examples/1f3dd84ab11bae09d3f99b1b3536e239.asciidoc index 2f5859098..221b65d39 100644 --- a/docs/examples/1f3dd84ab11bae09d3f99b1b3536e239.asciidoc +++ b/docs/examples/1f3dd84ab11bae09d3f99b1b3536e239.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/apis/create-snapshot-api.asciidoc:25 +// snapshot-restore/apis/create-snapshot-api.asciidoc:31 [source, python] ---- diff --git a/docs/examples/1f673e1a0de2970dc648618d5425a994.asciidoc b/docs/examples/1f673e1a0de2970dc648618d5425a994.asciidoc index 6f79d3872..1a19d1004 100644 --- a/docs/examples/1f673e1a0de2970dc648618d5425a994.asciidoc +++ b/docs/examples/1f673e1a0de2970dc648618d5425a994.asciidoc @@ -1,5 
+1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/reindex.asciidoc:267 +// docs/reindex.asciidoc:273 [source, python] ---- diff --git a/docs/examples/1f6a190fa1aade1fb66680388f184ef9.asciidoc b/docs/examples/1f6a190fa1aade1fb66680388f184ef9.asciidoc index 130a7178d..0457f7d8f 100644 --- a/docs/examples/1f6a190fa1aade1fb66680388f184ef9.asciidoc +++ b/docs/examples/1f6a190fa1aade1fb66680388f184ef9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/validate.asciidoc:266 +// search/validate.asciidoc:272 [source, python] ---- diff --git a/docs/examples/1ff12523efbd59c213c676937757c460.asciidoc b/docs/examples/1ff12523efbd59c213c676937757c460.asciidoc index 244ccbbe6..9a18e3091 100644 --- a/docs/examples/1ff12523efbd59c213c676937757c460.asciidoc +++ b/docs/examples/1ff12523efbd59c213c676937757c460.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/invalidate-api-keys.asciidoc:110 +// rest-api/security/invalidate-api-keys.asciidoc:116 [source, python] ---- diff --git a/docs/examples/20005d8a6555b259b299d862cd218701.asciidoc b/docs/examples/20005d8a6555b259b299d862cd218701.asciidoc index 93d00d236..0a4219e1e 100644 --- a/docs/examples/20005d8a6555b259b299d862cd218701.asciidoc +++ b/docs/examples/20005d8a6555b259b299d862cd218701.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/match-query.asciidoc:186 +// query-dsl/match-query.asciidoc:190 [source, python] ---- diff --git a/docs/examples/200f6d4cc7b9c300b8962a119e03873f.asciidoc b/docs/examples/200f6d4cc7b9c300b8962a119e03873f.asciidoc index 9548b036a..1714999be 100644 --- a/docs/examples/200f6d4cc7b9c300b8962a119e03873f.asciidoc +++ b/docs/examples/200f6d4cc7b9c300b8962a119e03873f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/get-data-stream.asciidoc:280 +// indices/get-data-stream.asciidoc:286 [source, python] ---- diff --git a/docs/examples/20e3b181114e00c943a27a9bbcf85f15.asciidoc b/docs/examples/20e3b181114e00c943a27a9bbcf85f15.asciidoc index b4083b135..b4ef16609 100644 --- a/docs/examples/20e3b181114e00c943a27a9bbcf85f15.asciidoc +++ b/docs/examples/20e3b181114e00c943a27a9bbcf85f15.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/get-record.asciidoc:280 +// ml/anomaly-detection/apis/get-record.asciidoc:286 [source, python] ---- diff --git a/docs/examples/20f62d0540bf6261549bd286416eae28.asciidoc b/docs/examples/20f62d0540bf6261549bd286416eae28.asciidoc index 0dc048650..2d27949a9 100644 --- a/docs/examples/20f62d0540bf6261549bd286416eae28.asciidoc +++ b/docs/examples/20f62d0540bf6261549bd286416eae28.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/apis/enrich/put-enrich-policy.asciidoc:25 +// ingest/apis/enrich/put-enrich-policy.asciidoc:30 [source, python] ---- diff --git a/docs/examples/2105f2d1d81977054a93163a175793ce.asciidoc b/docs/examples/2105f2d1d81977054a93163a175793ce.asciidoc index 3212ee5bf..d18bcdd76 100644 --- a/docs/examples/2105f2d1d81977054a93163a175793ce.asciidoc +++ b/docs/examples/2105f2d1d81977054a93163a175793ce.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/apis/get-snapshot-status-api.asciidoc:75 +// snapshot-restore/apis/get-snapshot-status-api.asciidoc:81 [source, python] ---- diff --git a/docs/examples/218b9009f120e8ad33f710e019179562.asciidoc b/docs/examples/218b9009f120e8ad33f710e019179562.asciidoc index 6a6c45d49..a5f7af03b 100644 --- 
a/docs/examples/218b9009f120e8ad33f710e019179562.asciidoc +++ b/docs/examples/218b9009f120e8ad33f710e019179562.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/apis/get-repo-api.asciidoc:119 +// snapshot-restore/apis/get-repo-api.asciidoc:125 [source, python] ---- diff --git a/docs/examples/21c1e6ee886140ce0cd67184dd19b981.asciidoc b/docs/examples/21c1e6ee886140ce0cd67184dd19b981.asciidoc index 96e791f0b..1858a055a 100644 --- a/docs/examples/21c1e6ee886140ce0cd67184dd19b981.asciidoc +++ b/docs/examples/21c1e6ee886140ce0cd67184dd19b981.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/dangling-indices-list.asciidoc:13 +// indices/dangling-indices-list.asciidoc:19 [source, python] ---- diff --git a/docs/examples/22b176a184517cf1b5801f5eb4f17f97.asciidoc b/docs/examples/22b176a184517cf1b5801f5eb4f17f97.asciidoc new file mode 100644 index 000000000..b641acec5 --- /dev/null +++ b/docs/examples/22b176a184517cf1b5801f5eb4f17f97.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// data-streams/downsampling-dsl.asciidoc:349 + +[source, python] +---- +resp = client.indices.rollover( + alias="datastream", +) +print(resp) +---- diff --git a/docs/examples/22dde5fe7ac5d85d52115641a68b3c55.asciidoc b/docs/examples/22dde5fe7ac5d85d52115641a68b3c55.asciidoc index d09d80766..6479363e5 100644 --- a/docs/examples/22dde5fe7ac5d85d52115641a68b3c55.asciidoc +++ b/docs/examples/22dde5fe7ac5d85d52115641a68b3c55.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/analyze.asciidoc:196 +// indices/analyze.asciidoc:202 [source, python] ---- diff --git a/docs/examples/23b062c157235246d7c347b9047b2435.asciidoc b/docs/examples/23b062c157235246d7c347b9047b2435.asciidoc index 1277fbb7a..94a71a76e 100644 --- a/docs/examples/23b062c157235246d7c347b9047b2435.asciidoc +++ b/docs/examples/23b062c157235246d7c347b9047b2435.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/create-role-mappings.asciidoc:113 +// rest-api/security/create-role-mappings.asciidoc:119 [source, python] ---- diff --git a/docs/examples/242a26ced0e5706e48dcda19a4003094.asciidoc b/docs/examples/242a26ced0e5706e48dcda19a4003094.asciidoc index e6459126a..cfe98f488 100644 --- a/docs/examples/242a26ced0e5706e48dcda19a4003094.asciidoc +++ b/docs/examples/242a26ced0e5706e48dcda19a4003094.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/reindex.asciidoc:964 +// docs/reindex.asciidoc:970 [source, python] ---- diff --git a/docs/examples/246763219ec06172f7aa57bba28d344a.asciidoc b/docs/examples/246763219ec06172f7aa57bba28d344a.asciidoc new file mode 100644 index 000000000..5a340dc07 --- /dev/null +++ b/docs/examples/246763219ec06172f7aa57bba28d344a.asciidoc @@ -0,0 +1,109 @@ +// This file is autogenerated, DO NOT EDIT +// mapping/types/rank-vectors.asciidoc:159 + +[source, python] +---- +resp = client.indices.create( + index="my-rank-vectors-bit", + mappings={ + "properties": { + "my_vector": { + "type": "rank_vectors", + "element_type": "bit" + } + } + }, +) +print(resp) + +resp1 = client.bulk( + index="my-rank-vectors-bit", + refresh=True, + operations=[ + { + "index": { + "_id": "1" + } + }, + { + "my_vector": [ + 127, + -127, + 0, + 1, + 42 + ] + }, + { + "index": { + "_id": "2" + } + }, + { + "my_vector": "8100012a7f" + } + ], +) +print(resp1) + +resp2 = client.search( + index="my-rank-vectors-bit", + query={ + "script_score": { + "query": { + "match_all": {} + }, + "script": { + 
"source": "maxSimDotProduct(params.query_vector, 'my_vector')", + "params": { + "query_vector": [ + [ + 0.35, + 0.77, + 0.95, + 0.15, + 0.11, + 0.08, + 0.58, + 0.06, + 0.44, + 0.52, + 0.21, + 0.62, + 0.65, + 0.16, + 0.64, + 0.39, + 0.93, + 0.06, + 0.93, + 0.31, + 0.92, + 0, + 0.66, + 0.86, + 0.92, + 0.03, + 0.81, + 0.31, + 0.2, + 0.92, + 0.95, + 0.64, + 0.19, + 0.26, + 0.77, + 0.64, + 0.78, + 0.32, + 0.97, + 0.84 + ] + ] + } + } + } + }, +) +print(resp2) +---- diff --git a/docs/examples/249bf48252c8cea47ef872541c8a884c.asciidoc b/docs/examples/249bf48252c8cea47ef872541c8a884c.asciidoc index af3cafb7b..7327a497f 100644 --- a/docs/examples/249bf48252c8cea47ef872541c8a884c.asciidoc +++ b/docs/examples/249bf48252c8cea47ef872541c8a884c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/grant-api-keys.asciidoc:127 +// rest-api/security/grant-api-keys.asciidoc:133 [source, python] ---- diff --git a/docs/examples/24ad3c234f69f55a3fbe2d488e70178a.asciidoc b/docs/examples/24ad3c234f69f55a3fbe2d488e70178a.asciidoc index db644f6b7..cb65f4f6d 100644 --- a/docs/examples/24ad3c234f69f55a3fbe2d488e70178a.asciidoc +++ b/docs/examples/24ad3c234f69f55a3fbe2d488e70178a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/df-analytics/apis/evaluate-dfanalytics.asciidoc:354 +// ml/df-analytics/apis/evaluate-dfanalytics.asciidoc:360 [source, python] ---- diff --git a/docs/examples/24aee6033bf77a68ced74e3fd9d34283.asciidoc b/docs/examples/24aee6033bf77a68ced74e3fd9d34283.asciidoc index 7c1bba413..c7c3ce5ab 100644 --- a/docs/examples/24aee6033bf77a68ced74e3fd9d34283.asciidoc +++ b/docs/examples/24aee6033bf77a68ced74e3fd9d34283.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/get-index-template-v1.asciidoc:79 +// indices/get-index-template-v1.asciidoc:85 [source, python] ---- diff --git a/docs/examples/24d806d1803158dacd4dda73c4204d3e.asciidoc b/docs/examples/24d806d1803158dacd4dda73c4204d3e.asciidoc index bdff603a7..727fcb365 100644 --- a/docs/examples/24d806d1803158dacd4dda73c4204d3e.asciidoc +++ b/docs/examples/24d806d1803158dacd4dda73c4204d3e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-rules/apis/get-query-rule.asciidoc:105 +// query-rules/apis/get-query-rule.asciidoc:111 [source, python] ---- diff --git a/docs/examples/2577acb462b95bd4394523cf2f8a661f.asciidoc b/docs/examples/2577acb462b95bd4394523cf2f8a661f.asciidoc deleted file mode 100644 index d8e5f8698..000000000 --- a/docs/examples/2577acb462b95bd4394523cf2f8a661f.asciidoc +++ /dev/null @@ -1,33 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// esql/processing-commands/lookup.asciidoc:30 - -[source, python] ----- -resp = client.esql.query( - format="txt", - query="\n FROM library\n | SORT page_count DESC\n | KEEP name, author\n | LOOKUP era ON author\n | LIMIT 5\n ", - tables={ - "era": { - "author": { - "keyword": [ - "Frank Herbert", - "Peter F. Hamilton", - "Vernor Vinge", - "Alastair Reynolds", - "James S.A. 
Corey" - ] - }, - "era": { - "keyword": [ - "The New Wave", - "Diamond", - "Diamond", - "Diamond", - "Hadron" - ] - } - } - }, -) -print(resp) ----- diff --git a/docs/examples/25a0dad6547d432f5a3d394528f1c138.asciidoc b/docs/examples/25a0dad6547d432f5a3d394528f1c138.asciidoc index 7b429b7bd..c81a6c009 100644 --- a/docs/examples/25a0dad6547d432f5a3d394528f1c138.asciidoc +++ b/docs/examples/25a0dad6547d432f5a3d394528f1c138.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/get.asciidoc:395 +// docs/get.asciidoc:401 [source, python] ---- diff --git a/docs/examples/25c0e66a433a0cd596e0641b752ff6d7.asciidoc b/docs/examples/25c0e66a433a0cd596e0641b752ff6d7.asciidoc index 8f0653e9e..c814a4488 100644 --- a/docs/examples/25c0e66a433a0cd596e0641b752ff6d7.asciidoc +++ b/docs/examples/25c0e66a433a0cd596e0641b752ff6d7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/shards.asciidoc:405 +// cat/shards.asciidoc:414 [source, python] ---- diff --git a/docs/examples/25cb9e1da00dfd971065ce182467434d.asciidoc b/docs/examples/25cb9e1da00dfd971065ce182467434d.asciidoc index 4b3a463ba..b6541bf87 100644 --- a/docs/examples/25cb9e1da00dfd971065ce182467434d.asciidoc +++ b/docs/examples/25cb9e1da00dfd971065ce182467434d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/voting-exclusions.asciidoc:116 +// cluster/voting-exclusions.asciidoc:122 [source, python] ---- diff --git a/docs/examples/261480571394632db40e88fbb6c59c2f.asciidoc b/docs/examples/261480571394632db40e88fbb6c59c2f.asciidoc index 73966f9df..061c558a8 100644 --- a/docs/examples/261480571394632db40e88fbb6c59c2f.asciidoc +++ b/docs/examples/261480571394632db40e88fbb6c59c2f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/delete-role-mappings.asciidoc:46 +// rest-api/security/delete-role-mappings.asciidoc:52 [source, python] ---- diff --git a/docs/examples/2623eb122cc0299b42fc9eca6e7f5e56.asciidoc b/docs/examples/2623eb122cc0299b42fc9eca6e7f5e56.asciidoc index cfd59dc5d..9f4e353e0 100644 --- a/docs/examples/2623eb122cc0299b42fc9eca6e7f5e56.asciidoc +++ b/docs/examples/2623eb122cc0299b42fc9eca6e7f5e56.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-builtin-privileges.asciidoc:58 +// rest-api/security/get-builtin-privileges.asciidoc:64 [source, python] ---- diff --git a/docs/examples/2646710ece0c4c843aebeacd370d0396.asciidoc b/docs/examples/2646710ece0c4c843aebeacd370d0396.asciidoc index 978eb8a86..0deba1383 100644 --- a/docs/examples/2646710ece0c4c843aebeacd370d0396.asciidoc +++ b/docs/examples/2646710ece0c4c843aebeacd370d0396.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/dense-vector.asciidoc:142 +// mapping/types/dense-vector.asciidoc:141 [source, python] ---- diff --git a/docs/examples/26d3ab748a855eb383e992eb1ff79662.asciidoc b/docs/examples/26d3ab748a855eb383e992eb1ff79662.asciidoc index 9dba23b31..a46a7e75f 100644 --- a/docs/examples/26d3ab748a855eb383e992eb1ff79662.asciidoc +++ b/docs/examples/26d3ab748a855eb383e992eb1ff79662.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// eql/delete-async-eql-search-api.asciidoc:14 +// eql/delete-async-eql-search-api.asciidoc:20 [source, python] ---- diff --git a/docs/examples/270549e6b062228312c4e7a54a2c2209.asciidoc b/docs/examples/270549e6b062228312c4e7a54a2c2209.asciidoc index 9ae9ebbce..f2fbab21c 100644 --- a/docs/examples/270549e6b062228312c4e7a54a2c2209.asciidoc +++ 
b/docs/examples/270549e6b062228312c4e7a54a2c2209.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/task-queue-backlog.asciidoc:39 +// troubleshooting/common-issues/task-queue-backlog.asciidoc:55 [source, python] ---- diff --git a/docs/examples/2720e613d520ce352b62e990c2d283f7.asciidoc b/docs/examples/2720e613d520ce352b62e990c2d283f7.asciidoc index 36e3e207f..e1d7d8b4c 100644 --- a/docs/examples/2720e613d520ce352b62e990c2d283f7.asciidoc +++ b/docs/examples/2720e613d520ce352b62e990c2d283f7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ilm/apis/remove-policy-from-index.asciidoc:87 +// ilm/apis/remove-policy-from-index.asciidoc:93 [source, python] ---- diff --git a/docs/examples/27384266370152add76471dd0332a2f1.asciidoc b/docs/examples/27384266370152add76471dd0332a2f1.asciidoc index 534dfb41d..a99bf0468 100644 --- a/docs/examples/27384266370152add76471dd0332a2f1.asciidoc +++ b/docs/examples/27384266370152add76471dd0332a2f1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// transform/apis/update-transform.asciidoc:257 +// transform/apis/update-transform.asciidoc:263 [source, python] ---- diff --git a/docs/examples/275ec358d5d1e4b9ff06cb4ae7e47650.asciidoc b/docs/examples/275ec358d5d1e4b9ff06cb4ae7e47650.asciidoc index b19ba72f6..1bebbcc24 100644 --- a/docs/examples/275ec358d5d1e4b9ff06cb4ae7e47650.asciidoc +++ b/docs/examples/275ec358d5d1e4b9ff06cb4ae7e47650.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/get-index-template.asciidoc:78 +// indices/get-index-template.asciidoc:84 [source, python] ---- diff --git a/docs/examples/2864d04bf99860ed5dbe1458f1ab5f78.asciidoc b/docs/examples/2864d04bf99860ed5dbe1458f1ab5f78.asciidoc index c9fcedba5..0f6d7a087 100644 --- a/docs/examples/2864d04bf99860ed5dbe1458f1ab5f78.asciidoc +++ b/docs/examples/2864d04bf99860ed5dbe1458f1ab5f78.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// autoscaling/apis/put-autoscaling-policy.asciidoc:16 +// autoscaling/apis/put-autoscaling-policy.asciidoc:22 [source, python] ---- diff --git a/docs/examples/2884eacac3ad05ff794f5296ec7427e7.asciidoc b/docs/examples/2884eacac3ad05ff794f5296ec7427e7.asciidoc index 923e21c79..687e5f0fa 100644 --- a/docs/examples/2884eacac3ad05ff794f5296ec7427e7.asciidoc +++ b/docs/examples/2884eacac3ad05ff794f5296ec7427e7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/knn-query.asciidoc:57 +// query-dsl/knn-query.asciidoc:58 [source, python] ---- diff --git a/docs/examples/2932e6f71e247cf52e11d2f38f114ddf.asciidoc b/docs/examples/2932e6f71e247cf52e11d2f38f114ddf.asciidoc index 871706d7e..da25fd166 100644 --- a/docs/examples/2932e6f71e247cf52e11d2f38f114ddf.asciidoc +++ b/docs/examples/2932e6f71e247cf52e11d2f38f114ddf.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/reindex.asciidoc:294 +// docs/reindex.asciidoc:300 [source, python] ---- diff --git a/docs/examples/295b3aaeb223612afdd991744dc9c873.asciidoc b/docs/examples/295b3aaeb223612afdd991744dc9c873.asciidoc index f54eccc6e..f069fd0f1 100644 --- a/docs/examples/295b3aaeb223612afdd991744dc9c873.asciidoc +++ b/docs/examples/295b3aaeb223612afdd991744dc9c873.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// scripting/using.asciidoc:484 +// scripting/using.asciidoc:489 [source, python] ---- diff --git a/docs/examples/29824032d7d64512d17458fdd687b1f6.asciidoc b/docs/examples/29824032d7d64512d17458fdd687b1f6.asciidoc 
index 7500f918a..99994b7c2 100644 --- a/docs/examples/29824032d7d64512d17458fdd687b1f6.asciidoc +++ b/docs/examples/29824032d7d64512d17458fdd687b1f6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/tasks.asciidoc:138 +// cluster/tasks.asciidoc:144 [source, python] ---- diff --git a/docs/examples/299900fb08da80fe455cf3f1bb7d62ee.asciidoc b/docs/examples/299900fb08da80fe455cf3f1bb7d62ee.asciidoc index 77842e352..f1e964b2e 100644 --- a/docs/examples/299900fb08da80fe455cf3f1bb7d62ee.asciidoc +++ b/docs/examples/299900fb08da80fe455cf3f1bb7d62ee.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/get-field-mapping.asciidoc:96 +// indices/get-field-mapping.asciidoc:102 [source, python] ---- diff --git a/docs/examples/29d9df958de292cec50daaf31844b573.asciidoc b/docs/examples/29d9df958de292cec50daaf31844b573.asciidoc index 91441a858..444057fcb 100644 --- a/docs/examples/29d9df958de292cec50daaf31844b573.asciidoc +++ b/docs/examples/29d9df958de292cec50daaf31844b573.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/get-field-mapping.asciidoc:226 +// indices/get-field-mapping.asciidoc:232 [source, python] ---- diff --git a/docs/examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc b/docs/examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc index e831ba094..e8c3ee1d2 100644 --- a/docs/examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc +++ b/docs/examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc @@ -1,9 +1,12 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/oidc-logout-api.asciidoc:47 +// rest-api/security/oidc-logout-api.asciidoc:53 [source, python] ---- -resp = client.security.oidc_logout( +resp = client.perform_request( + "POST", + "/_security/oidc/logout", + headers={"Content-Type": "application/json"}, body={ "token": "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==", "refresh_token": "vLBPvmAB6KvwvJZr27cS" diff --git a/docs/examples/2a21674c40f9b182a8944769d20b2357.asciidoc b/docs/examples/2a21674c40f9b182a8944769d20b2357.asciidoc new file mode 100644 index 000000000..a9b622970 --- /dev/null +++ b/docs/examples/2a21674c40f9b182a8944769d20b2357.asciidoc @@ -0,0 +1,34 @@ +// This file is autogenerated, DO NOT EDIT +// mapping/types/rank-vectors.asciidoc:137 + +[source, python] +---- +resp = client.search( + index="my-rank-vectors-float", + query={ + "script_score": { + "query": { + "match_all": {} + }, + "script": { + "source": "maxSimDotProduct(params.query_vector, 'my_vector')", + "params": { + "query_vector": [ + [ + 0.5, + 10, + 6 + ], + [ + -0.5, + 10, + 10 + ] + ] + } + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/2a67608dadbf220a2f040f3a79d3677d.asciidoc b/docs/examples/2a67608dadbf220a2f040f3a79d3677d.asciidoc new file mode 100644 index 000000000..f349cd415 --- /dev/null +++ b/docs/examples/2a67608dadbf220a2f040f3a79d3677d.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// ingest/processors/attachment.asciidoc:162 + +[source, python] +---- +resp = client.ingest.put_pipeline( + id="attachment", + description="Extract attachment information including original binary", + processors=[ + { + "attachment": { + "field": "data", + "remove_binary": False + } + } + ], +) +print(resp) + +resp1 = client.index( + index="my-index-000001", + id="my_id", + pipeline="attachment", + document={ + "data": "e1xydGYxXGFuc2kNCkxvcmVtIGlwc3VtIGRvbG9yIHNpdCBhbWV0DQpccGFyIH0=" + }, +) +print(resp1) + +resp2 = client.get( + 
index="my-index-000001", + id="my_id", +) +print(resp2) +---- diff --git a/docs/examples/2aa548b692fc2fe7b6f0d90eb8b2ae29.asciidoc b/docs/examples/2aa548b692fc2fe7b6f0d90eb8b2ae29.asciidoc index aea75cee0..a9a2a489c 100644 --- a/docs/examples/2aa548b692fc2fe7b6f0d90eb8b2ae29.asciidoc +++ b/docs/examples/2aa548b692fc2fe7b6f0d90eb8b2ae29.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/watcher/delete-watch.asciidoc:60 +// rest-api/watcher/delete-watch.asciidoc:66 [source, python] ---- diff --git a/docs/examples/2acf75803494fef29f9ca70671aa6be1.asciidoc b/docs/examples/2acf75803494fef29f9ca70671aa6be1.asciidoc index 64607d0d5..fd659fcfc 100644 --- a/docs/examples/2acf75803494fef29f9ca70671aa6be1.asciidoc +++ b/docs/examples/2acf75803494fef29f9ca70671aa6be1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/bulk-delete-roles.asciidoc:94 +// rest-api/security/bulk-delete-roles.asciidoc:100 [source, python] ---- diff --git a/docs/examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc b/docs/examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc index e8b76eaa8..f074730ed 100644 --- a/docs/examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc +++ b/docs/examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc @@ -3,10 +3,12 @@ [source, python] ---- -resp = client.esql.async_query_get( - id="FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=", - wait_for_completion_timeout="30s", - body=None, +resp = client.perform_request( + "GET", + "/_query/async/FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=", + params={ + "wait_for_completion_timeout": "30s" + }, ) print(resp) ---- diff --git a/docs/examples/2b1c560f00d9bcf5caaf56c03f6b5962.asciidoc b/docs/examples/2b1c560f00d9bcf5caaf56c03f6b5962.asciidoc index ed97c5df2..d7c29ec7b 100644 --- a/docs/examples/2b1c560f00d9bcf5caaf56c03f6b5962.asciidoc +++ b/docs/examples/2b1c560f00d9bcf5caaf56c03f6b5962.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/list-connector-sync-jobs-api.asciidoc:79 +// connector/apis/list-connector-sync-jobs-api.asciidoc:85 [source, python] ---- diff --git a/docs/examples/2b5c69778eb3daba9fbd7242bcc2daf9.asciidoc b/docs/examples/2b5c69778eb3daba9fbd7242bcc2daf9.asciidoc index d8dd88c98..9ada8f2a3 100644 --- a/docs/examples/2b5c69778eb3daba9fbd7242bcc2daf9.asciidoc +++ b/docs/examples/2b5c69778eb3daba9fbd7242bcc2daf9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/query-api-key.asciidoc:724 +// rest-api/security/query-api-key.asciidoc:729 [source, python] ---- diff --git a/docs/examples/2b7687e3d7c06824950e00618c297864.asciidoc b/docs/examples/2b7687e3d7c06824950e00618c297864.asciidoc index c9652cf82..15d07ba55 100644 --- a/docs/examples/2b7687e3d7c06824950e00618c297864.asciidoc +++ b/docs/examples/2b7687e3d7c06824950e00618c297864.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/resolve-cluster.asciidoc:141 +// indices/resolve-cluster.asciidoc:179 [source, python] ---- diff --git a/docs/examples/2bb41b0b4876ce98cd0cd8fb6d337f18.asciidoc b/docs/examples/2bb41b0b4876ce98cd0cd8fb6d337f18.asciidoc deleted file mode 100644 index 7c611874c..000000000 --- a/docs/examples/2bb41b0b4876ce98cd0cd8fb6d337f18.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// migration/migrate_9_0/transient-settings-migration-guide.asciidoc:64 - -[source, python] ----- -resp = client.cluster.put_settings( - persistent={ - 
"cluster.indices.close.enable": False, - "indices.recovery.max_bytes_per_sec": "50mb" - }, - transient={ - "*": None - }, -) -print(resp) ----- diff --git a/docs/examples/2c0dbdcf400cde5d36f7c9e6c1101011.asciidoc b/docs/examples/2c0dbdcf400cde5d36f7c9e6c1101011.asciidoc index f2ff73eba..412da48fc 100644 --- a/docs/examples/2c0dbdcf400cde5d36f7c9e6c1101011.asciidoc +++ b/docs/examples/2c0dbdcf400cde5d36f7c9e6c1101011.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/health.asciidoc:101 +// cat/health.asciidoc:107 [source, python] ---- diff --git a/docs/examples/2c1e16e9ac24cfea979af2a69900d3c2.asciidoc b/docs/examples/2c1e16e9ac24cfea979af2a69900d3c2.asciidoc index 81f979d28..4b9bf1e58 100644 --- a/docs/examples/2c1e16e9ac24cfea979af2a69900d3c2.asciidoc +++ b/docs/examples/2c1e16e9ac24cfea979af2a69900d3c2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// synonyms/apis/put-synonym-rule.asciidoc:107 +// synonyms/apis/put-synonym-rule.asciidoc:113 [source, python] ---- diff --git a/docs/examples/2c3dff44904d3d73ff47f1afe89c7f86.asciidoc b/docs/examples/2c3dff44904d3d73ff47f1afe89c7f86.asciidoc index 83f9a5add..6770b543c 100644 --- a/docs/examples/2c3dff44904d3d73ff47f1afe89c7f86.asciidoc +++ b/docs/examples/2c3dff44904d3d73ff47f1afe89c7f86.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update-by-query.asciidoc:369 +// docs/update-by-query.asciidoc:375 [source, python] ---- diff --git a/docs/examples/2c602b4ee8f22cda2cdf19bad31da0af.asciidoc b/docs/examples/2c602b4ee8f22cda2cdf19bad31da0af.asciidoc index 1aedc5929..b799fcc28 100644 --- a/docs/examples/2c602b4ee8f22cda2cdf19bad31da0af.asciidoc +++ b/docs/examples/2c602b4ee8f22cda2cdf19bad31da0af.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster.asciidoc:53 +// cluster.asciidoc:59 [source, python] ---- diff --git a/docs/examples/2d0244c020075595acb625aa5ba8f455.asciidoc b/docs/examples/2d0244c020075595acb625aa5ba8f455.asciidoc index 3b4923e2b..483a812fc 100644 --- a/docs/examples/2d0244c020075595acb625aa5ba8f455.asciidoc +++ b/docs/examples/2d0244c020075595acb625aa5ba8f455.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/fields/synthetic-source.asciidoc:260 +// mapping/fields/synthetic-source.asciidoc:253 [source, python] ---- diff --git a/docs/examples/2d60e3bdfee7afbddee149f40450b8b5.asciidoc b/docs/examples/2d60e3bdfee7afbddee149f40450b8b5.asciidoc index 4c868c678..cdd7b1711 100644 --- a/docs/examples/2d60e3bdfee7afbddee149f40450b8b5.asciidoc +++ b/docs/examples/2d60e3bdfee7afbddee149f40450b8b5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/validate.asciidoc:143 +// search/validate.asciidoc:149 [source, python] ---- diff --git a/docs/examples/2d8fcb03de417a71e7888bbdd948a692.asciidoc b/docs/examples/2d8fcb03de417a71e7888bbdd948a692.asciidoc index bc202ff06..7d6b79a11 100644 --- a/docs/examples/2d8fcb03de417a71e7888bbdd948a692.asciidoc +++ b/docs/examples/2d8fcb03de417a71e7888bbdd948a692.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/transforms.asciidoc:191 +// cat/transforms.asciidoc:197 [source, python] ---- diff --git a/docs/examples/2de6885bacb8769b8f22dce253c96b0c.asciidoc b/docs/examples/2de6885bacb8769b8f22dce253c96b0c.asciidoc index 3aa15bd1d..db56adc49 100644 --- a/docs/examples/2de6885bacb8769b8f22dce253c96b0c.asciidoc +++ b/docs/examples/2de6885bacb8769b8f22dce253c96b0c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT 
-// query-dsl/intervals-query.asciidoc:414 +// query-dsl/intervals-query.asciidoc:424 [source, python] ---- diff --git a/docs/examples/2e09666d3ad5ad9afc22763ee6e97a2b.asciidoc b/docs/examples/2e09666d3ad5ad9afc22763ee6e97a2b.asciidoc index 26fb74ccc..f99fbc33a 100644 --- a/docs/examples/2e09666d3ad5ad9afc22763ee6e97a2b.asciidoc +++ b/docs/examples/2e09666d3ad5ad9afc22763ee6e97a2b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// slm/apis/slm-put.asciidoc:155 +// slm/apis/slm-put.asciidoc:160 [source, python] ---- diff --git a/docs/examples/2e36fe22051a47e052e349854d9948b9.asciidoc b/docs/examples/2e36fe22051a47e052e349854d9948b9.asciidoc index 84d55190c..271a14e94 100644 --- a/docs/examples/2e36fe22051a47e052e349854d9948b9.asciidoc +++ b/docs/examples/2e36fe22051a47e052e349854d9948b9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/explain.asciidoc:192 +// search/explain.asciidoc:198 [source, python] ---- diff --git a/docs/examples/2e796e5ca59768d4426abbf9a049db3e.asciidoc b/docs/examples/2e796e5ca59768d4426abbf9a049db3e.asciidoc index 11dd4351b..bdcc4a2a4 100644 --- a/docs/examples/2e796e5ca59768d4426abbf9a049db3e.asciidoc +++ b/docs/examples/2e796e5ca59768d4426abbf9a049db3e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/split-index.asciidoc:169 +// indices/split-index.asciidoc:175 [source, python] ---- diff --git a/docs/examples/2e7f4b9be999422a12abb680572b13c8.asciidoc b/docs/examples/2e7f4b9be999422a12abb680572b13c8.asciidoc index 14b98a120..594fdbfb6 100644 --- a/docs/examples/2e7f4b9be999422a12abb680572b13c8.asciidoc +++ b/docs/examples/2e7f4b9be999422a12abb680572b13c8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ilm/apis/get-lifecycle.asciidoc:76 +// ilm/apis/get-lifecycle.asciidoc:82 [source, python] ---- diff --git a/docs/examples/2f07b81fd47ec3b074242a760f0c4e9e.asciidoc b/docs/examples/2f07b81fd47ec3b074242a760f0c4e9e.asciidoc deleted file mode 100644 index 5f4e810a8..000000000 --- a/docs/examples/2f07b81fd47ec3b074242a760f0c4e9e.asciidoc +++ /dev/null @@ -1,13 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// index-modules/slowlog.asciidoc:149 - -[source, python] ----- -resp = client.indices.put_settings( - index="my-index-000001", - settings={ - "index.indexing.slowlog.include.user": True - }, -) -print(resp) ----- diff --git a/docs/examples/2f0b2181c434a879a23b4643bdd92575.asciidoc b/docs/examples/2f0b2181c434a879a23b4643bdd92575.asciidoc index 67cf5f704..31b77bbc0 100644 --- a/docs/examples/2f0b2181c434a879a23b4643bdd92575.asciidoc +++ b/docs/examples/2f0b2181c434a879a23b4643bdd92575.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/get-settings.asciidoc:76 +// indices/get-settings.asciidoc:82 [source, python] ---- diff --git a/docs/examples/2f2580ea420e1836d922fe48fa8ada97.asciidoc b/docs/examples/2f2580ea420e1836d922fe48fa8ada97.asciidoc index 403ea20b7..e64a402cb 100644 --- a/docs/examples/2f2580ea420e1836d922fe48fa8ada97.asciidoc +++ b/docs/examples/2f2580ea420e1836d922fe48fa8ada97.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc:33 +// ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc:39 [source, python] ---- diff --git a/docs/examples/2f72a63c73dd672ac2dc3997ad15dd41.asciidoc b/docs/examples/2f72a63c73dd672ac2dc3997ad15dd41.asciidoc new file mode 100644 index 000000000..57193131c --- /dev/null +++ 
b/docs/examples/2f72a63c73dd672ac2dc3997ad15dd41.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// mapping/types/semantic-text.asciidoc:216 + +[source, python] +---- +resp = client.indices.create( + index="test-index", + mappings={ + "properties": { + "source_field": { + "type": "text", + "fields": { + "infer_field": { + "type": "semantic_text", + "inference_id": ".elser-2-elasticsearch" + } + } + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/2fc80a2ad1ca8b2dcb13ed1895b8e861.asciidoc b/docs/examples/2fc80a2ad1ca8b2dcb13ed1895b8e861.asciidoc index 822d66bd1..f72ef744d 100644 --- a/docs/examples/2fc80a2ad1ca8b2dcb13ed1895b8e861.asciidoc +++ b/docs/examples/2fc80a2ad1ca8b2dcb13ed1895b8e861.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/update-settings.asciidoc:123 +// cluster/update-settings.asciidoc:128 [source, python] ---- diff --git a/docs/examples/2fe28d9a91b3081a9ec4601af8fb7b1c.asciidoc b/docs/examples/2fe28d9a91b3081a9ec4601af8fb7b1c.asciidoc index 08dfe7896..0c2613078 100644 --- a/docs/examples/2fe28d9a91b3081a9ec4601af8fb7b1c.asciidoc +++ b/docs/examples/2fe28d9a91b3081a9ec4601af8fb7b1c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update-by-query.asciidoc:710 +// docs/update-by-query.asciidoc:716 [source, python] ---- diff --git a/docs/examples/2fea3e324939cc7e9c396964aeee7111.asciidoc b/docs/examples/2fea3e324939cc7e9c396964aeee7111.asciidoc index 339421498..aee1dc023 100644 --- a/docs/examples/2fea3e324939cc7e9c396964aeee7111.asciidoc +++ b/docs/examples/2fea3e324939cc7e9c396964aeee7111.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/match-query.asciidoc:252 +// query-dsl/match-query.asciidoc:256 [source, python] ---- diff --git a/docs/examples/2fee452baff92b409cbfc8d71eb5fc0e.asciidoc b/docs/examples/2fee452baff92b409cbfc8d71eb5fc0e.asciidoc index cb576f087..92af294a0 100644 --- a/docs/examples/2fee452baff92b409cbfc8d71eb5fc0e.asciidoc +++ b/docs/examples/2fee452baff92b409cbfc8d71eb5fc0e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/nodes.asciidoc:352 +// cat/nodes.asciidoc:361 [source, python] ---- diff --git a/docs/examples/300576666769b78fa6fa26b232837f81.asciidoc b/docs/examples/300576666769b78fa6fa26b232837f81.asciidoc index e0c8d0f52..45f240604 100644 --- a/docs/examples/300576666769b78fa6fa26b232837f81.asciidoc +++ b/docs/examples/300576666769b78fa6fa26b232837f81.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// autoscaling/apis/get-autoscaling-capacity.asciidoc:16 +// autoscaling/apis/get-autoscaling-capacity.asciidoc:22 [source, python] ---- diff --git a/docs/examples/30abc76a39e551f4b52c65002bb6405d.asciidoc b/docs/examples/30abc76a39e551f4b52c65002bb6405d.asciidoc index b126f55e5..71a436af7 100644 --- a/docs/examples/30abc76a39e551f4b52c65002bb6405d.asciidoc +++ b/docs/examples/30abc76a39e551f4b52c65002bb6405d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-api-keys.asciidoc:279 +// rest-api/security/get-api-keys.asciidoc:285 [source, python] ---- diff --git a/docs/examples/30d051f534aeb884176eedb2c11dac85.asciidoc b/docs/examples/30d051f534aeb884176eedb2c11dac85.asciidoc new file mode 100644 index 000000000..7d95ecb4e --- /dev/null +++ b/docs/examples/30d051f534aeb884176eedb2c11dac85.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// inference/service-elasticsearch.asciidoc:176 + +[source, python] +---- +resp = 
client.inference.put( + task_type="rerank", + inference_id="my-elastic-rerank", + inference_config={ + "service": "elasticsearch", + "service_settings": { + "model_id": ".rerank-v1", + "num_threads": 1, + "adaptive_allocations": { + "enabled": True, + "min_number_of_allocations": 1, + "max_number_of_allocations": 4 + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/30f3e3b9df46afd12e68bc71f18483b4.asciidoc b/docs/examples/30f3e3b9df46afd12e68bc71f18483b4.asciidoc index 7b431ccc1..594027bb3 100644 --- a/docs/examples/30f3e3b9df46afd12e68bc71f18483b4.asciidoc +++ b/docs/examples/30f3e3b9df46afd12e68bc71f18483b4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:125 +// indices/put-mapping.asciidoc:131 [source, python] ---- diff --git a/docs/examples/31ab4ec26176857280af630bf84a2823.asciidoc b/docs/examples/31ab4ec26176857280af630bf84a2823.asciidoc index 46e965fdf..fbbf52f58 100644 --- a/docs/examples/31ab4ec26176857280af630bf84a2823.asciidoc +++ b/docs/examples/31ab4ec26176857280af630bf84a2823.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/saml-sp-metadata.asciidoc:42 +// rest-api/security/saml-sp-metadata.asciidoc:48 [source, python] ---- diff --git a/docs/examples/31bc93e429ad0de11dd2dd231e8f2c5e.asciidoc b/docs/examples/31bc93e429ad0de11dd2dd231e8f2c5e.asciidoc deleted file mode 100644 index d76914bba..000000000 --- a/docs/examples/31bc93e429ad0de11dd2dd231e8f2c5e.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// indices/apis/unfreeze.asciidoc:51 - -[source, python] ----- -resp = client.indices.unfreeze( - index="my-index-000001", -) -print(resp) ----- diff --git a/docs/examples/31f4400716500149cccbc19aa06bff66.asciidoc b/docs/examples/31f4400716500149cccbc19aa06bff66.asciidoc index 0caf5ace5..65b6cb5b3 100644 --- a/docs/examples/31f4400716500149cccbc19aa06bff66.asciidoc +++ b/docs/examples/31f4400716500149cccbc19aa06bff66.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/dangling-index-delete.asciidoc:13 +// indices/dangling-index-delete.asciidoc:19 [source, python] ---- diff --git a/docs/examples/3218f8ccd59c8c90349816e0428e8fb8.asciidoc b/docs/examples/3218f8ccd59c8c90349816e0428e8fb8.asciidoc index cdc58299a..f44248852 100644 --- a/docs/examples/3218f8ccd59c8c90349816e0428e8fb8.asciidoc +++ b/docs/examples/3218f8ccd59c8c90349816e0428e8fb8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/circuit-breaker-errors.asciidoc:92 +// troubleshooting/common-issues/circuit-breaker-errors.asciidoc:99 [source, python] ---- diff --git a/docs/examples/32b7963c5cabbe9cc7d15da62f5edda9.asciidoc b/docs/examples/32b7963c5cabbe9cc7d15da62f5edda9.asciidoc index bb4f229f4..8ab95653f 100644 --- a/docs/examples/32b7963c5cabbe9cc7d15da62f5edda9.asciidoc +++ b/docs/examples/32b7963c5cabbe9cc7d15da62f5edda9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/update-user-profile-data.asciidoc:118 +// rest-api/security/update-user-profile-data.asciidoc:124 [source, python] ---- diff --git a/docs/examples/23af230e824f48b9cd56a4cf973d788c.asciidoc b/docs/examples/3312c82f81816bf76629db9582991812.asciidoc similarity index 79% rename from docs/examples/23af230e824f48b9cd56a4cf973d788c.asciidoc rename to docs/examples/3312c82f81816bf76629db9582991812.asciidoc index 167bf7db0..15244e685 100644 --- a/docs/examples/23af230e824f48b9cd56a4cf973d788c.asciidoc +++ 
b/docs/examples/3312c82f81816bf76629db9582991812.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// index-modules/slowlog.asciidoc:31 +// index-modules/slowlog.asciidoc:135 [source, python] ---- @@ -13,7 +13,8 @@ resp = client.indices.put_settings( "index.search.slowlog.threshold.fetch.warn": "1s", "index.search.slowlog.threshold.fetch.info": "800ms", "index.search.slowlog.threshold.fetch.debug": "500ms", - "index.search.slowlog.threshold.fetch.trace": "200ms" + "index.search.slowlog.threshold.fetch.trace": "200ms", + "index.search.slowlog.include.user": True }, ) print(resp) diff --git a/docs/examples/3337c817ebd438254505a31e91c91724.asciidoc b/docs/examples/3337c817ebd438254505a31e91c91724.asciidoc index 5c223af15..c6182f37d 100644 --- a/docs/examples/3337c817ebd438254505a31e91c91724.asciidoc +++ b/docs/examples/3337c817ebd438254505a31e91c91724.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/get-data-stream.asciidoc:71 +// indices/get-data-stream.asciidoc:77 [source, python] ---- diff --git a/docs/examples/3341d3bbb53052447a37c92a04c14b70.asciidoc b/docs/examples/3341d3bbb53052447a37c92a04c14b70.asciidoc index b9d4f716e..fa4c91226 100644 --- a/docs/examples/3341d3bbb53052447a37c92a04c14b70.asciidoc +++ b/docs/examples/3341d3bbb53052447a37c92a04c14b70.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// scripting/using.asciidoc:351 +// scripting/using.asciidoc:356 [source, python] ---- diff --git a/docs/examples/33610800d9de3c3e6d6b3c611ace7330.asciidoc b/docs/examples/33610800d9de3c3e6d6b3c611ace7330.asciidoc index 06f9cf273..fa7a72bbe 100644 --- a/docs/examples/33610800d9de3c3e6d6b3c611ace7330.asciidoc +++ b/docs/examples/33610800d9de3c3e6d6b3c611ace7330.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/tasks.asciidoc:128 +// cluster/tasks.asciidoc:134 [source, python] ---- diff --git a/docs/examples/3386fe07e90844dbcdbbe7c07f09e04a.asciidoc b/docs/examples/3386fe07e90844dbcdbbe7c07f09e04a.asciidoc index d1902818c..a698a9b98 100644 --- a/docs/examples/3386fe07e90844dbcdbbe7c07f09e04a.asciidoc +++ b/docs/examples/3386fe07e90844dbcdbbe7c07f09e04a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// synonyms/apis/delete-synonyms-set.asciidoc:61 +// synonyms/apis/delete-synonyms-set.asciidoc:66 [source, python] ---- diff --git a/docs/examples/339c4e5af9f9069ad9912aa574488b59.asciidoc b/docs/examples/339c4e5af9f9069ad9912aa574488b59.asciidoc index b027282dd..f0f281db4 100644 --- a/docs/examples/339c4e5af9f9069ad9912aa574488b59.asciidoc +++ b/docs/examples/339c4e5af9f9069ad9912aa574488b59.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// vectors/vector-functions.asciidoc:342 +// vectors/vector-functions.asciidoc:346 [source, python] ---- diff --git a/docs/examples/33d480fc6812ada75756cf5337bc9092.asciidoc b/docs/examples/33d480fc6812ada75756cf5337bc9092.asciidoc index 382153680..221597579 100644 --- a/docs/examples/33d480fc6812ada75756cf5337bc9092.asciidoc +++ b/docs/examples/33d480fc6812ada75756cf5337bc9092.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/list-connector-sync-jobs-api.asciidoc:58 +// connector/apis/list-connector-sync-jobs-api.asciidoc:64 [source, python] ---- diff --git a/docs/examples/342ddf9121aeddd82fea2464665e25da.asciidoc b/docs/examples/342ddf9121aeddd82fea2464665e25da.asciidoc index d2c8c5d77..bbd82aa9d 100644 --- a/docs/examples/342ddf9121aeddd82fea2464665e25da.asciidoc +++ 
b/docs/examples/342ddf9121aeddd82fea2464665e25da.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/create-connector-api.asciidoc:21 +// connector/apis/create-connector-api.asciidoc:27 [source, python] ---- diff --git a/docs/examples/3477a89d869b1f7f72d50c2ca86c4679.asciidoc b/docs/examples/3477a89d869b1f7f72d50c2ca86c4679.asciidoc index b92aa95bf..8ecac553a 100644 --- a/docs/examples/3477a89d869b1f7f72d50c2ca86c4679.asciidoc +++ b/docs/examples/3477a89d869b1f7f72d50c2ca86c4679.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/watcher/activate-watch.asciidoc:82 +// rest-api/watcher/activate-watch.asciidoc:88 [source, python] ---- diff --git a/docs/examples/34be27141e3a476c138546190101c8bc.asciidoc b/docs/examples/34be27141e3a476c138546190101c8bc.asciidoc index 3cee3576c..f5c1d92f1 100644 --- a/docs/examples/34be27141e3a476c138546190101c8bc.asciidoc +++ b/docs/examples/34be27141e3a476c138546190101c8bc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-vector-tile-api.asciidoc:33 +// search/search-vector-tile-api.asciidoc:38 [source, python] ---- diff --git a/docs/examples/34cdeefb09bbbe5206957a8bc1bd513d.asciidoc b/docs/examples/34cdeefb09bbbe5206957a8bc1bd513d.asciidoc deleted file mode 100644 index 41820a845..000000000 --- a/docs/examples/34cdeefb09bbbe5206957a8bc1bd513d.asciidoc +++ /dev/null @@ -1,13 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// index-modules/slowlog.asciidoc:66 - -[source, python] ----- -resp = client.indices.put_settings( - index="my-index-000001", - settings={ - "index.search.slowlog.include.user": True - }, -) -print(resp) ----- diff --git a/docs/examples/3541d4a85e27b2c3896a7a7ee98b4b37.asciidoc b/docs/examples/3541d4a85e27b2c3896a7a7ee98b4b37.asciidoc index f983391dd..2aca9fdab 100644 --- a/docs/examples/3541d4a85e27b2c3896a7a7ee98b4b37.asciidoc +++ b/docs/examples/3541d4a85e27b2c3896a7a7ee98b4b37.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// health/health.asciidoc:480 +// health/health.asciidoc:486 [source, python] ---- diff --git a/docs/examples/3545261682af72f4bee57f2bac0a9590.asciidoc b/docs/examples/3545261682af72f4bee57f2bac0a9590.asciidoc index 7085907ee..f1682e1f6 100644 --- a/docs/examples/3545261682af72f4bee57f2bac0a9590.asciidoc +++ b/docs/examples/3545261682af72f4bee57f2bac0a9590.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/shard-stores.asciidoc:150 +// indices/shard-stores.asciidoc:156 [source, python] ---- diff --git a/docs/examples/355d0ee2fcb6c1fc403c6267f710e25a.asciidoc b/docs/examples/355d0ee2fcb6c1fc403c6267f710e25a.asciidoc index 95c58cd5e..b6122821f 100644 --- a/docs/examples/355d0ee2fcb6c1fc403c6267f710e25a.asciidoc +++ b/docs/examples/355d0ee2fcb6c1fc403c6267f710e25a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/reindex.asciidoc:716 +// docs/reindex.asciidoc:722 [source, python] ---- diff --git a/docs/examples/357edc9d10e98ed776401c7a439a1a55.asciidoc b/docs/examples/357edc9d10e98ed776401c7a439a1a55.asciidoc index 69fe16b28..90b277dc4 100644 --- a/docs/examples/357edc9d10e98ed776401c7a439a1a55.asciidoc +++ b/docs/examples/357edc9d10e98ed776401c7a439a1a55.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/resolve-cluster.asciidoc:206 +// indices/resolve-cluster.asciidoc:244 [source, python] ---- diff --git a/docs/examples/35a272df8c919a12d7c3106a18245748.asciidoc 
b/docs/examples/35a272df8c919a12d7c3106a18245748.asciidoc index 0a003a7e3..93c86a953 100644 --- a/docs/examples/35a272df8c919a12d7c3106a18245748.asciidoc +++ b/docs/examples/35a272df8c919a12d7c3106a18245748.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/infer-trained-model.asciidoc:950 +// ml/trained-models/apis/infer-trained-model.asciidoc:956 [source, python] ---- diff --git a/docs/examples/35be136ba9df7474a5521631e2a385b1.asciidoc b/docs/examples/35be136ba9df7474a5521631e2a385b1.asciidoc index 890b4dc77..5990b49a5 100644 --- a/docs/examples/35be136ba9df7474a5521631e2a385b1.asciidoc +++ b/docs/examples/35be136ba9df7474a5521631e2a385b1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/lifecycle/apis/explain-lifecycle.asciidoc:50 +// data-streams/lifecycle/apis/explain-lifecycle.asciidoc:56 [source, python] ---- diff --git a/docs/examples/35f892b475a1770f18328158be7039fd.asciidoc b/docs/examples/35f892b475a1770f18328158be7039fd.asciidoc index bd69bcdd1..2ae10c083 100644 --- a/docs/examples/35f892b475a1770f18328158be7039fd.asciidoc +++ b/docs/examples/35f892b475a1770f18328158be7039fd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/dense-vector.asciidoc:72 +// mapping/types/dense-vector.asciidoc:71 [source, python] ---- diff --git a/docs/examples/3608e4fcd17dd8d5f88ec9a3db2f5d89.asciidoc b/docs/examples/3608e4fcd17dd8d5f88ec9a3db2f5d89.asciidoc index 5c0875fce..c71cd49c0 100644 --- a/docs/examples/3608e4fcd17dd8d5f88ec9a3db2f5d89.asciidoc +++ b/docs/examples/3608e4fcd17dd8d5f88ec9a3db2f5d89.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// synonyms/apis/put-synonyms-set.asciidoc:83 +// synonyms/apis/put-synonyms-set.asciidoc:89 [source, python] ---- diff --git a/docs/examples/365256ebdfa47b449780771d9beba8d9.asciidoc b/docs/examples/365256ebdfa47b449780771d9beba8d9.asciidoc index ecfda89c9..09fc5efcd 100644 --- a/docs/examples/365256ebdfa47b449780771d9beba8d9.asciidoc +++ b/docs/examples/365256ebdfa47b449780771d9beba8d9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/check-in-connector-sync-job-api.asciidoc:50 +// connector/apis/check-in-connector-sync-job-api.asciidoc:56 [source, python] ---- diff --git a/docs/examples/36d229f734adcdab00be266a7ce038b1.asciidoc b/docs/examples/36d229f734adcdab00be266a7ce038b1.asciidoc index b2740333f..b52adb699 100644 --- a/docs/examples/36d229f734adcdab00be266a7ce038b1.asciidoc +++ b/docs/examples/36d229f734adcdab00be266a7ce038b1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/dense-vector.asciidoc:406 +// mapping/types/dense-vector.asciidoc:404 [source, python] ---- diff --git a/docs/examples/36da9668fef56910370f16bfb772cc40.asciidoc b/docs/examples/36da9668fef56910370f16bfb772cc40.asciidoc index a2f05d6e8..5af3045ba 100644 --- a/docs/examples/36da9668fef56910370f16bfb772cc40.asciidoc +++ b/docs/examples/36da9668fef56910370f16bfb772cc40.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// modules/indices/request_cache.asciidoc:139 +// shard-request-cache.asciidoc:125 [source, python] ---- diff --git a/docs/examples/370b297ed3433577adf53e64f572d89d.asciidoc b/docs/examples/370b297ed3433577adf53e64f572d89d.asciidoc index 56aace56f..5ab7ae26d 100644 --- a/docs/examples/370b297ed3433577adf53e64f572d89d.asciidoc +++ b/docs/examples/370b297ed3433577adf53e64f572d89d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT 
EDIT -// connector/apis/delete-connector-sync-job-api.asciidoc:46 +// connector/apis/delete-connector-sync-job-api.asciidoc:52 [source, python] ---- diff --git a/docs/examples/3759ca688c4bd3c838780a9aad63258b.asciidoc b/docs/examples/3759ca688c4bd3c838780a9aad63258b.asciidoc index 4d7be2d86..3cf758d97 100644 --- a/docs/examples/3759ca688c4bd3c838780a9aad63258b.asciidoc +++ b/docs/examples/3759ca688c4bd3c838780a9aad63258b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/get-index-template.asciidoc:35 +// indices/get-index-template.asciidoc:41 [source, python] ---- diff --git a/docs/examples/37983daac3d9c8582583a507b3adb7f2.asciidoc b/docs/examples/37983daac3d9c8582583a507b3adb7f2.asciidoc index 2086a8afc..d1b1b1359 100644 --- a/docs/examples/37983daac3d9c8582583a507b3adb7f2.asciidoc +++ b/docs/examples/37983daac3d9c8582583a507b3adb7f2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// shutdown/apis/shutdown-delete.asciidoc:51 +// shutdown/apis/shutdown-delete.asciidoc:57 [source, python] ---- diff --git a/docs/examples/37f367ca81a16d3aef4ef7126ec33a2e.asciidoc b/docs/examples/37f367ca81a16d3aef4ef7126ec33a2e.asciidoc new file mode 100644 index 000000000..32c67885f --- /dev/null +++ b/docs/examples/37f367ca81a16d3aef4ef7126ec33a2e.asciidoc @@ -0,0 +1,79 @@ +// This file is autogenerated, DO NOT EDIT +// search/retriever.asciidoc:429 + +[source, python] +---- +resp = client.search( + index="movies", + size=10, + retriever={ + "rescorer": { + "rescore": { + "query": { + "window_size": 50, + "rescore_query": { + "script_score": { + "script": { + "source": "cosineSimilarity(params.queryVector, 'product-vector_final_stage') + 1.0", + "params": { + "queryVector": [ + -0.5, + 90, + -10, + 14.8, + -156 + ] + } + } + } + } + } + }, + "retriever": { + "rrf": { + "rank_window_size": 100, + "retrievers": [ + { + "standard": { + "query": { + "sparse_vector": { + "field": "plot_embedding", + "inference_id": "my-elser-model", + "query": "films that explore psychological depths" + } + } + } + }, + { + "standard": { + "query": { + "multi_match": { + "query": "crime", + "fields": [ + "plot", + "title" + ] + } + } + } + }, + { + "knn": { + "field": "vector", + "query_vector": [ + 10, + 22, + 77 + ], + "k": 10, + "num_candidates": 10 + } + } + ] + } + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/3819d0a5c2eed635c88e9e7bf2e81584.asciidoc b/docs/examples/3819d0a5c2eed635c88e9e7bf2e81584.asciidoc index b5ef17a87..b8fab2deb 100644 --- a/docs/examples/3819d0a5c2eed635c88e9e7bf2e81584.asciidoc +++ b/docs/examples/3819d0a5c2eed635c88e9e7bf2e81584.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/revert-snapshot.asciidoc:78 +// ml/anomaly-detection/apis/revert-snapshot.asciidoc:84 [source, python] ---- diff --git a/docs/examples/386eb7dcd3149db82605bf22c5d851bf.asciidoc b/docs/examples/386eb7dcd3149db82605bf22c5d851bf.asciidoc index ad82daded..76ae0c4fc 100644 --- a/docs/examples/386eb7dcd3149db82605bf22c5d851bf.asciidoc +++ b/docs/examples/386eb7dcd3149db82605bf22c5d851bf.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/query-api-key.asciidoc:368 +// rest-api/security/query-api-key.asciidoc:373 [source, python] ---- diff --git a/docs/examples/388d3eda4f792d3fce044777739217e6.asciidoc b/docs/examples/388d3eda4f792d3fce044777739217e6.asciidoc index 14dd866c0..1e79fb553 100644 --- a/docs/examples/388d3eda4f792d3fce044777739217e6.asciidoc +++ 
b/docs/examples/388d3eda4f792d3fce044777739217e6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/df-analytics/apis/evaluate-dfanalytics.asciidoc:436 +// ml/df-analytics/apis/evaluate-dfanalytics.asciidoc:442 [source, python] ---- diff --git a/docs/examples/38ba93890494bfa7beece58dffa44f98.asciidoc b/docs/examples/38ba93890494bfa7beece58dffa44f98.asciidoc deleted file mode 100644 index cf80b3ae8..000000000 --- a/docs/examples/38ba93890494bfa7beece58dffa44f98.asciidoc +++ /dev/null @@ -1,23 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// mapping/types/semantic-text.asciidoc:207 - -[source, python] ----- -resp = client.bulk( - index="test-index", - operations=[ - { - "update": { - "_id": "1" - } - }, - { - "doc": { - "infer_field": "updated inference field", - "source_field": "updated source field" - } - } - ], -) -print(resp) ----- diff --git a/docs/examples/38eed000de433b540116928681c520d3.asciidoc b/docs/examples/38eed000de433b540116928681c520d3.asciidoc index dc8c501d5..01aa0804e 100644 --- a/docs/examples/38eed000de433b540116928681c520d3.asciidoc +++ b/docs/examples/38eed000de433b540116928681c520d3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/preview-datafeed.asciidoc:110 +// ml/anomaly-detection/apis/preview-datafeed.asciidoc:116 [source, python] ---- diff --git a/docs/examples/38f7739f750f1411bccf511a0abaaea3.asciidoc b/docs/examples/38f7739f750f1411bccf511a0abaaea3.asciidoc index 148e2b226..047f0f243 100644 --- a/docs/examples/38f7739f750f1411bccf511a0abaaea3.asciidoc +++ b/docs/examples/38f7739f750f1411bccf511a0abaaea3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/field-caps.asciidoc:13 +// search/field-caps.asciidoc:18 [source, python] ---- diff --git a/docs/examples/3924ee252581ebb96ac0e60046125ae8.asciidoc b/docs/examples/3924ee252581ebb96ac0e60046125ae8.asciidoc index c4035e5f0..f516951de 100644 --- a/docs/examples/3924ee252581ebb96ac0e60046125ae8.asciidoc +++ b/docs/examples/3924ee252581ebb96ac0e60046125ae8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-users.asciidoc:63 +// rest-api/security/get-users.asciidoc:68 [source, python] ---- diff --git a/docs/examples/3951d7fcd7f849fa278daf342872125a.asciidoc b/docs/examples/3951d7fcd7f849fa278daf342872125a.asciidoc index 86b07b0f5..d36bff51e 100644 --- a/docs/examples/3951d7fcd7f849fa278daf342872125a.asciidoc +++ b/docs/examples/3951d7fcd7f849fa278daf342872125a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/analyze.asciidoc:372 +// indices/analyze.asciidoc:378 [source, python] ---- diff --git a/docs/examples/39760996f94ad34aaceaa16a5cc97993.asciidoc b/docs/examples/39760996f94ad34aaceaa16a5cc97993.asciidoc index a7560e618..7fb510cfb 100644 --- a/docs/examples/39760996f94ad34aaceaa16a5cc97993.asciidoc +++ b/docs/examples/39760996f94ad34aaceaa16a5cc97993.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// shutdown/apis/shutdown-get.asciidoc:61 +// shutdown/apis/shutdown-get.asciidoc:67 [source, python] ---- diff --git a/docs/examples/397ab5f9ea0b69ae85038bb0b9915180.asciidoc b/docs/examples/397ab5f9ea0b69ae85038bb0b9915180.asciidoc index 404236df4..a8a6d1c8b 100644 --- a/docs/examples/397ab5f9ea0b69ae85038bb0b9915180.asciidoc +++ b/docs/examples/397ab5f9ea0b69ae85038bb0b9915180.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/downsampling-ilm.asciidoc:512 +// 
data-streams/downsampling-dsl.asciidoc:523 [source, python] ---- diff --git a/docs/examples/398389933901b572a06a752bc780af7c.asciidoc b/docs/examples/398389933901b572a06a752bc780af7c.asciidoc index f27040fba..9f4033e64 100644 --- a/docs/examples/398389933901b572a06a752bc780af7c.asciidoc +++ b/docs/examples/398389933901b572a06a752bc780af7c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-anthropic.asciidoc:131 +// inference/service-anthropic.asciidoc:137 [source, python] ---- diff --git a/docs/examples/39ce44333d28ed2b833722d3e3cb06f3.asciidoc b/docs/examples/39ce44333d28ed2b833722d3e3cb06f3.asciidoc index 6886f0a54..4aa468359 100644 --- a/docs/examples/39ce44333d28ed2b833722d3e3cb06f3.asciidoc +++ b/docs/examples/39ce44333d28ed2b833722d3e3cb06f3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/bool-query.asciidoc:184 +// query-dsl/bool-query.asciidoc:187 [source, python] ---- diff --git a/docs/examples/39d6f575c9458d9c941364dfd0493fa0.asciidoc b/docs/examples/39d6f575c9458d9c941364dfd0493fa0.asciidoc index fc9b88784..deee416bf 100644 --- a/docs/examples/39d6f575c9458d9c941364dfd0493fa0.asciidoc +++ b/docs/examples/39d6f575c9458d9c941364dfd0493fa0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/get-calendar-event.asciidoc:112 +// ml/anomaly-detection/apis/get-calendar-event.asciidoc:118 [source, python] ---- diff --git a/docs/examples/3a12feb0de224bfaaf518d95b9f516ff.asciidoc b/docs/examples/3a12feb0de224bfaaf518d95b9f516ff.asciidoc index aea9c1758..887795465 100644 --- a/docs/examples/3a12feb0de224bfaaf518d95b9f516ff.asciidoc +++ b/docs/examples/3a12feb0de224bfaaf518d95b9f516ff.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/watcher/put-watch.asciidoc:120 +// rest-api/watcher/put-watch.asciidoc:126 [source, python] ---- diff --git a/docs/examples/3a489743e49902df38e3368cae00717a.asciidoc b/docs/examples/3a489743e49902df38e3368cae00717a.asciidoc index 91462454d..bb1aeb077 100644 --- a/docs/examples/3a489743e49902df38e3368cae00717a.asciidoc +++ b/docs/examples/3a489743e49902df38e3368cae00717a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/high-cpu-usage.asciidoc:43 +// troubleshooting/common-issues/high-cpu-usage.asciidoc:47 [source, python] ---- diff --git a/docs/examples/3a5f2e2313614ea9693545edee22ac43.asciidoc b/docs/examples/3a5f2e2313614ea9693545edee22ac43.asciidoc index 51594a734..58caf00b1 100644 --- a/docs/examples/3a5f2e2313614ea9693545edee22ac43.asciidoc +++ b/docs/examples/3a5f2e2313614ea9693545edee22ac43.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/delete-service-token.asciidoc:47 +// rest-api/security/delete-service-token.asciidoc:53 [source, python] ---- diff --git a/docs/examples/3ac8b5234e9d53859245cf8ab0094ca5.asciidoc b/docs/examples/3ac8b5234e9d53859245cf8ab0094ca5.asciidoc index e510a9c1b..06db20d8f 100644 --- a/docs/examples/3ac8b5234e9d53859245cf8ab0094ca5.asciidoc +++ b/docs/examples/3ac8b5234e9d53859245cf8ab0094ca5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/delete-job.asciidoc:68 +// ml/anomaly-detection/apis/delete-job.asciidoc:74 [source, python] ---- diff --git a/docs/examples/3b04cc894e6a47d57983484010feac0c.asciidoc b/docs/examples/3b04cc894e6a47d57983484010feac0c.asciidoc index 7cc78c0b8..f652beadc 100644 --- 
a/docs/examples/3b04cc894e6a47d57983484010feac0c.asciidoc +++ b/docs/examples/3b04cc894e6a47d57983484010feac0c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/reindex.asciidoc:863 +// docs/reindex.asciidoc:869 [source, python] ---- diff --git a/docs/examples/3b18e9de638ff0b1c7a1f1f6bf1c24f3.asciidoc b/docs/examples/3b18e9de638ff0b1c7a1f1f6bf1c24f3.asciidoc index 4e2dc399f..b25a5f0c5 100644 --- a/docs/examples/3b18e9de638ff0b1c7a1f1f6bf1c24f3.asciidoc +++ b/docs/examples/3b18e9de638ff0b1c7a1f1f6bf1c24f3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-app-privileges.asciidoc:88 +// rest-api/security/get-app-privileges.asciidoc:94 [source, python] ---- diff --git a/docs/examples/3b1ff884f3bab390ae357e622c0544a9.asciidoc b/docs/examples/3b1ff884f3bab390ae357e622c0544a9.asciidoc index 401012b2d..41861b264 100644 --- a/docs/examples/3b1ff884f3bab390ae357e622c0544a9.asciidoc +++ b/docs/examples/3b1ff884f3bab390ae357e622c0544a9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/rrf.asciidoc:180 +// search/rrf.asciidoc:186 [source, python] ---- diff --git a/docs/examples/3b40db1c5c6b36f087d7a09a4ce285c6.asciidoc b/docs/examples/3b40db1c5c6b36f087d7a09a4ce285c6.asciidoc index 1bdcf73a4..c337b8791 100644 --- a/docs/examples/3b40db1c5c6b36f087d7a09a4ce285c6.asciidoc +++ b/docs/examples/3b40db1c5c6b36f087d7a09a4ce285c6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/get-index-template.asciidoc:87 +// indices/get-index-template.asciidoc:93 [source, python] ---- diff --git a/docs/examples/3b606631284877f9bca15051630995ad.asciidoc b/docs/examples/3b606631284877f9bca15051630995ad.asciidoc index ccbcc0ec2..b9145064c 100644 --- a/docs/examples/3b606631284877f9bca15051630995ad.asciidoc +++ b/docs/examples/3b606631284877f9bca15051630995ad.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// scripting/using.asciidoc:436 +// scripting/using.asciidoc:441 [source, python] ---- diff --git a/docs/examples/3b64821fe9db73eb03860c60d775d7ff.asciidoc b/docs/examples/3b64821fe9db73eb03860c60d775d7ff.asciidoc index e8fb70a41..674654235 100644 --- a/docs/examples/3b64821fe9db73eb03860c60d775d7ff.asciidoc +++ b/docs/examples/3b64821fe9db73eb03860c60d775d7ff.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/update-cross-cluster-api-key.asciidoc:191 +// rest-api/security/update-cross-cluster-api-key.asciidoc:197 [source, python] ---- diff --git a/docs/examples/3b9c54604535d97e8368d47148aecc6f.asciidoc b/docs/examples/3b9c54604535d97e8368d47148aecc6f.asciidoc index 8f8853fb3..90feec12e 100644 --- a/docs/examples/3b9c54604535d97e8368d47148aecc6f.asciidoc +++ b/docs/examples/3b9c54604535d97e8368d47148aecc6f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/update-snapshot.asciidoc:49 +// ml/anomaly-detection/apis/update-snapshot.asciidoc:55 [source, python] ---- diff --git a/docs/examples/3bb491db29deba25e1cc82bcaa1aa1a1.asciidoc b/docs/examples/3bb491db29deba25e1cc82bcaa1aa1a1.asciidoc index 8520d79fc..356d37aac 100644 --- a/docs/examples/3bb491db29deba25e1cc82bcaa1aa1a1.asciidoc +++ b/docs/examples/3bb491db29deba25e1cc82bcaa1aa1a1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/reindex.asciidoc:775 +// docs/reindex.asciidoc:781 [source, python] ---- diff --git a/docs/examples/3bfa2362add163802fc2210cc2f37ba2.asciidoc 
b/docs/examples/3bfa2362add163802fc2210cc2f37ba2.asciidoc index d5d96fb70..0ac911e10 100644 --- a/docs/examples/3bfa2362add163802fc2210cc2f37ba2.asciidoc +++ b/docs/examples/3bfa2362add163802fc2210cc2f37ba2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/apis/clone-snapshot-api.asciidoc:10 +// snapshot-restore/apis/clone-snapshot-api.asciidoc:16 [source, python] ---- diff --git a/docs/examples/3c345feb7c52fd54bcb5d5505fd8bc3b.asciidoc b/docs/examples/3c345feb7c52fd54bcb5d5505fd8bc3b.asciidoc index 727da4e7c..9abe7bfa5 100644 --- a/docs/examples/3c345feb7c52fd54bcb5d5505fd8bc3b.asciidoc +++ b/docs/examples/3c345feb7c52fd54bcb5d5505fd8bc3b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/infer-trained-model.asciidoc:1109 +// ml/trained-models/apis/infer-trained-model.asciidoc:1115 [source, python] ---- diff --git a/docs/examples/3c6abb9885cb1a997fcdd16f7fa4f673.asciidoc b/docs/examples/3c6abb9885cb1a997fcdd16f7fa4f673.asciidoc index e56708bad..cdd703ba8 100644 --- a/docs/examples/3c6abb9885cb1a997fcdd16f7fa4f673.asciidoc +++ b/docs/examples/3c6abb9885cb1a997fcdd16f7fa4f673.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/shrink-index.asciidoc:11 +// indices/shrink-index.asciidoc:17 [source, python] ---- diff --git a/docs/examples/3c7621a81fa982b79f040a6d2611530e.asciidoc b/docs/examples/3c7621a81fa982b79f040a6d2611530e.asciidoc index 8b35b2a9b..71e813344 100644 --- a/docs/examples/3c7621a81fa982b79f040a6d2611530e.asciidoc +++ b/docs/examples/3c7621a81fa982b79f040a6d2611530e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/simulate-template.asciidoc:151 +// indices/simulate-template.asciidoc:157 [source, python] ---- diff --git a/docs/examples/3d1ff6097e2359f927c88c2ccdb36252.asciidoc b/docs/examples/3d1ff6097e2359f927c88c2ccdb36252.asciidoc index 2ae1accfc..f1781b5f4 100644 --- a/docs/examples/3d1ff6097e2359f927c88c2ccdb36252.asciidoc +++ b/docs/examples/3d1ff6097e2359f927c88c2ccdb36252.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/root.asciidoc:11 +// rest-api/root.asciidoc:17 [source, python] ---- diff --git a/docs/examples/3d316bddd8503a6cc10566630a4155d3.asciidoc b/docs/examples/3d316bddd8503a6cc10566630a4155d3.asciidoc index 8f6c809f3..dce950a5f 100644 --- a/docs/examples/3d316bddd8503a6cc10566630a4155d3.asciidoc +++ b/docs/examples/3d316bddd8503a6cc10566630a4155d3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/watcher/get-settings.asciidoc:16 +// rest-api/watcher/get-settings.asciidoc:22 [source, python] ---- diff --git a/docs/examples/3d6a56dd3d93ece0e3da3fb66b4696d3.asciidoc b/docs/examples/3d6a56dd3d93ece0e3da3fb66b4696d3.asciidoc index 04eade572..ab8d1a44f 100644 --- a/docs/examples/3d6a56dd3d93ece0e3da3fb66b4696d3.asciidoc +++ b/docs/examples/3d6a56dd3d93ece0e3da3fb66b4696d3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/nodes-usage.asciidoc:66 +// cluster/nodes-usage.asciidoc:71 [source, python] ---- diff --git a/docs/examples/3da35090e093c2d83c3b7d0d83bcb4ae.asciidoc b/docs/examples/3da35090e093c2d83c3b7d0d83bcb4ae.asciidoc index e76879c83..bf8d48f17 100644 --- a/docs/examples/3da35090e093c2d83c3b7d0d83bcb4ae.asciidoc +++ b/docs/examples/3da35090e093c2d83c3b7d0d83bcb4ae.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// setup/important-settings/path-settings.asciidoc:71 +// path-settings-overview.asciidoc:51 
[source, python] ---- diff --git a/docs/examples/3db2b5a6424aa92ecab7a8640c38685a.asciidoc b/docs/examples/3db2b5a6424aa92ecab7a8640c38685a.asciidoc index 51ec05d35..0f80fcace 100644 --- a/docs/examples/3db2b5a6424aa92ecab7a8640c38685a.asciidoc +++ b/docs/examples/3db2b5a6424aa92ecab7a8640c38685a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/delete.asciidoc:180 +// docs/delete.asciidoc:186 [source, python] ---- diff --git a/docs/examples/3e121b43773cbb6dffa9b483c86a1f8d.asciidoc b/docs/examples/3e121b43773cbb6dffa9b483c86a1f8d.asciidoc index 12737c951..2c5368e9f 100644 --- a/docs/examples/3e121b43773cbb6dffa9b483c86a1f8d.asciidoc +++ b/docs/examples/3e121b43773cbb6dffa9b483c86a1f8d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/bulk-update-api-keys.asciidoc:81 +// rest-api/security/bulk-update-api-keys.asciidoc:87 [source, python] ---- diff --git a/docs/examples/3e33c1a4298ea6a0dec65a3ebf9ba973.asciidoc b/docs/examples/3e33c1a4298ea6a0dec65a3ebf9ba973.asciidoc index 657a6b6c1..01636840a 100644 --- a/docs/examples/3e33c1a4298ea6a0dec65a3ebf9ba973.asciidoc +++ b/docs/examples/3e33c1a4298ea6a0dec65a3ebf9ba973.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/termvectors.asciidoc:333 +// docs/termvectors.asciidoc:339 [source, python] ---- diff --git a/docs/examples/3e8ed6ae016eb823cb00d9035b8ac459.asciidoc b/docs/examples/3e8ed6ae016eb823cb00d9035b8ac459.asciidoc index 50e9dce77..7d8074935 100644 --- a/docs/examples/3e8ed6ae016eb823cb00d9035b8ac459.asciidoc +++ b/docs/examples/3e8ed6ae016eb823cb00d9035b8ac459.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search.asciidoc:10 +// search/search.asciidoc:16 [source, python] ---- diff --git a/docs/examples/3ea4c971b3f47735dcc207ee2645fa03.asciidoc b/docs/examples/3ea4c971b3f47735dcc207ee2645fa03.asciidoc new file mode 100644 index 000000000..9aed3ad38 --- /dev/null +++ b/docs/examples/3ea4c971b3f47735dcc207ee2645fa03.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// alias.asciidoc:418 + +[source, python] +---- +resp = client.indices.update_aliases( + actions=[ + { + "remove_index": { + "index": "my-index-2099.05.06-000001" + } + } + ], +) +print(resp) +---- diff --git a/docs/examples/3ed79871d956bfb2d6d2721d7272520c.asciidoc b/docs/examples/3ed79871d956bfb2d6d2721d7272520c.asciidoc index c7be516e2..1f0745d69 100644 --- a/docs/examples/3ed79871d956bfb2d6d2721d7272520c.asciidoc +++ b/docs/examples/3ed79871d956bfb2d6d2721d7272520c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/watcher/stats.asciidoc:112 +// rest-api/watcher/stats.asciidoc:118 [source, python] ---- diff --git a/docs/examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc b/docs/examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc index 680282f79..1202241d6 100644 --- a/docs/examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc +++ b/docs/examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc @@ -1,9 +1,12 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-async-query-api.asciidoc:17 +// esql/esql-async-query-api.asciidoc:23 [source, python] ---- -resp = client.esql.async_query( +resp = client.perform_request( + "POST", + "/_query/async", + headers={"Content-Type": "application/json"}, body={ "query": "\n FROM library\n | EVAL year = DATE_TRUNC(1 YEARS, release_date)\n | STATS MAX(page_count) BY year\n | SORT year\n | LIMIT 5\n ", "wait_for_completion_timeout": "2s" diff --git 
a/docs/examples/3f30310cc6d0adae6b0f61705624a695.asciidoc b/docs/examples/3f30310cc6d0adae6b0f61705624a695.asciidoc index b8624b2ff..63c092f08 100644 --- a/docs/examples/3f30310cc6d0adae6b0f61705624a695.asciidoc +++ b/docs/examples/3f30310cc6d0adae6b0f61705624a695.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/apis/create-snapshot-api.asciidoc:160 +// snapshot-restore/apis/create-snapshot-api.asciidoc:166 [source, python] ---- diff --git a/docs/examples/3f8dc309b63fa0437898107b0d964217.asciidoc b/docs/examples/3f8dc309b63fa0437898107b0d964217.asciidoc index d97f352cd..333e71a68 100644 --- a/docs/examples/3f8dc309b63fa0437898107b0d964217.asciidoc +++ b/docs/examples/3f8dc309b63fa0437898107b0d964217.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/anomaly-detectors.asciidoc:281 +// cat/anomaly-detectors.asciidoc:287 [source, python] ---- diff --git a/docs/examples/3f9dcf2aa42f3ecfb5ebfe48c1774103.asciidoc b/docs/examples/3f9dcf2aa42f3ecfb5ebfe48c1774103.asciidoc new file mode 100644 index 000000000..7f8d2cc65 --- /dev/null +++ b/docs/examples/3f9dcf2aa42f3ecfb5ebfe48c1774103.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// quickstart/aggs-tutorial.asciidoc:360 + +[source, python] +---- +resp = client.search( + index="kibana_sample_data_ecommerce", + size=0, + aggs={ + "order_stats": { + "stats": { + "field": "taxful_total_price" + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/3fe4264ace04405989141c43aadfff81.asciidoc b/docs/examples/3fe4264ace04405989141c43aadfff81.asciidoc index 28dd407e9..84122ddd4 100644 --- a/docs/examples/3fe4264ace04405989141c43aadfff81.asciidoc +++ b/docs/examples/3fe4264ace04405989141c43aadfff81.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/create-roles.asciidoc:167 +// rest-api/security/create-roles.asciidoc:173 [source, python] ---- diff --git a/docs/examples/3fe5e6c0d5ea4586aa04f989ae54b72e.asciidoc b/docs/examples/3fe5e6c0d5ea4586aa04f989ae54b72e.asciidoc index e8e5b0d59..66637dbe9 100644 --- a/docs/examples/3fe5e6c0d5ea4586aa04f989ae54b72e.asciidoc +++ b/docs/examples/3fe5e6c0d5ea4586aa04f989ae54b72e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/apis/verify-repo-api.asciidoc:25 +// snapshot-restore/apis/verify-repo-api.asciidoc:31 [source, python] ---- diff --git a/docs/examples/400e89eb46ead8e9c9e40f123fd5e590.asciidoc b/docs/examples/400e89eb46ead8e9c9e40f123fd5e590.asciidoc index 8e863d72b..d4266e99d 100644 --- a/docs/examples/400e89eb46ead8e9c9e40f123fd5e590.asciidoc +++ b/docs/examples/400e89eb46ead8e9c9e40f123fd5e590.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/reindex.asciidoc:428 +// docs/reindex.asciidoc:434 [source, python] ---- diff --git a/docs/examples/4029af36cb3f8202549017f7378803b4.asciidoc b/docs/examples/4029af36cb3f8202549017f7378803b4.asciidoc index 215136b33..558cc0bd3 100644 --- a/docs/examples/4029af36cb3f8202549017f7378803b4.asciidoc +++ b/docs/examples/4029af36cb3f8202549017f7378803b4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/get-settings.asciidoc:10 +// cluster/get-settings.asciidoc:16 [source, python] ---- diff --git a/docs/examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc b/docs/examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc index 500d3e1c5..376f8c0f4 100644 --- a/docs/examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc +++ 
b/docs/examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc @@ -1,11 +1,11 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-async-query-get-api.asciidoc:11 +// esql/esql-async-query-get-api.asciidoc:17 [source, python] ---- -resp = client.esql.async_query_get( - id="FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=", - body=None, +resp = client.perform_request( + "GET", + "/_query/async/FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=", ) print(resp) ---- diff --git a/docs/examples/405ac843a9156d3cab374e199cac87fb.asciidoc b/docs/examples/405ac843a9156d3cab374e199cac87fb.asciidoc index 15e9b0326..efc7cddb3 100644 --- a/docs/examples/405ac843a9156d3cab374e199cac87fb.asciidoc +++ b/docs/examples/405ac843a9156d3cab374e199cac87fb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/create-connector-sync-job-api.asciidoc:16 +// connector/apis/create-connector-sync-job-api.asciidoc:21 [source, python] ---- diff --git a/docs/examples/405db6f3a01eceacfaa8b0ed3e4b3ac2.asciidoc b/docs/examples/405db6f3a01eceacfaa8b0ed3e4b3ac2.asciidoc index 33c666fed..d9a5a7e2b 100644 --- a/docs/examples/405db6f3a01eceacfaa8b0ed3e4b3ac2.asciidoc +++ b/docs/examples/405db6f3a01eceacfaa8b0ed3e4b3ac2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/get-overall-buckets.asciidoc:175 +// ml/anomaly-detection/apis/get-overall-buckets.asciidoc:181 [source, python] ---- diff --git a/docs/examples/4061fd5ba7221ca85805ed14d59a6bc5.asciidoc b/docs/examples/4061fd5ba7221ca85805ed14d59a6bc5.asciidoc index 62d2fd139..0b040a384 100644 --- a/docs/examples/4061fd5ba7221ca85805ed14d59a6bc5.asciidoc +++ b/docs/examples/4061fd5ba7221ca85805ed14d59a6bc5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// scripting/using.asciidoc:266 +// scripting/using.asciidoc:271 [source, python] ---- diff --git a/docs/examples/408060f0c52300588a6dee774f4fd6a5.asciidoc b/docs/examples/408060f0c52300588a6dee774f4fd6a5.asciidoc index 71728aef6..28a8deeb5 100644 --- a/docs/examples/408060f0c52300588a6dee774f4fd6a5.asciidoc +++ b/docs/examples/408060f0c52300588a6dee774f4fd6a5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/downsampling-ilm.asciidoc:260 +// data-streams/downsampling-dsl.asciidoc:218 [source, python] ---- diff --git a/docs/examples/4113c57384aa37c58d11579e20c00760.asciidoc b/docs/examples/4113c57384aa37c58d11579e20c00760.asciidoc index b76e6d369..6da608277 100644 --- a/docs/examples/4113c57384aa37c58d11579e20c00760.asciidoc +++ b/docs/examples/4113c57384aa37c58d11579e20c00760.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/get.asciidoc:59 +// docs/get.asciidoc:65 [source, python] ---- diff --git a/docs/examples/41175d304e660da2931764f9a4418fd3.asciidoc b/docs/examples/41175d304e660da2931764f9a4418fd3.asciidoc index d37e82323..c5847cd73 100644 --- a/docs/examples/41175d304e660da2931764f9a4418fd3.asciidoc +++ b/docs/examples/41175d304e660da2931764f9a4418fd3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-pipeline-api.asciidoc:88 +// connector/apis/update-connector-pipeline-api.asciidoc:94 [source, python] ---- diff --git a/docs/examples/41195ef13af0465cdee1ae18f6c00fde.asciidoc b/docs/examples/41195ef13af0465cdee1ae18f6c00fde.asciidoc index 3febeb06c..3b320bb1e 100644 --- a/docs/examples/41195ef13af0465cdee1ae18f6c00fde.asciidoc +++ 
b/docs/examples/41195ef13af0465cdee1ae18f6c00fde.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// slm/apis/slm-stop.asciidoc:47 +// slm/apis/slm-stop.asciidoc:52 [source, python] ---- diff --git a/docs/examples/412f8238ab5182678f1d8f6383031b11.asciidoc b/docs/examples/412f8238ab5182678f1d8f6383031b11.asciidoc index c57e81fa5..8450dd9a2 100644 --- a/docs/examples/412f8238ab5182678f1d8f6383031b11.asciidoc +++ b/docs/examples/412f8238ab5182678f1d8f6383031b11.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/get-alias.asciidoc:10 +// indices/get-alias.asciidoc:16 [source, python] ---- diff --git a/docs/examples/41d24383d29b2808a65258a0a3256e96.asciidoc b/docs/examples/41d24383d29b2808a65258a0a3256e96.asciidoc new file mode 100644 index 000000000..ea815d5f7 --- /dev/null +++ b/docs/examples/41d24383d29b2808a65258a0a3256e96.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// inference/service-jinaai.asciidoc:188 + +[source, python] +---- +resp = client.indices.create( + index="jinaai-index", + mappings={ + "properties": { + "content": { + "type": "semantic_text", + "inference_id": "jinaai-embeddings" + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/41dbd79f624b998d01c10921e9a35c4b.asciidoc b/docs/examples/41dbd79f624b998d01c10921e9a35c4b.asciidoc index 6f7efef11..7a5985b03 100644 --- a/docs/examples/41dbd79f624b998d01c10921e9a35c4b.asciidoc +++ b/docs/examples/41dbd79f624b998d01c10921e9a35c4b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update.asciidoc:290 +// docs/update.asciidoc:296 [source, python] ---- diff --git a/docs/examples/41fd33a293a575bd71a1fac7bcc8b47c.asciidoc b/docs/examples/41fd33a293a575bd71a1fac7bcc8b47c.asciidoc index 234b4b474..b9c9770e5 100644 --- a/docs/examples/41fd33a293a575bd71a1fac7bcc8b47c.asciidoc +++ b/docs/examples/41fd33a293a575bd71a1fac7bcc8b47c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search-application/apis/put-search-application.asciidoc:148 +// search-application/apis/put-search-application.asciidoc:153 [source, python] ---- diff --git a/docs/examples/4207219a892339e8f3abe0df8723dd27.asciidoc b/docs/examples/4207219a892339e8f3abe0df8723dd27.asciidoc index 92314f5f5..f929c54b2 100644 --- a/docs/examples/4207219a892339e8f3abe0df8723dd27.asciidoc +++ b/docs/examples/4207219a892339e8f3abe0df8723dd27.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// modules/cluster/misc.asciidoc:133 +// modules/cluster/misc.asciidoc:136 [source, python] ---- diff --git a/docs/examples/421e68e2b9789f0e8c08760d9e685d1c.asciidoc b/docs/examples/421e68e2b9789f0e8c08760d9e685d1c.asciidoc index 7184d8b40..2c831b393 100644 --- a/docs/examples/421e68e2b9789f0e8c08760d9e685d1c.asciidoc +++ b/docs/examples/421e68e2b9789f0e8c08760d9e685d1c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/update-job.asciidoc:258 +// ml/anomaly-detection/apis/update-job.asciidoc:264 [source, python] ---- diff --git a/docs/examples/424fbf082cd4affb84439abfc916b597.asciidoc b/docs/examples/424fbf082cd4affb84439abfc916b597.asciidoc index c09038abb..96b24b354 100644 --- a/docs/examples/424fbf082cd4affb84439abfc916b597.asciidoc +++ b/docs/examples/424fbf082cd4affb84439abfc916b597.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/downsample-data-stream.asciidoc:59 +// indices/downsample-data-stream.asciidoc:65 [source, python] ---- diff --git 
a/docs/examples/425eaaf9c7e3b1e77a3474fbab4183b4.asciidoc b/docs/examples/425eaaf9c7e3b1e77a3474fbab4183b4.asciidoc index b6bb4fb2c..7b8038083 100644 --- a/docs/examples/425eaaf9c7e3b1e77a3474fbab4183b4.asciidoc +++ b/docs/examples/425eaaf9c7e3b1e77a3474fbab4183b4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/task-queue-backlog.asciidoc:25 +// troubleshooting/common-issues/task-queue-backlog.asciidoc:36 [source, python] ---- diff --git a/docs/examples/430705509f8367aef92be413f702520b.asciidoc b/docs/examples/430705509f8367aef92be413f702520b.asciidoc index 7e2d3af29..41b09e782 100644 --- a/docs/examples/430705509f8367aef92be413f702520b.asciidoc +++ b/docs/examples/430705509f8367aef92be413f702520b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-status-api.asciidoc:76 +// connector/apis/update-connector-status-api.asciidoc:82 [source, python] ---- diff --git a/docs/examples/436d50b85fc8f0977d02059eec00719b.asciidoc b/docs/examples/436d50b85fc8f0977d02059eec00719b.asciidoc index 041633ca9..bd4578790 100644 --- a/docs/examples/436d50b85fc8f0977d02059eec00719b.asciidoc +++ b/docs/examples/436d50b85fc8f0977d02059eec00719b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update.asciidoc:309 +// docs/update.asciidoc:315 [source, python] ---- diff --git a/docs/examples/43d9e314431336a6f084cea76dfd6489.asciidoc b/docs/examples/43d9e314431336a6f084cea76dfd6489.asciidoc index 14c7b4a3c..251530a9e 100644 --- a/docs/examples/43d9e314431336a6f084cea76dfd6489.asciidoc +++ b/docs/examples/43d9e314431336a6f084cea76dfd6489.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:235 +// search/retriever.asciidoc:247 [source, python] ---- diff --git a/docs/examples/43e86fbaeed068dcc981214338559b5a.asciidoc b/docs/examples/43e86fbaeed068dcc981214338559b5a.asciidoc index 254f835ae..d0a87d736 100644 --- a/docs/examples/43e86fbaeed068dcc981214338559b5a.asciidoc +++ b/docs/examples/43e86fbaeed068dcc981214338559b5a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/resolve-cluster.asciidoc:58 +// indices/resolve-cluster.asciidoc:89 [source, python] ---- diff --git a/docs/examples/43fe75fa9f3fca846598fdad58fd98cb.asciidoc b/docs/examples/43fe75fa9f3fca846598fdad58fd98cb.asciidoc index a5f3ab8d5..134df95bc 100644 --- a/docs/examples/43fe75fa9f3fca846598fdad58fd98cb.asciidoc +++ b/docs/examples/43fe75fa9f3fca846598fdad58fd98cb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/usage.asciidoc:38 +// rest-api/usage.asciidoc:44 [source, python] ---- diff --git a/docs/examples/44198781d164a15be633d4469485a544.asciidoc b/docs/examples/44198781d164a15be633d4469485a544.asciidoc index db8b96e88..b51ba5ba8 100644 --- a/docs/examples/44198781d164a15be633d4469485a544.asciidoc +++ b/docs/examples/44198781d164a15be633d4469485a544.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// vectors/vector-functions.asciidoc:379 +// vectors/vector-functions.asciidoc:383 [source, python] ---- diff --git a/docs/examples/44231f7cdd5c3a21025861cdef31e355.asciidoc b/docs/examples/44231f7cdd5c3a21025861cdef31e355.asciidoc index d4067263d..3b58c3f01 100644 --- a/docs/examples/44231f7cdd5c3a21025861cdef31e355.asciidoc +++ b/docs/examples/44231f7cdd5c3a21025861cdef31e355.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// 
troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:201 +// troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:206 [source, python] ---- diff --git a/docs/examples/44385b61342e20ea05f254015b2b04d7.asciidoc b/docs/examples/44385b61342e20ea05f254015b2b04d7.asciidoc index e8c93ac34..809e5607b 100644 --- a/docs/examples/44385b61342e20ea05f254015b2b04d7.asciidoc +++ b/docs/examples/44385b61342e20ea05f254015b2b04d7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/bulk-delete-roles.asciidoc:48 +// rest-api/security/bulk-delete-roles.asciidoc:54 [source, python] ---- diff --git a/docs/examples/4479e8c63a04fa22207a6a8803eadcad.asciidoc b/docs/examples/4479e8c63a04fa22207a6a8803eadcad.asciidoc index b5d3ca633..3abec964f 100644 --- a/docs/examples/4479e8c63a04fa22207a6a8803eadcad.asciidoc +++ b/docs/examples/4479e8c63a04fa22207a6a8803eadcad.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// modules/cluster/allocation_awareness.asciidoc:67 +// modules/cluster/allocation_awareness.asciidoc:62 [source, python] ---- diff --git a/docs/examples/4498b9d3b0c77e1b9ef6664ff5963ce2.asciidoc b/docs/examples/4498b9d3b0c77e1b9ef6664ff5963ce2.asciidoc index 8d22ea53a..66d57f24a 100644 --- a/docs/examples/4498b9d3b0c77e1b9ef6664ff5963ce2.asciidoc +++ b/docs/examples/4498b9d3b0c77e1b9ef6664ff5963ce2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// modules/indices/request_cache.asciidoc:59 +// shard-request-cache.asciidoc:61 [source, python] ---- diff --git a/docs/examples/44b8a236d7cfb31c43c6d066ae16d8cd.asciidoc b/docs/examples/44b8a236d7cfb31c43c6d066ae16d8cd.asciidoc index db0e61236..a3bd120f1 100644 --- a/docs/examples/44b8a236d7cfb31c43c6d066ae16d8cd.asciidoc +++ b/docs/examples/44b8a236d7cfb31c43c6d066ae16d8cd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/profile.asciidoc:35 +// search/profile.asciidoc:40 [source, python] ---- diff --git a/docs/examples/44da736ce0e1587c1e7c45eee606ead7.asciidoc b/docs/examples/44da736ce0e1587c1e7c45eee606ead7.asciidoc index f41b42c8d..44157ab00 100644 --- a/docs/examples/44da736ce0e1587c1e7c45eee606ead7.asciidoc +++ b/docs/examples/44da736ce0e1587c1e7c45eee606ead7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update-by-query.asciidoc:403 +// docs/update-by-query.asciidoc:409 [source, python] ---- diff --git a/docs/examples/44db41b8465af951e366da97ade63bc1.asciidoc b/docs/examples/44db41b8465af951e366da97ade63bc1.asciidoc index 08d8e6708..19a3b1d21 100644 --- a/docs/examples/44db41b8465af951e366da97ade63bc1.asciidoc +++ b/docs/examples/44db41b8465af951e366da97ade63bc1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/apis/reload-analyzers.asciidoc:154 +// indices/apis/reload-analyzers.asciidoc:160 [source, python] ---- diff --git a/docs/examples/458b2228aed7464d915a5d73cb6b98f6.asciidoc b/docs/examples/458b2228aed7464d915a5d73cb6b98f6.asciidoc index d8d3104df..654e8e078 100644 --- a/docs/examples/458b2228aed7464d915a5d73cb6b98f6.asciidoc +++ b/docs/examples/458b2228aed7464d915a5d73cb6b98f6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/snapshots.asciidoc:129 +// cat/snapshots.asciidoc:135 [source, python] ---- diff --git a/docs/examples/45954b8aaedfed57012be8b6538b0a24.asciidoc b/docs/examples/45954b8aaedfed57012be8b6538b0a24.asciidoc new file mode 100644 index 000000000..860441617 --- /dev/null +++ 
b/docs/examples/45954b8aaedfed57012be8b6538b0a24.asciidoc @@ -0,0 +1,48 @@ +// This file is autogenerated, DO NOT EDIT +// inference/chat-completion-inference.asciidoc:352 + +[source, python] +---- +resp = client.perform_request( + "POST", + "/_inference/chat_completion/openai-completion/_stream", + headers={"Content-Type": "application/json"}, + body={ + "messages": [ + { + "role": "user", + "content": [ + { + "type": "text", + "text": "What's the price of a scarf?" + } + ] + } + ], + "tools": [ + { + "type": "function", + "function": { + "name": "get_current_price", + "description": "Get the current price of a item", + "parameters": { + "type": "object", + "properties": { + "item": { + "id": "123" + } + } + } + } + } + ], + "tool_choice": { + "type": "function", + "function": { + "name": "get_current_price" + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/46064e81620162a23e75002a7eeb8b10.asciidoc b/docs/examples/46064e81620162a23e75002a7eeb8b10.asciidoc index 3c332a0b2..b99e20fcf 100644 --- a/docs/examples/46064e81620162a23e75002a7eeb8b10.asciidoc +++ b/docs/examples/46064e81620162a23e75002a7eeb8b10.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ilm/apis/move-to-step.asciidoc:188 +// ilm/apis/move-to-step.asciidoc:194 [source, python] ---- diff --git a/docs/examples/46658f00edc4865dfe472a392374cd0f.asciidoc b/docs/examples/46658f00edc4865dfe472a392374cd0f.asciidoc index 83abbcf70..185af87c5 100644 --- a/docs/examples/46658f00edc4865dfe472a392374cd0f.asciidoc +++ b/docs/examples/46658f00edc4865dfe472a392374cd0f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-index-template-v1.asciidoc:252 +// indices/put-index-template-v1.asciidoc:258 [source, python] ---- diff --git a/docs/examples/468f7ec42cdd8287cdea3ec1cea4a514.asciidoc b/docs/examples/468f7ec42cdd8287cdea3ec1cea4a514.asciidoc index 20d03bb33..25ae2ca45 100644 --- a/docs/examples/468f7ec42cdd8287cdea3ec1cea4a514.asciidoc +++ b/docs/examples/468f7ec42cdd8287cdea3ec1cea4a514.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// scripting/using.asciidoc:333 +// scripting/using.asciidoc:338 [source, python] ---- diff --git a/docs/examples/46b1c1f6e0c86528be84c373eeb8d425.asciidoc b/docs/examples/46b1c1f6e0c86528be84c373eeb8d425.asciidoc index a187319f3..27eda1700 100644 --- a/docs/examples/46b1c1f6e0c86528be84c373eeb8d425.asciidoc +++ b/docs/examples/46b1c1f6e0c86528be84c373eeb8d425.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// licensing/update-license.asciidoc:139 +// licensing/update-license.asciidoc:145 [source, python] ---- diff --git a/docs/examples/480e531db799c4c909afd8e2a73a8d0b.asciidoc b/docs/examples/480e531db799c4c909afd8e2a73a8d0b.asciidoc index 21c1254ff..f2f7b1dd4 100644 --- a/docs/examples/480e531db799c4c909afd8e2a73a8d0b.asciidoc +++ b/docs/examples/480e531db799c4c909afd8e2a73a8d0b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/forcemerge.asciidoc:193 +// indices/forcemerge.asciidoc:199 [source, python] ---- diff --git a/docs/examples/483d669ec0768bc4e275a568c6164704.asciidoc b/docs/examples/483d669ec0768bc4e275a568c6164704.asciidoc index e568ac3f6..ac531f427 100644 --- a/docs/examples/483d669ec0768bc4e275a568c6164704.asciidoc +++ b/docs/examples/483d669ec0768bc4e275a568c6164704.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ccr/apis/follow/post-pause-follow.asciidoc:29 +// ccr/apis/follow/post-pause-follow.asciidoc:35 [source, python] ---- diff 
--git a/docs/examples/488f6df1df71972392b670ce557f7ff3.asciidoc b/docs/examples/488f6df1df71972392b670ce557f7ff3.asciidoc index f44dfa132..6d60ffbe5 100644 --- a/docs/examples/488f6df1df71972392b670ce557f7ff3.asciidoc +++ b/docs/examples/488f6df1df71972392b670ce557f7ff3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-index-template-v1.asciidoc:234 +// indices/put-index-template-v1.asciidoc:240 [source, python] ---- diff --git a/docs/examples/9f16fca9813304e398ee052aa857dbcd.asciidoc b/docs/examples/48e142e6c69014e0509d4c9251749d77.asciidoc similarity index 72% rename from docs/examples/9f16fca9813304e398ee052aa857dbcd.asciidoc rename to docs/examples/48e142e6c69014e0509d4c9251749d77.asciidoc index ecef289d9..1f8ce6f36 100644 --- a/docs/examples/9f16fca9813304e398ee052aa857dbcd.asciidoc +++ b/docs/examples/48e142e6c69014e0509d4c9251749d77.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-openai.asciidoc:141 +// inference/service-openai.asciidoc:161 [source, python] ---- @@ -10,7 +10,8 @@ resp = client.inference.put( "service": "openai", "service_settings": { "api_key": "", - "model_id": "text-embedding-ada-002" + "model_id": "text-embedding-3-small", + "dimensions": 128 } }, ) diff --git a/docs/examples/4982c547be1ad9455ae836990aea92c5.asciidoc b/docs/examples/4982c547be1ad9455ae836990aea92c5.asciidoc index b7a6d50b1..c907beaf6 100644 --- a/docs/examples/4982c547be1ad9455ae836990aea92c5.asciidoc +++ b/docs/examples/4982c547be1ad9455ae836990aea92c5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/start-trained-model-deployment.asciidoc:222 +// ml/trained-models/apis/start-trained-model-deployment.asciidoc:228 [source, python] ---- diff --git a/docs/examples/49b31e23f8b9667b6a7b2734d55fb6ed.asciidoc b/docs/examples/49b31e23f8b9667b6a7b2734d55fb6ed.asciidoc deleted file mode 100644 index a3fcb5fab..000000000 --- a/docs/examples/49b31e23f8b9667b6a7b2734d55fb6ed.asciidoc +++ /dev/null @@ -1,24 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// search/knn-search.asciidoc:36 - -[source, python] ----- -resp = client.knn_search( - index="my-index", - knn={ - "field": "image_vector", - "query_vector": [ - 0.3, - 0.1, - 1.2 - ], - "k": 10, - "num_candidates": 100 - }, - source=[ - "name", - "file_type" - ], -) -print(resp) ----- diff --git a/docs/examples/49c052a748c943180db78fee8e144239.asciidoc b/docs/examples/49c052a748c943180db78fee8e144239.asciidoc index 35f82f055..d40b88497 100644 --- a/docs/examples/49c052a748c943180db78fee8e144239.asciidoc +++ b/docs/examples/49c052a748c943180db78fee8e144239.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/clear-api-key-cache.asciidoc:50 +// rest-api/security/clear-api-key-cache.asciidoc:56 [source, python] ---- diff --git a/docs/examples/49d87c2eb7314ed34221c5fb4f21dfcc.asciidoc b/docs/examples/49d87c2eb7314ed34221c5fb4f21dfcc.asciidoc index 50a554339..26e15bc31 100644 --- a/docs/examples/49d87c2eb7314ed34221c5fb4f21dfcc.asciidoc +++ b/docs/examples/49d87c2eb7314ed34221c5fb4f21dfcc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/analyze.asciidoc:257 +// indices/analyze.asciidoc:263 [source, python] ---- diff --git a/docs/examples/49e8773a34fcbf825de38426cff5509c.asciidoc b/docs/examples/49e8773a34fcbf825de38426cff5509c.asciidoc index 1723f1af4..4e7fd6c8f 100644 --- a/docs/examples/49e8773a34fcbf825de38426cff5509c.asciidoc +++ 
b/docs/examples/49e8773a34fcbf825de38426cff5509c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/profile.asciidoc:1270 +// search/profile.asciidoc:1275 [source, python] ---- diff --git a/docs/examples/49f4d2a461536d150e16b1e0a3148678.asciidoc b/docs/examples/49f4d2a461536d150e16b1e0a3148678.asciidoc index c32439da7..4163a05da 100644 --- a/docs/examples/49f4d2a461536d150e16b1e0a3148678.asciidoc +++ b/docs/examples/49f4d2a461536d150e16b1e0a3148678.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/clearcache.asciidoc:110 +// indices/clearcache.asciidoc:116 [source, python] ---- diff --git a/docs/examples/4a1951844bd39f26961bfc965f3432b1.asciidoc b/docs/examples/4a1951844bd39f26961bfc965f3432b1.asciidoc index ed58cd4a6..487751e38 100644 --- a/docs/examples/4a1951844bd39f26961bfc965f3432b1.asciidoc +++ b/docs/examples/4a1951844bd39f26961bfc965f3432b1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/multi-get.asciidoc:138 +// docs/multi-get.asciidoc:144 [source, python] ---- diff --git a/docs/examples/4a4b8a406681584a91c0e614c1fa4344.asciidoc b/docs/examples/4a4b8a406681584a91c0e614c1fa4344.asciidoc index 2b0e13e14..7327152dc 100644 --- a/docs/examples/4a4b8a406681584a91c0e614c1fa4344.asciidoc +++ b/docs/examples/4a4b8a406681584a91c0e614c1fa4344.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/create-api-keys.asciidoc:128 +// rest-api/security/create-api-keys.asciidoc:134 [source, python] ---- diff --git a/docs/examples/4ae494d1e62231e832fc0436b04e2014.asciidoc b/docs/examples/4ae494d1e62231e832fc0436b04e2014.asciidoc index 2aed5eb79..66d40366f 100644 --- a/docs/examples/4ae494d1e62231e832fc0436b04e2014.asciidoc +++ b/docs/examples/4ae494d1e62231e832fc0436b04e2014.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/validate.asciidoc:116 +// search/validate.asciidoc:122 [source, python] ---- diff --git a/docs/examples/4b1044259a6d777d87529eae25675005.asciidoc b/docs/examples/4b1044259a6d777d87529eae25675005.asciidoc index 3d9ff4dca..4422e2335 100644 --- a/docs/examples/4b1044259a6d777d87529eae25675005.asciidoc +++ b/docs/examples/4b1044259a6d777d87529eae25675005.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update-by-query.asciidoc:444 +// docs/update-by-query.asciidoc:450 [source, python] ---- diff --git a/docs/examples/4b5110a21676cc0e26e050a4b4552235.asciidoc b/docs/examples/4b5110a21676cc0e26e050a4b4552235.asciidoc index b631d63a0..7881b4323 100644 --- a/docs/examples/4b5110a21676cc0e26e050a4b4552235.asciidoc +++ b/docs/examples/4b5110a21676cc0e26e050a4b4552235.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// synonyms/apis/get-synonyms-set.asciidoc:75 +// synonyms/apis/get-synonyms-set.asciidoc:81 [source, python] ---- diff --git a/docs/examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc b/docs/examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc index 46d369d7e..3a3a91764 100644 --- a/docs/examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc +++ b/docs/examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc @@ -1,11 +1,12 @@ // This file is autogenerated, DO NOT EDIT -// inference/stream-inference.asciidoc:78 +// inference/stream-inference.asciidoc:88 [source, python] ---- -resp = client.inference.stream_inference( - task_type="completion", - inference_id="openai-completion", +resp = client.perform_request( + "POST", + "/_inference/completion/openai-completion/_stream", + 
headers={"Content-Type": "application/json"}, body={ "input": "What is Elastic?" }, diff --git a/docs/examples/4bc4db44b8c74610b73f21a421099a13.asciidoc b/docs/examples/4bc4db44b8c74610b73f21a421099a13.asciidoc index 488080f02..1773348bd 100644 --- a/docs/examples/4bc4db44b8c74610b73f21a421099a13.asciidoc +++ b/docs/examples/4bc4db44b8c74610b73f21a421099a13.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/invalidate-tokens.asciidoc:188 +// rest-api/security/invalidate-tokens.asciidoc:194 [source, python] ---- diff --git a/docs/examples/4be07b34db282044c88d5021c7ea08ee.asciidoc b/docs/examples/4be07b34db282044c88d5021c7ea08ee.asciidoc index 5c8c2d90c..5b9e08deb 100644 --- a/docs/examples/4be07b34db282044c88d5021c7ea08ee.asciidoc +++ b/docs/examples/4be07b34db282044c88d5021c7ea08ee.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/dense-vector.asciidoc:18 +// mapping/types/dense-vector.asciidoc:17 [source, python] ---- diff --git a/docs/examples/4bef98a2dac575a50ee0783c2269f1db.asciidoc b/docs/examples/4bef98a2dac575a50ee0783c2269f1db.asciidoc index 3a7b0bcd6..80814dcf0 100644 --- a/docs/examples/4bef98a2dac575a50ee0783c2269f1db.asciidoc +++ b/docs/examples/4bef98a2dac575a50ee0783c2269f1db.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/dense-vector.asciidoc:500 +// mapping/types/dense-vector.asciidoc:498 [source, python] ---- diff --git a/docs/examples/4bfcb2861f1d572bd0d66acd66deab0b.asciidoc b/docs/examples/4bfcb2861f1d572bd0d66acd66deab0b.asciidoc index b83d3fc8a..a476373ec 100644 --- a/docs/examples/4bfcb2861f1d572bd0d66acd66deab0b.asciidoc +++ b/docs/examples/4bfcb2861f1d572bd0d66acd66deab0b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/update-datafeed.asciidoc:160 +// ml/anomaly-detection/apis/update-datafeed.asciidoc:166 [source, python] ---- diff --git a/docs/examples/4c174e228b6b74497b73ef2be80de7ad.asciidoc b/docs/examples/4c174e228b6b74497b73ef2be80de7ad.asciidoc index aec9f0336..3dd959ac0 100644 --- a/docs/examples/4c174e228b6b74497b73ef2be80de7ad.asciidoc +++ b/docs/examples/4c174e228b6b74497b73ef2be80de7ad.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/get-trained-models.asciidoc:1460 +// ml/trained-models/apis/get-trained-models.asciidoc:1467 [source, python] ---- diff --git a/docs/examples/4c3db8987d7b2d3d3df78ff1e71e7ede.asciidoc b/docs/examples/4c3db8987d7b2d3d3df78ff1e71e7ede.asciidoc index a8ec61ba0..4fb637fd2 100644 --- a/docs/examples/4c3db8987d7b2d3d3df78ff1e71e7ede.asciidoc +++ b/docs/examples/4c3db8987d7b2d3d3df78ff1e71e7ede.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/match-query.asciidoc:18 +// query-dsl/match-query.asciidoc:22 [source, python] ---- diff --git a/docs/examples/4c5f0d7af287618062bb627b44ccb23e.asciidoc b/docs/examples/4c5f0d7af287618062bb627b44ccb23e.asciidoc index 4d151a625..b54dcffd9 100644 --- a/docs/examples/4c5f0d7af287618062bb627b44ccb23e.asciidoc +++ b/docs/examples/4c5f0d7af287618062bb627b44ccb23e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:192 +// troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:197 [source, python] ---- diff --git a/docs/examples/4c712bd5637892a11f16b8975a0a98ed.asciidoc b/docs/examples/4c712bd5637892a11f16b8975a0a98ed.asciidoc index 33328d0f1..d67e3f986 100644 --- 
a/docs/examples/4c712bd5637892a11f16b8975a0a98ed.asciidoc +++ b/docs/examples/4c712bd5637892a11f16b8975a0a98ed.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/dataframeanalytics.asciidoc:131 +// cat/dataframeanalytics.asciidoc:137 [source, python] ---- diff --git a/docs/examples/4c9350ed09b28f00e297ebe73c3b95a2.asciidoc b/docs/examples/4c9350ed09b28f00e297ebe73c3b95a2.asciidoc index df7d893d3..85dd4b15b 100644 --- a/docs/examples/4c9350ed09b28f00e297ebe73c3b95a2.asciidoc +++ b/docs/examples/4c9350ed09b28f00e297ebe73c3b95a2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-elasticsearch.asciidoc:195 +// inference/service-elasticsearch.asciidoc:236 [source, python] ---- diff --git a/docs/examples/4ca15672fc5ab1d80a127d086b6d2837.asciidoc b/docs/examples/4ca15672fc5ab1d80a127d086b6d2837.asciidoc index 77d2d7733..b0a3784fb 100644 --- a/docs/examples/4ca15672fc5ab1d80a127d086b6d2837.asciidoc +++ b/docs/examples/4ca15672fc5ab1d80a127d086b6d2837.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/allocation-explain.asciidoc:447 +// cluster/allocation-explain.asciidoc:457 [source, python] ---- diff --git a/docs/examples/4cb44556b8c699f43489b17b42ddd475.asciidoc b/docs/examples/4cb44556b8c699f43489b17b42ddd475.asciidoc index 7e66cc7c7..533486e82 100644 --- a/docs/examples/4cb44556b8c699f43489b17b42ddd475.asciidoc +++ b/docs/examples/4cb44556b8c699f43489b17b42ddd475.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/multi-get.asciidoc:216 +// docs/multi-get.asciidoc:222 [source, python] ---- diff --git a/docs/examples/4cdbd53f08df4bf66e2a47c0f1fcb3f8.asciidoc b/docs/examples/4cdbd53f08df4bf66e2a47c0f1fcb3f8.asciidoc index fb2f2e2c8..5f643884b 100644 --- a/docs/examples/4cdbd53f08df4bf66e2a47c0f1fcb3f8.asciidoc +++ b/docs/examples/4cdbd53f08df4bf66e2a47c0f1fcb3f8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/clearcache.asciidoc:130 +// indices/clearcache.asciidoc:136 [source, python] ---- diff --git a/docs/examples/4cdcc3fde5cea165a3a7567962b9bd61.asciidoc b/docs/examples/4cdcc3fde5cea165a3a7567962b9bd61.asciidoc index a74c2c8cb..fcc5123ad 100644 --- a/docs/examples/4cdcc3fde5cea165a3a7567962b9bd61.asciidoc +++ b/docs/examples/4cdcc3fde5cea165a3a7567962b9bd61.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// synonyms/apis/put-synonyms-set.asciidoc:125 +// synonyms/apis/put-synonyms-set.asciidoc:131 [source, python] ---- diff --git a/docs/examples/4d21725453955582ff12b4a1104aa7b6.asciidoc b/docs/examples/4d21725453955582ff12b4a1104aa7b6.asciidoc index 4f877c366..dafdda283 100644 --- a/docs/examples/4d21725453955582ff12b4a1104aa7b6.asciidoc +++ b/docs/examples/4d21725453955582ff12b4a1104aa7b6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/update-filter.asciidoc:44 +// ml/anomaly-detection/apis/update-filter.asciidoc:50 [source, python] ---- diff --git a/docs/examples/4d2e6eb7fea407deeb7a859c267fda62.asciidoc b/docs/examples/4d2e6eb7fea407deeb7a859c267fda62.asciidoc index cc1d2081a..7defd89a7 100644 --- a/docs/examples/4d2e6eb7fea407deeb7a859c267fda62.asciidoc +++ b/docs/examples/4d2e6eb7fea407deeb7a859c267fda62.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/put-job.asciidoc:254 +// rollup/apis/put-job.asciidoc:260 [source, python] ---- diff --git a/docs/examples/4d7c0b52d3c0a084157428624c543c90.asciidoc 
b/docs/examples/4d7c0b52d3c0a084157428624c543c90.asciidoc index f949a6474..df65fde44 100644 --- a/docs/examples/4d7c0b52d3c0a084157428624c543c90.asciidoc +++ b/docs/examples/4d7c0b52d3c0a084157428624c543c90.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/common/apis/get-ml-info.asciidoc:38 +// ml/common/apis/get-ml-info.asciidoc:44 [source, python] ---- diff --git a/docs/examples/4da0cb8693e9ceceee2ba3b558014bbf.asciidoc b/docs/examples/4da0cb8693e9ceceee2ba3b558014bbf.asciidoc index 7ea6ba0c0..1e3760d1b 100644 --- a/docs/examples/4da0cb8693e9ceceee2ba3b558014bbf.asciidoc +++ b/docs/examples/4da0cb8693e9ceceee2ba3b558014bbf.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/docs/connectors-sharepoint-online.asciidoc:934 +// connector/docs/connectors-sharepoint-online.asciidoc:1088 [source, python] ---- diff --git a/docs/examples/4dab4c5168047ba596af1beb0e55b845.asciidoc b/docs/examples/4dab4c5168047ba596af1beb0e55b845.asciidoc deleted file mode 100644 index 314cff18a..000000000 --- a/docs/examples/4dab4c5168047ba596af1beb0e55b845.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// migration/migrate_9_0/transient-settings-migration-guide.asciidoc:82 - -[source, python] ----- -resp = client.cluster.get_settings( - flat_settings=True, -) -print(resp) ----- diff --git a/docs/examples/4e3414fc712b16311f9e433dd366f49d.asciidoc b/docs/examples/4e3414fc712b16311f9e433dd366f49d.asciidoc index f39150a79..0335ed397 100644 --- a/docs/examples/4e3414fc712b16311f9e433dd366f49d.asciidoc +++ b/docs/examples/4e3414fc712b16311f9e433dd366f49d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/delete-inference.asciidoc:64 +// inference/delete-inference.asciidoc:70 [source, python] ---- diff --git a/docs/examples/4e931cfac74e46e221cf4a9ab88a182d.asciidoc b/docs/examples/4e931cfac74e46e221cf4a9ab88a182d.asciidoc index 6970d0a99..ae1dda74c 100644 --- a/docs/examples/4e931cfac74e46e221cf4a9ab88a182d.asciidoc +++ b/docs/examples/4e931cfac74e46e221cf4a9ab88a182d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/field-caps.asciidoc:246 +// search/field-caps.asciidoc:251 [source, python] ---- diff --git a/docs/examples/4ed946065faa92f9950f04e402676a97.asciidoc b/docs/examples/4ed946065faa92f9950f04e402676a97.asciidoc index 60d9e2c87..72f00c320 100644 --- a/docs/examples/4ed946065faa92f9950f04e402676a97.asciidoc +++ b/docs/examples/4ed946065faa92f9950f04e402676a97.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/info.asciidoc:204 +// rest-api/info.asciidoc:206 [source, python] ---- diff --git a/docs/examples/191074b2eebd5f74e628c2ada4b6d2e4.asciidoc b/docs/examples/4edfb5934d14ad7655bd7e19a112b5c0.asciidoc similarity index 92% rename from docs/examples/191074b2eebd5f74e628c2ada4b6d2e4.asciidoc rename to docs/examples/4edfb5934d14ad7655bd7e19a112b5c0.asciidoc index 157634563..562471a1c 100644 --- a/docs/examples/191074b2eebd5f74e628c2ada4b6d2e4.asciidoc +++ b/docs/examples/4edfb5934d14ad7655bd7e19a112b5c0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// quickstart/full-text-filtering-tutorial.asciidoc:521 +// quickstart/full-text-filtering-tutorial.asciidoc:522 [source, python] ---- @@ -8,11 +8,6 @@ resp = client.search( query={ "bool": { "must": [ - { - "term": { - "category.keyword": "Main Course" - } - }, { "term": { "tags": "vegetarian" @@ -27,6 +22,11 @@ resp = client.search( } ], "should": [ + { + "term": { 
+ "category": "Main Course" + } + }, { "multi_match": { "query": "curry spicy", diff --git a/docs/examples/4ee31fd4ea6d18f32ec28b7fa433441d.asciidoc b/docs/examples/4ee31fd4ea6d18f32ec28b7fa433441d.asciidoc index 3a608567f..5211b63ec 100644 --- a/docs/examples/4ee31fd4ea6d18f32ec28b7fa433441d.asciidoc +++ b/docs/examples/4ee31fd4ea6d18f32ec28b7fa433441d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/put-app-privileges.asciidoc:88 +// rest-api/security/put-app-privileges.asciidoc:94 [source, python] ---- diff --git a/docs/examples/4f08d9e21d9f199acc77abfb83287878.asciidoc b/docs/examples/4f08d9e21d9f199acc77abfb83287878.asciidoc index 97db55509..c13a9cfa3 100644 --- a/docs/examples/4f08d9e21d9f199acc77abfb83287878.asciidoc +++ b/docs/examples/4f08d9e21d9f199acc77abfb83287878.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search-application/apis/search-application-search.asciidoc:125 +// search-application/apis/search-application-search.asciidoc:130 [source, python] ---- diff --git a/docs/examples/4f140d8922efdf3420e41b1cb669a289.asciidoc b/docs/examples/4f140d8922efdf3420e41b1cb669a289.asciidoc index 1f5787c1c..9febe3c0c 100644 --- a/docs/examples/4f140d8922efdf3420e41b1cb669a289.asciidoc +++ b/docs/examples/4f140d8922efdf3420e41b1cb669a289.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/delete-component-template.asciidoc:25 +// indices/delete-component-template.asciidoc:31 [source, python] ---- diff --git a/docs/examples/4f666d710758578e2582850dac3ad144.asciidoc b/docs/examples/4f666d710758578e2582850dac3ad144.asciidoc index 60e20c858..0474f05fc 100644 --- a/docs/examples/4f666d710758578e2582850dac3ad144.asciidoc +++ b/docs/examples/4f666d710758578e2582850dac3ad144.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/update-user-profile-data.asciidoc:135 +// rest-api/security/update-user-profile-data.asciidoc:141 [source, python] ---- diff --git a/docs/examples/4f6694ef147a73b1163bde3c13779d26.asciidoc b/docs/examples/4f6694ef147a73b1163bde3c13779d26.asciidoc index 0167e378e..249c1bddd 100644 --- a/docs/examples/4f6694ef147a73b1163bde3c13779d26.asciidoc +++ b/docs/examples/4f6694ef147a73b1163bde3c13779d26.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/rejected-requests.asciidoc:58 +// troubleshooting/common-issues/rejected-requests.asciidoc:68 [source, python] ---- diff --git a/docs/examples/4f8a4ad49e2bca6784c88ede18a1a709.asciidoc b/docs/examples/4f8a4ad49e2bca6784c88ede18a1a709.asciidoc index c7576d78e..3cabc3382 100644 --- a/docs/examples/4f8a4ad49e2bca6784c88ede18a1a709.asciidoc +++ b/docs/examples/4f8a4ad49e2bca6784c88ede18a1a709.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// licensing/delete-license.asciidoc:37 +// licensing/delete-license.asciidoc:43 [source, python] ---- diff --git a/docs/examples/4fa9ee04188cbf0b38cfc28f6a56527d.asciidoc b/docs/examples/4fa9ee04188cbf0b38cfc28f6a56527d.asciidoc index ac9f9eb67..aab1b8537 100644 --- a/docs/examples/4fa9ee04188cbf0b38cfc28f6a56527d.asciidoc +++ b/docs/examples/4fa9ee04188cbf0b38cfc28f6a56527d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/get-datafeed.asciidoc:74 +// ml/anomaly-detection/apis/get-datafeed.asciidoc:80 [source, python] ---- diff --git a/docs/examples/4fb0629146ca78b85e823edd405497bb.asciidoc b/docs/examples/4fb0629146ca78b85e823edd405497bb.asciidoc index 
ce649ee73..2c8596f9b 100644 --- a/docs/examples/4fb0629146ca78b85e823edd405497bb.asciidoc +++ b/docs/examples/4fb0629146ca78b85e823edd405497bb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/df-analytics/apis/put-dfanalytics.asciidoc:908 +// ml/df-analytics/apis/put-dfanalytics.asciidoc:914 [source, python] ---- diff --git a/docs/examples/5093bfd281dbe41bd0dba8ff979e6e47.asciidoc b/docs/examples/5093bfd281dbe41bd0dba8ff979e6e47.asciidoc index 1dfd1c859..a25580013 100644 --- a/docs/examples/5093bfd281dbe41bd0dba8ff979e6e47.asciidoc +++ b/docs/examples/5093bfd281dbe41bd0dba8ff979e6e47.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// scripting/apis/get-stored-script-api.asciidoc:24 +// scripting/apis/get-stored-script-api.asciidoc:30 [source, python] ---- diff --git a/docs/examples/50a9623c153cabe64101efb633e10e6c.asciidoc b/docs/examples/50a9623c153cabe64101efb633e10e6c.asciidoc index b84d11c31..08678e3bd 100644 --- a/docs/examples/50a9623c153cabe64101efb633e10e6c.asciidoc +++ b/docs/examples/50a9623c153cabe64101efb633e10e6c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// autoscaling/apis/delete-autoscaling-policy.asciidoc:31 +// autoscaling/apis/delete-autoscaling-policy.asciidoc:37 [source, python] ---- diff --git a/docs/examples/50b5c0332949d2154c72b629b5fa6222.asciidoc b/docs/examples/50b5c0332949d2154c72b629b5fa6222.asciidoc index 0922defcd..b096a417a 100644 --- a/docs/examples/50b5c0332949d2154c72b629b5fa6222.asciidoc +++ b/docs/examples/50b5c0332949d2154c72b629b5fa6222.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:339 +// indices/put-mapping.asciidoc:345 [source, python] ---- diff --git a/docs/examples/50d9c0508ddb0fc5ba5a893eec219dd8.asciidoc b/docs/examples/50d9c0508ddb0fc5ba5a893eec219dd8.asciidoc index 98b732223..a078c4d75 100644 --- a/docs/examples/50d9c0508ddb0fc5ba5a893eec219dd8.asciidoc +++ b/docs/examples/50d9c0508ddb0fc5ba5a893eec219dd8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/fields/synthetic-source.asciidoc:136 +// mapping/fields/synthetic-source.asciidoc:129 [source, python] ---- diff --git a/docs/examples/50dc35d3d8705bd62aed20a15209476c.asciidoc b/docs/examples/50dc35d3d8705bd62aed20a15209476c.asciidoc index a51c7d773..8919d476a 100644 --- a/docs/examples/50dc35d3d8705bd62aed20a15209476c.asciidoc +++ b/docs/examples/50dc35d3d8705bd62aed20a15209476c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/create-role-mappings.asciidoc:358 +// rest-api/security/create-role-mappings.asciidoc:364 [source, python] ---- diff --git a/docs/examples/511e5bb8ab881171b7e8629095e30b85.asciidoc b/docs/examples/511e5bb8ab881171b7e8629095e30b85.asciidoc index f99c9de99..0b4ccfb09 100644 --- a/docs/examples/511e5bb8ab881171b7e8629095e30b85.asciidoc +++ b/docs/examples/511e5bb8ab881171b7e8629095e30b85.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/downsampling-ilm.asciidoc:417 +// data-streams/downsampling-dsl.asciidoc:400 [source, python] ---- diff --git a/docs/examples/5174c3c731fc1703e5b43ae2bae7a80e.asciidoc b/docs/examples/5174c3c731fc1703e5b43ae2bae7a80e.asciidoc index 4488c4343..cef5594ec 100644 --- a/docs/examples/5174c3c731fc1703e5b43ae2bae7a80e.asciidoc +++ b/docs/examples/5174c3c731fc1703e5b43ae2bae7a80e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// sql/apis/clear-sql-cursor-api.asciidoc:23 +// 
sql/apis/clear-sql-cursor-api.asciidoc:29 [source, python] ---- diff --git a/docs/examples/5195a88194f7a139c635a84398d76205.asciidoc b/docs/examples/5195a88194f7a139c635a84398d76205.asciidoc index 812b30e06..93151dd86 100644 --- a/docs/examples/5195a88194f7a139c635a84398d76205.asciidoc +++ b/docs/examples/5195a88194f7a139c635a84398d76205.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/apis/restore-snapshot-api.asciidoc:54 +// snapshot-restore/apis/restore-snapshot-api.asciidoc:60 [source, python] ---- diff --git a/docs/examples/519e46350316a33162740e5d7968aa2c.asciidoc b/docs/examples/519e46350316a33162740e5d7968aa2c.asciidoc new file mode 100644 index 000000000..a0daa1e65 --- /dev/null +++ b/docs/examples/519e46350316a33162740e5d7968aa2c.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/knn-search.asciidoc:1103 + +[source, python] +---- +resp = client.search( + index="image-index", + knn={ + "field": "image-vector", + "query_vector": [ + -5, + 9, + -12 + ], + "k": 10, + "num_candidates": 100, + "rescore_vector": { + "oversample": 2 + } + }, + fields=[ + "title", + "file-type" + ], +) +print(resp) +---- diff --git a/docs/examples/51b44224feee6e2e5974824334474c77.asciidoc b/docs/examples/51b44224feee6e2e5974824334474c77.asciidoc index 9aada93d3..93e483a64 100644 --- a/docs/examples/51b44224feee6e2e5974824334474c77.asciidoc +++ b/docs/examples/51b44224feee6e2e5974824334474c77.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/repository-s3.asciidoc:364 +// snapshot-restore/repository-s3.asciidoc:371 [source, python] ---- diff --git a/docs/examples/51f1a0930362594b231a5bcc17673768.asciidoc b/docs/examples/51f1a0930362594b231a5bcc17673768.asciidoc index 0950c1dee..4c986a6ed 100644 --- a/docs/examples/51f1a0930362594b231a5bcc17673768.asciidoc +++ b/docs/examples/51f1a0930362594b231a5bcc17673768.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/modify-data-streams-api.asciidoc:11 +// data-streams/modify-data-streams-api.asciidoc:17 [source, python] ---- diff --git a/docs/examples/5275842787967b6db876025f4a1c6942.asciidoc b/docs/examples/5275842787967b6db876025f4a1c6942.asciidoc index 679b69a09..dd4e9ae6e 100644 --- a/docs/examples/5275842787967b6db876025f4a1c6942.asciidoc +++ b/docs/examples/5275842787967b6db876025f4a1c6942.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/suggesters.asciidoc:122 +// search/suggesters.asciidoc:128 [source, python] ---- diff --git a/docs/examples/52a2d119addb15366a935115518335fd.asciidoc b/docs/examples/52a2d119addb15366a935115518335fd.asciidoc index 2ee9ea4c7..221acc952 100644 --- a/docs/examples/52a2d119addb15366a935115518335fd.asciidoc +++ b/docs/examples/52a2d119addb15366a935115518335fd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/shrink-index.asciidoc:46 +// indices/shrink-index.asciidoc:52 [source, python] ---- diff --git a/docs/examples/52b71aa4ae6563abae78cd20ff06d1e9.asciidoc b/docs/examples/52b71aa4ae6563abae78cd20ff06d1e9.asciidoc index 2c3ecbaf6..eda2a03b6 100644 --- a/docs/examples/52b71aa4ae6563abae78cd20ff06d1e9.asciidoc +++ b/docs/examples/52b71aa4ae6563abae78cd20ff06d1e9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/hotspotting.asciidoc:144 +// troubleshooting/common-issues/hotspotting.asciidoc:148 [source, python] ---- diff --git 
a/docs/examples/52c2b4c180388f5ae044588ba70b70f0.asciidoc b/docs/examples/52c2b4c180388f5ae044588ba70b70f0.asciidoc index 0a7997675..1a2dbaaae 100644 --- a/docs/examples/52c2b4c180388f5ae044588ba70b70f0.asciidoc +++ b/docs/examples/52c2b4c180388f5ae044588ba70b70f0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/knn-query.asciidoc:172 +// query-dsl/knn-query.asciidoc:178 [source, python] ---- diff --git a/docs/examples/52f4c5eb08d39f98e2e2f5527ece9731.asciidoc b/docs/examples/52f4c5eb08d39f98e2e2f5527ece9731.asciidoc index 282fd0579..89fa235ad 100644 --- a/docs/examples/52f4c5eb08d39f98e2e2f5527ece9731.asciidoc +++ b/docs/examples/52f4c5eb08d39f98e2e2f5527ece9731.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-alibabacloud-ai-search.asciidoc:204 +// inference/service-alibabacloud-ai-search.asciidoc:210 [source, python] ---- diff --git a/docs/examples/5305bc07c1bf90bab3e8db1de3e31b26.asciidoc b/docs/examples/5305bc07c1bf90bab3e8db1de3e31b26.asciidoc index 4458a453d..b8649ab3c 100644 --- a/docs/examples/5305bc07c1bf90bab3e8db1de3e31b26.asciidoc +++ b/docs/examples/5305bc07c1bf90bab3e8db1de3e31b26.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// shutdown/apis/shutdown-put.asciidoc:96 +// shutdown/apis/shutdown-put.asciidoc:102 [source, python] ---- diff --git a/docs/examples/532f371934b61fb4992d37bedcc085de.asciidoc b/docs/examples/532f371934b61fb4992d37bedcc085de.asciidoc index dab4d1e81..d558dc5d6 100644 --- a/docs/examples/532f371934b61fb4992d37bedcc085de.asciidoc +++ b/docs/examples/532f371934b61fb4992d37bedcc085de.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// shutdown/apis/shutdown-get.asciidoc:49 +// shutdown/apis/shutdown-get.asciidoc:55 [source, python] ---- diff --git a/docs/examples/53c6256295111524d5ff2885bdcb99a9.asciidoc b/docs/examples/53c6256295111524d5ff2885bdcb99a9.asciidoc index f59be3554..0777d6f7c 100644 --- a/docs/examples/53c6256295111524d5ff2885bdcb99a9.asciidoc +++ b/docs/examples/53c6256295111524d5ff2885bdcb99a9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// transform/apis/get-transform-stats.asciidoc:322 +// transform/apis/get-transform-stats.asciidoc:328 [source, python] ---- diff --git a/docs/examples/53d9d2ec9cb8d211772d764e76fe6890.asciidoc b/docs/examples/53d9d2ec9cb8d211772d764e76fe6890.asciidoc new file mode 100644 index 000000000..40f6deb33 --- /dev/null +++ b/docs/examples/53d9d2ec9cb8d211772d764e76fe6890.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// ingest/processors/inference.asciidoc:784 + +[source, python] +---- +resp = client.ingest.simulate( + id="query_helper_pipeline", + docs=[ + { + "_source": { + "content": "artificial intelligence in medicine articles published in the last 12 months" + } + } + ], +) +print(resp) +---- diff --git a/docs/examples/5457c94f0039c6b95c7f9f305d0c6b58.asciidoc b/docs/examples/5457c94f0039c6b95c7f9f305d0c6b58.asciidoc index 81c231f99..f538d29ea 100644 --- a/docs/examples/5457c94f0039c6b95c7f9f305d0c6b58.asciidoc +++ b/docs/examples/5457c94f0039c6b95c7f9f305d0c6b58.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/nodes-stats.asciidoc:2532 +// cluster/nodes-stats.asciidoc:2538 [source, python] ---- diff --git a/docs/examples/55096381f811388fafd8e244dd2402c8.asciidoc b/docs/examples/55096381f811388fafd8e244dd2402c8.asciidoc index e460dfbb5..9c38bd334 100644 --- a/docs/examples/55096381f811388fafd8e244dd2402c8.asciidoc +++ 
b/docs/examples/55096381f811388fafd8e244dd2402c8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/rollover-index.asciidoc:445 +// indices/rollover-index.asciidoc:451 [source, python] ---- diff --git a/docs/examples/558b3f9b987771e9f9f35e51a0d7e062.asciidoc b/docs/examples/558b3f9b987771e9f9f35e51a0d7e062.asciidoc index 053704c71..a5d7653a2 100644 --- a/docs/examples/558b3f9b987771e9f9f35e51a0d7e062.asciidoc +++ b/docs/examples/558b3f9b987771e9f9f35e51a0d7e062.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/profile.asciidoc:1155 +// search/profile.asciidoc:1160 [source, python] ---- diff --git a/docs/examples/55e8ddf643726dec51531ada0bec7143.asciidoc b/docs/examples/55e8ddf643726dec51531ada0bec7143.asciidoc index e1f3f9765..0ef93680e 100644 --- a/docs/examples/55e8ddf643726dec51531ada0bec7143.asciidoc +++ b/docs/examples/55e8ddf643726dec51531ada0bec7143.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// slm/apis/slm-stats.asciidoc:26 +// slm/apis/slm-stats.asciidoc:32 [source, python] ---- diff --git a/docs/examples/55f4a15b84b724b9fbf2efd29a4da120.asciidoc b/docs/examples/55f4a15b84b724b9fbf2efd29a4da120.asciidoc index e47fdd6ae..5ab51f4c9 100644 --- a/docs/examples/55f4a15b84b724b9fbf2efd29a4da120.asciidoc +++ b/docs/examples/55f4a15b84b724b9fbf2efd29a4da120.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/authenticate.asciidoc:35 +// rest-api/security/authenticate.asciidoc:41 [source, python] ---- diff --git a/docs/examples/5619103306878d58a058bce87c5bd82b.asciidoc b/docs/examples/5619103306878d58a058bce87c5bd82b.asciidoc index d73c08bc4..fac3847b5 100644 --- a/docs/examples/5619103306878d58a058bce87c5bd82b.asciidoc +++ b/docs/examples/5619103306878d58a058bce87c5bd82b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/recovery.asciidoc:336 +// indices/recovery.asciidoc:342 [source, python] ---- diff --git a/docs/examples/563dfbf421422c837ee6929ae2ede876.asciidoc b/docs/examples/563dfbf421422c837ee6929ae2ede876.asciidoc index a15e07d5c..fe461e602 100644 --- a/docs/examples/563dfbf421422c837ee6929ae2ede876.asciidoc +++ b/docs/examples/563dfbf421422c837ee6929ae2ede876.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/migrate-to-data-stream.asciidoc:53 +// indices/migrate-to-data-stream.asciidoc:59 [source, python] ---- diff --git a/docs/examples/565386eee0951865a684e41fab53b40c.asciidoc b/docs/examples/565386eee0951865a684e41fab53b40c.asciidoc index 449bbfbe6..4ee080da8 100644 --- a/docs/examples/565386eee0951865a684e41fab53b40c.asciidoc +++ b/docs/examples/565386eee0951865a684e41fab53b40c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-elser.asciidoc:162 +// inference/service-elser.asciidoc:128 [source, python] ---- diff --git a/docs/examples/56563f91d9f0b74e9e4aae9cb221845b.asciidoc b/docs/examples/56563f91d9f0b74e9e4aae9cb221845b.asciidoc index 67cb1a01c..519d06ace 100644 --- a/docs/examples/56563f91d9f0b74e9e4aae9cb221845b.asciidoc +++ b/docs/examples/56563f91d9f0b74e9e4aae9cb221845b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/create-cross-cluster-api-key.asciidoc:106 +// rest-api/security/create-cross-cluster-api-key.asciidoc:111 [source, python] ---- diff --git a/docs/examples/568979150ce18739f8d3ea859355aaa3.asciidoc b/docs/examples/568979150ce18739f8d3ea859355aaa3.asciidoc index 9accb5678..534e62d6e 100644 --- 
a/docs/examples/568979150ce18739f8d3ea859355aaa3.asciidoc +++ b/docs/examples/568979150ce18739f8d3ea859355aaa3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-users.asciidoc:87 +// rest-api/security/get-users.asciidoc:92 [source, python] ---- diff --git a/docs/examples/56a1aa4f7fa62f2289e20607e3039bf3.asciidoc b/docs/examples/56a1aa4f7fa62f2289e20607e3039bf3.asciidoc index c43a52a11..3d50d720a 100644 --- a/docs/examples/56a1aa4f7fa62f2289e20607e3039bf3.asciidoc +++ b/docs/examples/56a1aa4f7fa62f2289e20607e3039bf3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:13 +// indices/put-mapping.asciidoc:19 [source, python] ---- diff --git a/docs/examples/56b6b50b174a935d368301ebd717231d.asciidoc b/docs/examples/56b6b50b174a935d368301ebd717231d.asciidoc index d4873c9da..b7f802b9d 100644 --- a/docs/examples/56b6b50b174a935d368301ebd717231d.asciidoc +++ b/docs/examples/56b6b50b174a935d368301ebd717231d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/watcher/stats.asciidoc:119 +// rest-api/watcher/stats.asciidoc:125 [source, python] ---- diff --git a/docs/examples/56db76c987106a870357854d3068ad98.asciidoc b/docs/examples/56db76c987106a870357854d3068ad98.asciidoc index ede6b5e5c..fc05e3283 100644 --- a/docs/examples/56db76c987106a870357854d3068ad98.asciidoc +++ b/docs/examples/56db76c987106a870357854d3068ad98.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-rules/apis/list-query-rulesets.asciidoc:158 +// query-rules/apis/list-query-rulesets.asciidoc:164 [source, python] ---- diff --git a/docs/examples/578808065fee8691355b8f25c35782cd.asciidoc b/docs/examples/578808065fee8691355b8f25c35782cd.asciidoc index 8bd882564..4ec94c9f5 100644 --- a/docs/examples/578808065fee8691355b8f25c35782cd.asciidoc +++ b/docs/examples/578808065fee8691355b8f25c35782cd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/profile.asciidoc:1018 +// search/profile.asciidoc:1023 [source, python] ---- diff --git a/docs/examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc b/docs/examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc index a18b9a0a8..871171fb6 100644 --- a/docs/examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc +++ b/docs/examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc @@ -1,9 +1,12 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/oidc-prepare-authentication-api.asciidoc:100 +// rest-api/security/oidc-prepare-authentication-api.asciidoc:106 [source, python] ---- -resp = client.security.oidc_prepare_authentication( +resp = client.perform_request( + "POST", + "/_security/oidc/prepare", + headers={"Content-Type": "application/json"}, body={ "realm": "oidc1", "state": "lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO", diff --git a/docs/examples/57e0bbab98f17d5b564d1ea146a55fe4.asciidoc b/docs/examples/57e0bbab98f17d5b564d1ea146a55fe4.asciidoc index 398c41932..19c524c34 100644 --- a/docs/examples/57e0bbab98f17d5b564d1ea146a55fe4.asciidoc +++ b/docs/examples/57e0bbab98f17d5b564d1ea146a55fe4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-index-template.asciidoc:221 +// indices/put-index-template.asciidoc:227 [source, python] ---- diff --git a/docs/examples/582c4b05401dbc190b19411282d85310.asciidoc b/docs/examples/582c4b05401dbc190b19411282d85310.asciidoc index ed9d3ea23..299d52aec 100644 --- a/docs/examples/582c4b05401dbc190b19411282d85310.asciidoc +++ 
b/docs/examples/582c4b05401dbc190b19411282d85310.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// scripting/using.asciidoc:375 +// scripting/using.asciidoc:380 [source, python] ---- diff --git a/docs/examples/5836b09198feb1269ed12839b416123d.asciidoc b/docs/examples/5836b09198feb1269ed12839b416123d.asciidoc new file mode 100644 index 000000000..5f11e91e2 --- /dev/null +++ b/docs/examples/5836b09198feb1269ed12839b416123d.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// inference/service-jinaai.asciidoc:218 + +[source, python] +---- +resp = client.search( + index="jinaai-index", + query={ + "semantic": { + "field": "content", + "query": "who inspired taking care of the sea?" + } + }, +) +print(resp) +---- diff --git a/docs/examples/5837d5f50665ac0a26181d3aaeb3f204.asciidoc b/docs/examples/5837d5f50665ac0a26181d3aaeb3f204.asciidoc index 71163305a..c5c20efb5 100644 --- a/docs/examples/5837d5f50665ac0a26181d3aaeb3f204.asciidoc +++ b/docs/examples/5837d5f50665ac0a26181d3aaeb3f204.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/start-trained-model-deployment.asciidoc:208 +// ml/trained-models/apis/start-trained-model-deployment.asciidoc:214 [source, python] ---- diff --git a/docs/examples/585a34ad79aee16678b37da785933ac8.asciidoc b/docs/examples/585a34ad79aee16678b37da785933ac8.asciidoc index 4ae188bd7..069d5ef6e 100644 --- a/docs/examples/585a34ad79aee16678b37da785933ac8.asciidoc +++ b/docs/examples/585a34ad79aee16678b37da785933ac8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ilm/apis/stop.asciidoc:79 +// ilm/apis/stop.asciidoc:85 [source, python] ---- diff --git a/docs/examples/58ca855be30049f8f0879e532db51ee2.asciidoc b/docs/examples/58ca855be30049f8f0879e532db51ee2.asciidoc index d043f45f6..b6c5fea9f 100644 --- a/docs/examples/58ca855be30049f8f0879e532db51ee2.asciidoc +++ b/docs/examples/58ca855be30049f8f0879e532db51ee2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// transform/apis/put-transform.asciidoc:314 +// transform/apis/put-transform.asciidoc:320 [source, python] ---- diff --git a/docs/examples/58f72be60c25752d7899a35fc60fe6eb.asciidoc b/docs/examples/58f72be60c25752d7899a35fc60fe6eb.asciidoc index 95b46f3fe..cea199fa0 100644 --- a/docs/examples/58f72be60c25752d7899a35fc60fe6eb.asciidoc +++ b/docs/examples/58f72be60c25752d7899a35fc60fe6eb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// modules/cluster/misc.asciidoc:179 +// modules/cluster/misc.asciidoc:182 [source, python] ---- diff --git a/docs/examples/591c7fb7451069829a14bba593136f1f.asciidoc b/docs/examples/591c7fb7451069829a14bba593136f1f.asciidoc index 85d4c0f71..bdebba748 100644 --- a/docs/examples/591c7fb7451069829a14bba593136f1f.asciidoc +++ b/docs/examples/591c7fb7451069829a14bba593136f1f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/forecast.asciidoc:82 +// ml/anomaly-detection/apis/forecast.asciidoc:88 [source, python] ---- diff --git a/docs/examples/5987afb2c17c73fe3d860937565ef115.asciidoc b/docs/examples/5987afb2c17c73fe3d860937565ef115.asciidoc index d777e9bd5..b2c20b385 100644 --- a/docs/examples/5987afb2c17c73fe3d860937565ef115.asciidoc +++ b/docs/examples/5987afb2c17c73fe3d860937565ef115.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/point-in-time-api.asciidoc:40 +// search/point-in-time-api.asciidoc:46 [source, python] ---- diff --git 
a/docs/examples/599f693cc7d30b1153f5eeecec8eb23a.asciidoc b/docs/examples/599f693cc7d30b1153f5eeecec8eb23a.asciidoc index 8b43c7453..df07501d6 100644 --- a/docs/examples/599f693cc7d30b1153f5eeecec8eb23a.asciidoc +++ b/docs/examples/599f693cc7d30b1153f5eeecec8eb23a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/delete-index-template-v1.asciidoc:29 +// indices/delete-index-template-v1.asciidoc:35 [source, python] ---- diff --git a/docs/examples/59b8b9555f4aa30bc4613f819e9fc8f0.asciidoc b/docs/examples/59b8b9555f4aa30bc4613f819e9fc8f0.asciidoc index 06bef0b08..e5617647b 100644 --- a/docs/examples/59b8b9555f4aa30bc4613f819e9fc8f0.asciidoc +++ b/docs/examples/59b8b9555f4aa30bc4613f819e9fc8f0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/close.asciidoc:72 +// indices/close.asciidoc:78 [source, python] ---- diff --git a/docs/examples/59d736a4d064ed2013c7ead8e32e0998.asciidoc b/docs/examples/59d736a4d064ed2013c7ead8e32e0998.asciidoc index e0e4ba3c9..691256d73 100644 --- a/docs/examples/59d736a4d064ed2013c7ead8e32e0998.asciidoc +++ b/docs/examples/59d736a4d064ed2013c7ead8e32e0998.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-openai.asciidoc:157 +// inference/service-openai.asciidoc:177 [source, python] ---- diff --git a/docs/examples/59f0ad2a6f97200e98e8eb079cdd8334.asciidoc b/docs/examples/59f0ad2a6f97200e98e8eb079cdd8334.asciidoc index 44e6fc0af..48a62341f 100644 --- a/docs/examples/59f0ad2a6f97200e98e8eb079cdd8334.asciidoc +++ b/docs/examples/59f0ad2a6f97200e98e8eb079cdd8334.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/multi-get.asciidoc:156 +// docs/multi-get.asciidoc:162 [source, python] ---- diff --git a/docs/examples/5a70db31f587b7ffed5e9bc1445430cb.asciidoc b/docs/examples/5a70db31f587b7ffed5e9bc1445430cb.asciidoc deleted file mode 100644 index 157fc4da6..000000000 --- a/docs/examples/5a70db31f587b7ffed5e9bc1445430cb.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// search/search-your-data/semantic-text-hybrid-search:56 - -[source, python] ----- -resp = client.indices.create( - index="semantic-embeddings", - mappings={ - "properties": { - "semantic_text": { - "type": "semantic_text", - "inference_id": "my-elser-endpoint" - }, - "content": { - "type": "text", - "copy_to": "semantic_text" - } - } - }, -) -print(resp) ----- diff --git a/docs/examples/5ad365ed9e1a3c26093a0f09666c133a.asciidoc b/docs/examples/5ad365ed9e1a3c26093a0f09666c133a.asciidoc index 921bfe844..eb54a1bb6 100644 --- a/docs/examples/5ad365ed9e1a3c26093a0f09666c133a.asciidoc +++ b/docs/examples/5ad365ed9e1a3c26093a0f09666c133a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/create-role-mappings.asciidoc:246 +// rest-api/security/create-role-mappings.asciidoc:252 [source, python] ---- diff --git a/docs/examples/5afbd9caed88c32f8a2968c07054f096.asciidoc b/docs/examples/5afbd9caed88c32f8a2968c07054f096.asciidoc index 912ae0d8c..0f4d555f8 100644 --- a/docs/examples/5afbd9caed88c32f8a2968c07054f096.asciidoc +++ b/docs/examples/5afbd9caed88c32f8a2968c07054f096.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/logstash/delete-pipeline.asciidoc:67 +// rest-api/logstash/delete-pipeline.asciidoc:73 [source, python] ---- diff --git a/docs/examples/5b0cc9e186a8f765a11141809b8b17b7.asciidoc b/docs/examples/5b0cc9e186a8f765a11141809b8b17b7.asciidoc index 6766b1c62..d3044eb49 100644 --- 
a/docs/examples/5b0cc9e186a8f765a11141809b8b17b7.asciidoc +++ b/docs/examples/5b0cc9e186a8f765a11141809b8b17b7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search-application/apis/list-search-applications.asciidoc:101 +// search-application/apis/list-search-applications.asciidoc:106 [source, python] ---- diff --git a/docs/examples/5b191f2dbfa46c774cc9b9b9e8d1d831.asciidoc b/docs/examples/5b191f2dbfa46c774cc9b9b9e8d1d831.asciidoc index 5a0713e09..5c8dd5b84 100644 --- a/docs/examples/5b191f2dbfa46c774cc9b9b9e8d1d831.asciidoc +++ b/docs/examples/5b191f2dbfa46c774cc9b9b9e8d1d831.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-user-privileges.asciidoc:34 +// rest-api/security/get-user-privileges.asciidoc:40 [source, python] ---- diff --git a/docs/examples/5b281956e35a26e734c482b42b356c0d.asciidoc b/docs/examples/5b281956e35a26e734c482b42b356c0d.asciidoc index 4b77d9f45..02c2cfbd7 100644 --- a/docs/examples/5b281956e35a26e734c482b42b356c0d.asciidoc +++ b/docs/examples/5b281956e35a26e734c482b42b356c0d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/alias-exists.asciidoc:10 +// indices/alias-exists.asciidoc:16 [source, python] ---- diff --git a/docs/examples/5b2a13366bd4e1ab4b25d04d360570dc.asciidoc b/docs/examples/5b2a13366bd4e1ab4b25d04d360570dc.asciidoc index e47c2638d..270362136 100644 --- a/docs/examples/5b2a13366bd4e1ab4b25d04d360570dc.asciidoc +++ b/docs/examples/5b2a13366bd4e1ab4b25d04d360570dc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-component-template.asciidoc:255 +// indices/put-component-template.asciidoc:261 [source, python] ---- diff --git a/docs/examples/5b7d6f1db88ca6f42c48fa3dbb4341e8.asciidoc b/docs/examples/5b7d6f1db88ca6f42c48fa3dbb4341e8.asciidoc index b58e6bc27..9990e5c7b 100644 --- a/docs/examples/5b7d6f1db88ca6f42c48fa3dbb4341e8.asciidoc +++ b/docs/examples/5b7d6f1db88ca6f42c48fa3dbb4341e8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/get-mapping.asciidoc:79 +// indices/get-mapping.asciidoc:85 [source, python] ---- diff --git a/docs/examples/5bba213a7f543190139d1a69ab2ed076.asciidoc b/docs/examples/5bba213a7f543190139d1a69ab2ed076.asciidoc index 02874e925..1bf8744d9 100644 --- a/docs/examples/5bba213a7f543190139d1a69ab2ed076.asciidoc +++ b/docs/examples/5bba213a7f543190139d1a69ab2ed076.asciidoc @@ -1,10 +1,15 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-across-clusters.asciidoc:301 +// esql/esql-across-clusters.asciidoc:296 [source, python] ---- -resp = client.esql.async_query( - format="json", +resp = client.perform_request( + "POST", + "/_query/async", + params={ + "format": "json" + }, + headers={"Content-Type": "application/json"}, body={ "query": "\n FROM cluster_one:my-index*,cluster_two:logs*\n | STATS COUNT(http.response.status_code) BY user.id\n | LIMIT 2\n ", "include_ccs_metadata": True diff --git a/docs/examples/5bbccf103107e505c17ae59863753efd.asciidoc b/docs/examples/5bbccf103107e505c17ae59863753efd.asciidoc index 056d688e7..2b1bde38c 100644 --- a/docs/examples/5bbccf103107e505c17ae59863753efd.asciidoc +++ b/docs/examples/5bbccf103107e505c17ae59863753efd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/get-influencer.asciidoc:152 +// ml/anomaly-detection/apis/get-influencer.asciidoc:158 [source, python] ---- diff --git a/docs/examples/5c7ece1f30267adabdb832424871900a.asciidoc 
b/docs/examples/5c7ece1f30267adabdb832424871900a.asciidoc index 4e8858788..46a493cee 100644 --- a/docs/examples/5c7ece1f30267adabdb832424871900a.asciidoc +++ b/docs/examples/5c7ece1f30267adabdb832424871900a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/troubleshooting-unbalanced-cluster.asciidoc:20 +// troubleshooting/troubleshooting-unbalanced-cluster.asciidoc:24 [source, python] ---- diff --git a/docs/examples/5ccfd9f4698dcd7cdfbc6bad60081aab.asciidoc b/docs/examples/5ccfd9f4698dcd7cdfbc6bad60081aab.asciidoc index 6ec562e73..c953d972f 100644 --- a/docs/examples/5ccfd9f4698dcd7cdfbc6bad60081aab.asciidoc +++ b/docs/examples/5ccfd9f4698dcd7cdfbc6bad60081aab.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/df-analytics/apis/get-dfanalytics.asciidoc:212 +// ml/df-analytics/apis/get-dfanalytics.asciidoc:218 [source, python] ---- diff --git a/docs/examples/5ceb734e3affe00e2cdc29af748d95bf.asciidoc b/docs/examples/5ceb734e3affe00e2cdc29af748d95bf.asciidoc index b873d1d6e..8e8095f7b 100644 --- a/docs/examples/5ceb734e3affe00e2cdc29af748d95bf.asciidoc +++ b/docs/examples/5ceb734e3affe00e2cdc29af748d95bf.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/inference-apis.asciidoc:98 +// inference/inference-apis.asciidoc:114 [source, python] ---- diff --git a/docs/examples/5cf12cc4f98d98dc79bead7e6556679c.asciidoc b/docs/examples/5cf12cc4f98d98dc79bead7e6556679c.asciidoc index 25d500e11..b920f8437 100644 --- a/docs/examples/5cf12cc4f98d98dc79bead7e6556679c.asciidoc +++ b/docs/examples/5cf12cc4f98d98dc79bead7e6556679c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/fields/synthetic-source.asciidoc:17 +// mapping/fields/synthetic-source.asciidoc:10 [source, python] ---- diff --git a/docs/examples/5d03bb385904d20c5323885706738459.asciidoc b/docs/examples/5d03bb385904d20c5323885706738459.asciidoc index b5272d856..baeaa644a 100644 --- a/docs/examples/5d03bb385904d20c5323885706738459.asciidoc +++ b/docs/examples/5d03bb385904d20c5323885706738459.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/aliases.asciidoc:10 +// indices/aliases.asciidoc:16 [source, python] ---- diff --git a/docs/examples/5deeed427f35cbaee4b8ddc45002a9d7.asciidoc b/docs/examples/5deeed427f35cbaee4b8ddc45002a9d7.asciidoc index 5b7e50ced..1edbb7e20 100644 --- a/docs/examples/5deeed427f35cbaee4b8ddc45002a9d7.asciidoc +++ b/docs/examples/5deeed427f35cbaee4b8ddc45002a9d7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/bulk-delete-roles.asciidoc:71 +// rest-api/security/bulk-delete-roles.asciidoc:77 [source, python] ---- diff --git a/docs/examples/5dfb23f6e36ef484f1d3271bae76a8d1.asciidoc b/docs/examples/5dfb23f6e36ef484f1d3271bae76a8d1.asciidoc index a7d0b7013..c99773099 100644 --- a/docs/examples/5dfb23f6e36ef484f1d3271bae76a8d1.asciidoc +++ b/docs/examples/5dfb23f6e36ef484f1d3271bae76a8d1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/recovery.asciidoc:234 +// indices/recovery.asciidoc:240 [source, python] ---- diff --git a/docs/examples/5e021307d331a4483a5aa2198168451b.asciidoc b/docs/examples/5e021307d331a4483a5aa2198168451b.asciidoc index 1c6919af5..cd316527b 100644 --- a/docs/examples/5e021307d331a4483a5aa2198168451b.asciidoc +++ b/docs/examples/5e021307d331a4483a5aa2198168451b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/create-roles.asciidoc:183 +// 
rest-api/security/create-roles.asciidoc:189 [source, python] ---- diff --git a/docs/examples/5e099493f135ff7bd614e935c4f2bf5a.asciidoc b/docs/examples/5e099493f135ff7bd614e935c4f2bf5a.asciidoc index d1f47e0f5..207872edc 100644 --- a/docs/examples/5e099493f135ff7bd614e935c4f2bf5a.asciidoc +++ b/docs/examples/5e099493f135ff7bd614e935c4f2bf5a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// modules/indices/request_cache.asciidoc:86 +// shard-request-cache.asciidoc:88 [source, python] ---- diff --git a/docs/examples/5e124875d97c27362ae858160ae1c6d5.asciidoc b/docs/examples/5e124875d97c27362ae858160ae1c6d5.asciidoc index d83f724ea..9381112b4 100644 --- a/docs/examples/5e124875d97c27362ae858160ae1c6d5.asciidoc +++ b/docs/examples/5e124875d97c27362ae858160ae1c6d5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc:44 +// ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc:50 [source, python] ---- diff --git a/docs/examples/5e415c490a46358643ee2aab554b4876.asciidoc b/docs/examples/5e415c490a46358643ee2aab554b4876.asciidoc index b86c34e4d..e19029d48 100644 --- a/docs/examples/5e415c490a46358643ee2aab554b4876.asciidoc +++ b/docs/examples/5e415c490a46358643ee2aab554b4876.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:57 +// troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:63 [source, python] ---- diff --git a/docs/examples/5f16358ebb5d14b86f57612d5f92d923.asciidoc b/docs/examples/5f16358ebb5d14b86f57612d5f92d923.asciidoc new file mode 100644 index 000000000..565804a37 --- /dev/null +++ b/docs/examples/5f16358ebb5d14b86f57612d5f92d923.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// mapping/types/semantic-text.asciidoc:26 + +[source, python] +---- +resp = client.indices.create( + index="my-index-000001", + mappings={ + "properties": { + "inference_field": { + "type": "semantic_text" + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/5f1ed9cfdc149763b444acfbe10b0e16.asciidoc b/docs/examples/5f1ed9cfdc149763b444acfbe10b0e16.asciidoc index 4b346a298..118ad732e 100644 --- a/docs/examples/5f1ed9cfdc149763b444acfbe10b0e16.asciidoc +++ b/docs/examples/5f1ed9cfdc149763b444acfbe10b0e16.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:265 +// indices/put-mapping.asciidoc:271 [source, python] ---- diff --git a/docs/examples/5f8acd1e367b048b5542dbc6079bcc88.asciidoc b/docs/examples/5f8acd1e367b048b5542dbc6079bcc88.asciidoc index 5b8040c61..3d8c97850 100644 --- a/docs/examples/5f8acd1e367b048b5542dbc6079bcc88.asciidoc +++ b/docs/examples/5f8acd1e367b048b5542dbc6079bcc88.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// analysis/tokenfilters/hyphenation-decompounder-tokenfilter.asciidoc:132 +// analysis/tokenfilters/hyphenation-decompounder-tokenfilter.asciidoc:144 [source, python] ---- diff --git a/docs/examples/5faa121e00a0582160b2adb2b72fed67.asciidoc b/docs/examples/5faa121e00a0582160b2adb2b72fed67.asciidoc index 6342a01eb..47a0bb0a1 100644 --- a/docs/examples/5faa121e00a0582160b2adb2b72fed67.asciidoc +++ b/docs/examples/5faa121e00a0582160b2adb2b72fed67.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/get-settings.asciidoc:92 +// indices/get-settings.asciidoc:98 [source, python] ---- diff --git a/docs/examples/5fca6671bc8eaddc44ac488d1c3c6909.asciidoc 
b/docs/examples/5fca6671bc8eaddc44ac488d1c3c6909.asciidoc index f3234a306..5007151bf 100644 --- a/docs/examples/5fca6671bc8eaddc44ac488d1c3c6909.asciidoc +++ b/docs/examples/5fca6671bc8eaddc44ac488d1c3c6909.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/get-calendar.asciidoc:89 +// ml/anomaly-detection/apis/get-calendar.asciidoc:95 [source, python] ---- diff --git a/docs/examples/5fde0d78e9b2cc0519f8a63848ed344e.asciidoc b/docs/examples/5fde0d78e9b2cc0519f8a63848ed344e.asciidoc index 74cfdcf7b..df3038e41 100644 --- a/docs/examples/5fde0d78e9b2cc0519f8a63848ed344e.asciidoc +++ b/docs/examples/5fde0d78e9b2cc0519f8a63848ed344e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-rules/apis/get-query-ruleset.asciidoc:102 +// query-rules/apis/get-query-ruleset.asciidoc:108 [source, python] ---- diff --git a/docs/examples/601ad3b0ceccb3fcd282e5ec36748954.asciidoc b/docs/examples/601ad3b0ceccb3fcd282e5ec36748954.asciidoc index 41fbc65df..be80f02ef 100644 --- a/docs/examples/601ad3b0ceccb3fcd282e5ec36748954.asciidoc +++ b/docs/examples/601ad3b0ceccb3fcd282e5ec36748954.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-service-credentials.asciidoc:59 +// rest-api/security/get-service-credentials.asciidoc:64 [source, python] ---- diff --git a/docs/examples/602e04051c092cf77de2f75a563661b8.asciidoc b/docs/examples/602e04051c092cf77de2f75a563661b8.asciidoc index 4d7617acd..8e7d57b80 100644 --- a/docs/examples/602e04051c092cf77de2f75a563661b8.asciidoc +++ b/docs/examples/602e04051c092cf77de2f75a563661b8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat.asciidoc:57 +// cat.asciidoc:63 [source, python] ---- diff --git a/docs/examples/604da59fe41160efa10a846a9dacc07a.asciidoc b/docs/examples/604da59fe41160efa10a846a9dacc07a.asciidoc index b25e552f6..771bd1742 100644 --- a/docs/examples/604da59fe41160efa10a846a9dacc07a.asciidoc +++ b/docs/examples/604da59fe41160efa10a846a9dacc07a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// eql/get-async-eql-status-api.asciidoc:18 +// eql/get-async-eql-status-api.asciidoc:25 [source, python] ---- diff --git a/docs/examples/6061aadb3b870791278212d1e8f52b39.asciidoc b/docs/examples/6061aadb3b870791278212d1e8f52b39.asciidoc index 7a90e38ee..077977e52 100644 --- a/docs/examples/6061aadb3b870791278212d1e8f52b39.asciidoc +++ b/docs/examples/6061aadb3b870791278212d1e8f52b39.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/common/apis/get-ml-memory.asciidoc:228 +// ml/common/apis/get-ml-memory.asciidoc:234 [source, python] ---- diff --git a/docs/examples/6097ae69c64454a92a89ef01b994e9f9.asciidoc b/docs/examples/6097ae69c64454a92a89ef01b994e9f9.asciidoc index 4cad7c0a3..b22dce60a 100644 --- a/docs/examples/6097ae69c64454a92a89ef01b994e9f9.asciidoc +++ b/docs/examples/6097ae69c64454a92a89ef01b994e9f9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// synonyms/apis/put-synonym-rule.asciidoc:145 +// synonyms/apis/put-synonym-rule.asciidoc:151 [source, python] ---- diff --git a/docs/examples/60d3f9a99cc91b43aaa7524a9a74dba0.asciidoc b/docs/examples/60d3f9a99cc91b43aaa7524a9a74dba0.asciidoc index bf47c1f20..6fe2dc982 100644 --- a/docs/examples/60d3f9a99cc91b43aaa7524a9a74dba0.asciidoc +++ b/docs/examples/60d3f9a99cc91b43aaa7524a9a74dba0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/rejected-requests.asciidoc:43 +// 
troubleshooting/common-issues/rejected-requests.asciidoc:50 [source, python] ---- diff --git a/docs/examples/60f889fbed5df3185444f7015b48ed76.asciidoc b/docs/examples/60f889fbed5df3185444f7015b48ed76.asciidoc index f0991a95c..c742c3978 100644 --- a/docs/examples/60f889fbed5df3185444f7015b48ed76.asciidoc +++ b/docs/examples/60f889fbed5df3185444f7015b48ed76.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/create-index.asciidoc:10 +// indices/create-index.asciidoc:16 [source, python] ---- diff --git a/docs/examples/612c2e975f833de9815651135735eae5.asciidoc b/docs/examples/612c2e975f833de9815651135735eae5.asciidoc index b3e378bbb..4a1a4e169 100644 --- a/docs/examples/612c2e975f833de9815651135735eae5.asciidoc +++ b/docs/examples/612c2e975f833de9815651135735eae5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/tasks.asciidoc:247 +// cluster/tasks.asciidoc:253 [source, python] ---- diff --git a/docs/examples/618c9d42284c067891fb57034a4fd834.asciidoc b/docs/examples/618c9d42284c067891fb57034a4fd834.asciidoc index cb2e2f133..3bab5a6e7 100644 --- a/docs/examples/618c9d42284c067891fb57034a4fd834.asciidoc +++ b/docs/examples/618c9d42284c067891fb57034a4fd834.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/start-job.asciidoc:50 +// rollup/apis/start-job.asciidoc:56 [source, python] ---- diff --git a/docs/examples/61d6b9503459914c436930c3ae87d454.asciidoc b/docs/examples/61d6b9503459914c436930c3ae87d454.asciidoc index 12f47766f..4782a49cf 100644 --- a/docs/examples/61d6b9503459914c436930c3ae87d454.asciidoc +++ b/docs/examples/61d6b9503459914c436930c3ae87d454.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-rules/apis/list-query-rulesets.asciidoc:165 +// query-rules/apis/list-query-rulesets.asciidoc:171 [source, python] ---- diff --git a/docs/examples/6244204213f60edf2f23295f9059f2c9.asciidoc b/docs/examples/6244204213f60edf2f23295f9059f2c9.asciidoc index eacb1ead7..ac7a3d39c 100644 --- a/docs/examples/6244204213f60edf2f23295f9059f2c9.asciidoc +++ b/docs/examples/6244204213f60edf2f23295f9059f2c9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/watcher/stats.asciidoc:163 +// rest-api/watcher/stats.asciidoc:169 [source, python] ---- diff --git a/docs/examples/62ccee6ad356428c2d625742f961ceb7.asciidoc b/docs/examples/62ccee6ad356428c2d625742f961ceb7.asciidoc index d94a8ced9..156da376e 100644 --- a/docs/examples/62ccee6ad356428c2d625742f961ceb7.asciidoc +++ b/docs/examples/62ccee6ad356428c2d625742f961ceb7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/update-api-key.asciidoc:200 +// rest-api/security/update-api-key.asciidoc:206 [source, python] ---- diff --git a/docs/examples/62d3c8fccb11471bdc12555c1a7777f2.asciidoc b/docs/examples/62d3c8fccb11471bdc12555c1a7777f2.asciidoc index e772c2097..3e5631809 100644 --- a/docs/examples/62d3c8fccb11471bdc12555c1a7777f2.asciidoc +++ b/docs/examples/62d3c8fccb11471bdc12555c1a7777f2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/fields/synthetic-source.asciidoc:100 +// mapping/fields/synthetic-source.asciidoc:93 [source, python] ---- diff --git a/docs/examples/62eafc5b3ab75cc67314d5a8567d6077.asciidoc b/docs/examples/62eafc5b3ab75cc67314d5a8567d6077.asciidoc index 1af60a98f..880a8fc68 100644 --- a/docs/examples/62eafc5b3ab75cc67314d5a8567d6077.asciidoc +++ b/docs/examples/62eafc5b3ab75cc67314d5a8567d6077.asciidoc @@ -1,5 +1,5 @@ // This file 
is autogenerated, DO NOT EDIT -// rest-api/security/get-api-keys.asciidoc:225 +// rest-api/security/get-api-keys.asciidoc:231 [source, python] ---- diff --git a/docs/examples/63893e7e9479a9b60db71dcddcc79aaf.asciidoc b/docs/examples/63893e7e9479a9b60db71dcddcc79aaf.asciidoc index 1ee6a288d..753107965 100644 --- a/docs/examples/63893e7e9479a9b60db71dcddcc79aaf.asciidoc +++ b/docs/examples/63893e7e9479a9b60db71dcddcc79aaf.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/delete-calendar.asciidoc:38 +// ml/anomaly-detection/apis/delete-calendar.asciidoc:44 [source, python] ---- diff --git a/docs/examples/63a53fcb0717ae9033a679cbfc932851.asciidoc b/docs/examples/63a53fcb0717ae9033a679cbfc932851.asciidoc index 15c170aba..746b8a42d 100644 --- a/docs/examples/63a53fcb0717ae9033a679cbfc932851.asciidoc +++ b/docs/examples/63a53fcb0717ae9033a679cbfc932851.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-alibabacloud-ai-search.asciidoc:168 +// inference/service-alibabacloud-ai-search.asciidoc:174 [source, python] ---- diff --git a/docs/examples/63bf3480627a89b4b4ede4150e1d6bc0.asciidoc b/docs/examples/63bf3480627a89b4b4ede4150e1d6bc0.asciidoc index c7d05b973..91a1b7c2e 100644 --- a/docs/examples/63bf3480627a89b4b4ede4150e1d6bc0.asciidoc +++ b/docs/examples/63bf3480627a89b4b4ede4150e1d6bc0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/bulk-create-roles.asciidoc:119 +// rest-api/security/bulk-create-roles.asciidoc:125 [source, python] ---- diff --git a/docs/examples/63cc960215ae83b359c12df3c0993bfa.asciidoc b/docs/examples/63cc960215ae83b359c12df3c0993bfa.asciidoc index 5529b0b89..bbf69419f 100644 --- a/docs/examples/63cc960215ae83b359c12df3c0993bfa.asciidoc +++ b/docs/examples/63cc960215ae83b359c12df3c0993bfa.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/create-index.asciidoc:130 +// indices/create-index.asciidoc:136 [source, python] ---- diff --git a/docs/examples/63e20883732ec30b5400046be2efb0f1.asciidoc b/docs/examples/63e20883732ec30b5400046be2efb0f1.asciidoc index e6e5a0dd6..d2846a3b6 100644 --- a/docs/examples/63e20883732ec30b5400046be2efb0f1.asciidoc +++ b/docs/examples/63e20883732ec30b5400046be2efb0f1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/flush.asciidoc:121 +// indices/flush.asciidoc:127 [source, python] ---- diff --git a/docs/examples/63ecdab34940af053acc409164914c32.asciidoc b/docs/examples/63ecdab34940af053acc409164914c32.asciidoc index c44c3147f..0c5d51106 100644 --- a/docs/examples/63ecdab34940af053acc409164914c32.asciidoc +++ b/docs/examples/63ecdab34940af053acc409164914c32.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/sparse-vector.asciidoc:46 +// mapping/types/sparse-vector.asciidoc:63 [source, python] ---- diff --git a/docs/examples/640621cea39cdeeb76fbc95bff31a18d.asciidoc b/docs/examples/640621cea39cdeeb76fbc95bff31a18d.asciidoc index 34660de39..1837f3899 100644 --- a/docs/examples/640621cea39cdeeb76fbc95bff31a18d.asciidoc +++ b/docs/examples/640621cea39cdeeb76fbc95bff31a18d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-last-sync-api.asciidoc:116 +// connector/apis/update-connector-last-sync-api.asciidoc:122 [source, python] ---- diff --git a/docs/examples/640dbeecb736bd25f6f2b392b76a7531.asciidoc b/docs/examples/640dbeecb736bd25f6f2b392b76a7531.asciidoc index 2df817d3f..08ac55001 
100644 --- a/docs/examples/640dbeecb736bd25f6f2b392b76a7531.asciidoc +++ b/docs/examples/640dbeecb736bd25f6f2b392b76a7531.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/stats.asciidoc:1902 +// cluster/stats.asciidoc:1914 [source, python] ---- diff --git a/docs/examples/6414b9276ba1c63898c3ff5cbe03c54e.asciidoc b/docs/examples/6414b9276ba1c63898c3ff5cbe03c54e.asciidoc index 5227631bd..561422a4b 100644 --- a/docs/examples/6414b9276ba1c63898c3ff5cbe03c54e.asciidoc +++ b/docs/examples/6414b9276ba1c63898c3ff5cbe03c54e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/segments.asciidoc:128 +// indices/segments.asciidoc:134 [source, python] ---- diff --git a/docs/examples/642161d70dacf7d153767d37d3726838.asciidoc b/docs/examples/642161d70dacf7d153767d37d3726838.asciidoc index 3fb9f5242..a7c877f17 100644 --- a/docs/examples/642161d70dacf7d153767d37d3726838.asciidoc +++ b/docs/examples/642161d70dacf7d153767d37d3726838.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/rollup-index-caps.asciidoc:163 +// rollup/apis/rollup-index-caps.asciidoc:169 [source, python] ---- diff --git a/docs/examples/643e19c3b6ac1134554dd890e2249c2b.asciidoc b/docs/examples/643e19c3b6ac1134554dd890e2249c2b.asciidoc index 0c085cc7e..c77ffb136 100644 --- a/docs/examples/643e19c3b6ac1134554dd890e2249c2b.asciidoc +++ b/docs/examples/643e19c3b6ac1134554dd890e2249c2b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/logs.asciidoc:18 +// data-streams/logs.asciidoc:20 [source, python] ---- diff --git a/docs/examples/64622409407316d2d47094e692d9b516.asciidoc b/docs/examples/64622409407316d2d47094e692d9b516.asciidoc index b5cabc544..606ac367b 100644 --- a/docs/examples/64622409407316d2d47094e692d9b516.asciidoc +++ b/docs/examples/64622409407316d2d47094e692d9b516.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/df-analytics/apis/evaluate-dfanalytics.asciidoc:395 +// ml/df-analytics/apis/evaluate-dfanalytics.asciidoc:401 [source, python] ---- diff --git a/docs/examples/646d71869f1a18c5bede7759559bfc47.asciidoc b/docs/examples/646d71869f1a18c5bede7759559bfc47.asciidoc index 4b28aad02..036a676f3 100644 --- a/docs/examples/646d71869f1a18c5bede7759559bfc47.asciidoc +++ b/docs/examples/646d71869f1a18c5bede7759559bfc47.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/get-field-mapping.asciidoc:236 +// indices/get-field-mapping.asciidoc:242 [source, python] ---- diff --git a/docs/examples/64a79861225553799b26e118d7851dcc.asciidoc b/docs/examples/64a79861225553799b26e118d7851dcc.asciidoc index d9c6bb56c..1d24bea18 100644 --- a/docs/examples/64a79861225553799b26e118d7851dcc.asciidoc +++ b/docs/examples/64a79861225553799b26e118d7851dcc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ilm/error-handling.asciidoc:60 +// ilm/error-handling.asciidoc:61 [source, python] ---- diff --git a/docs/examples/64c572abc23394a77b6cca0b5368ee1d.asciidoc b/docs/examples/64c572abc23394a77b6cca0b5368ee1d.asciidoc index 87b81e8e4..adbb544fe 100644 --- a/docs/examples/64c572abc23394a77b6cca0b5368ee1d.asciidoc +++ b/docs/examples/64c572abc23394a77b6cca0b5368ee1d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// features/apis/get-features-api.asciidoc:12 +// features/apis/get-features-api.asciidoc:18 [source, python] ---- diff --git a/docs/examples/64d24f4b2a57dba48092dafe3eb68ad1.asciidoc 
b/docs/examples/64d24f4b2a57dba48092dafe3eb68ad1.asciidoc index 381ddf143..d916c5e3d 100644 --- a/docs/examples/64d24f4b2a57dba48092dafe3eb68ad1.asciidoc +++ b/docs/examples/64d24f4b2a57dba48092dafe3eb68ad1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/multi-get.asciidoc:239 +// docs/multi-get.asciidoc:245 [source, python] ---- diff --git a/docs/examples/654882f545eca8d7047695f867c63072.asciidoc b/docs/examples/654882f545eca8d7047695f867c63072.asciidoc index ed2907890..f2cf570e4 100644 --- a/docs/examples/654882f545eca8d7047695f867c63072.asciidoc +++ b/docs/examples/654882f545eca8d7047695f867c63072.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// transform/apis/stop-transform.asciidoc:81 +// transform/apis/stop-transform.asciidoc:87 [source, python] ---- diff --git a/docs/examples/65b6185356f16f2f0d84bc5aee2ed0fc.asciidoc b/docs/examples/65b6185356f16f2f0d84bc5aee2ed0fc.asciidoc index 6c0013855..48abc6096 100644 --- a/docs/examples/65b6185356f16f2f0d84bc5aee2ed0fc.asciidoc +++ b/docs/examples/65b6185356f16f2f0d84bc5aee2ed0fc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/sparse-vector-query.asciidoc:25 +// query-dsl/sparse-vector-query.asciidoc:26 [source, python] ---- diff --git a/docs/examples/6606d46685d10377b996b5f20f1229b5.asciidoc b/docs/examples/6606d46685d10377b996b5f20f1229b5.asciidoc index 793512aa2..1ab17d6c4 100644 --- a/docs/examples/6606d46685d10377b996b5f20f1229b5.asciidoc +++ b/docs/examples/6606d46685d10377b996b5f20f1229b5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-index-name-api.asciidoc:76 +// connector/apis/update-connector-index-name-api.asciidoc:82 [source, python] ---- diff --git a/docs/examples/66539dc6011dd2e0282cf81db1f3df27.asciidoc b/docs/examples/66539dc6011dd2e0282cf81db1f3df27.asciidoc index 8e89820c4..77fde631d 100644 --- a/docs/examples/66539dc6011dd2e0282cf81db1f3df27.asciidoc +++ b/docs/examples/66539dc6011dd2e0282cf81db1f3df27.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat.asciidoc:85 +// cat.asciidoc:91 [source, python] ---- diff --git a/docs/examples/6689aa213884196b47a6f482d4993749.asciidoc b/docs/examples/6689aa213884196b47a6f482d4993749.asciidoc index c850d3ee8..c522b28a5 100644 --- a/docs/examples/6689aa213884196b47a6f482d4993749.asciidoc +++ b/docs/examples/6689aa213884196b47a6f482d4993749.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/apis/put-pipeline.asciidoc:11 +// ingest/apis/put-pipeline.asciidoc:17 [source, python] ---- diff --git a/docs/examples/674bb755111c6fbaa4c5ac759395c122.asciidoc b/docs/examples/674bb755111c6fbaa4c5ac759395c122.asciidoc index a61c8c89c..dc4f21124 100644 --- a/docs/examples/674bb755111c6fbaa4c5ac759395c122.asciidoc +++ b/docs/examples/674bb755111c6fbaa4c5ac759395c122.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:128 +// troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:132 [source, python] ---- diff --git a/docs/examples/67aac8882fa476db8a5878b67ea08eb3.asciidoc b/docs/examples/67aac8882fa476db8a5878b67ea08eb3.asciidoc index 4ca7f4d84..d8dfbd7a0 100644 --- a/docs/examples/67aac8882fa476db8a5878b67ea08eb3.asciidoc +++ b/docs/examples/67aac8882fa476db8a5878b67ea08eb3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/apis/repo-analysis-api.asciidoc:26 +// 
snapshot-restore/apis/repo-analysis-api.asciidoc:32 [source, python] ---- diff --git a/docs/examples/67bab07fda27ef77e3bc948211051a33.asciidoc b/docs/examples/67bab07fda27ef77e3bc948211051a33.asciidoc index e7072b9dc..b0dbc1131 100644 --- a/docs/examples/67bab07fda27ef77e3bc948211051a33.asciidoc +++ b/docs/examples/67bab07fda27ef77e3bc948211051a33.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/hotspotting.asciidoc:156 +// troubleshooting/common-issues/hotspotting.asciidoc:160 [source, python] ---- diff --git a/docs/examples/67ffa135c50c43d6788636c88078c7d1.asciidoc b/docs/examples/67ffa135c50c43d6788636c88078c7d1.asciidoc index 263c02e41..ce2a93601 100644 --- a/docs/examples/67ffa135c50c43d6788636c88078c7d1.asciidoc +++ b/docs/examples/67ffa135c50c43d6788636c88078c7d1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/apis/simulate-pipeline.asciidoc:150 +// ingest/apis/simulate-pipeline.asciidoc:156 [source, python] ---- diff --git a/docs/examples/681d24c2633f598fc43d6afff8996dbb.asciidoc b/docs/examples/681d24c2633f598fc43d6afff8996dbb.asciidoc new file mode 100644 index 000000000..79fe95816 --- /dev/null +++ b/docs/examples/681d24c2633f598fc43d6afff8996dbb.asciidoc @@ -0,0 +1,138 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/retrievers-examples.asciidoc:14 + +[source, python] +---- +resp = client.indices.create( + index="retrievers_example", + settings={ + "number_of_shards": 1 + }, + mappings={ + "properties": { + "vector": { + "type": "dense_vector", + "dims": 3, + "similarity": "l2_norm", + "index": True, + "index_options": { + "type": "flat" + } + }, + "text": { + "type": "text" + }, + "year": { + "type": "integer" + }, + "topic": { + "type": "keyword" + } + } + }, +) +print(resp) + +resp1 = client.index( + index="retrievers_example", + id="1", + document={ + "vector": [ + 0.23, + 0.67, + 0.89 + ], + "text": "Large language models are revolutionizing information retrieval by boosting search precision, deepening contextual understanding, and reshaping user experiences in data-rich environments.", + "year": 2024, + "topic": [ + "llm", + "ai", + "information_retrieval" + ] + }, +) +print(resp1) + +resp2 = client.index( + index="retrievers_example", + id="2", + document={ + "vector": [ + 0.12, + 0.56, + 0.78 + ], + "text": "Artificial intelligence is transforming medicine, from advancing diagnostics and tailoring treatment plans to empowering predictive patient care for improved health outcomes.", + "year": 2023, + "topic": [ + "ai", + "medicine" + ] + }, +) +print(resp2) + +resp3 = client.index( + index="retrievers_example", + id="3", + document={ + "vector": [ + 0.45, + 0.32, + 0.91 + ], + "text": "AI is redefining security by enabling advanced threat detection, proactive risk analysis, and dynamic defenses against increasingly sophisticated cyber threats.", + "year": 2024, + "topic": [ + "ai", + "security" + ] + }, +) +print(resp3) + +resp4 = client.index( + index="retrievers_example", + id="4", + document={ + "vector": [ + 0.34, + 0.21, + 0.98 + ], + "text": "Elastic introduces Elastic AI Assistant, the open, generative AI sidekick powered by ESRE to democratize cybersecurity and enable users of every skill level.", + "year": 2023, + "topic": [ + "ai", + "elastic", + "assistant" + ] + }, +) +print(resp4) + +resp5 = client.index( + index="retrievers_example", + id="5", + document={ + "vector": [ + 0.11, + 0.65, + 0.47 + ], + "text": "Learn how to spin up a deployment of our 
hosted Elasticsearch Service and use Elastic Observability to gain deeper insight into the behavior of your applications and systems.", + "year": 2024, + "topic": [ + "documentation", + "observability", + "elastic" + ] + }, +) +print(resp5) + +resp6 = client.indices.refresh( + index="retrievers_example", +) +print(resp6) +---- diff --git a/docs/examples/68738b4fd0dda177022be45be95b4c84.asciidoc b/docs/examples/68738b4fd0dda177022be45be95b4c84.asciidoc index 632e5ba7d..56a80efcd 100644 --- a/docs/examples/68738b4fd0dda177022be45be95b4c84.asciidoc +++ b/docs/examples/68738b4fd0dda177022be45be95b4c84.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/reindex.asciidoc:202 +// docs/reindex.asciidoc:208 [source, python] ---- diff --git a/docs/examples/68b64313bf89ec3f2c645da61999dbb4.asciidoc b/docs/examples/68b64313bf89ec3f2c645da61999dbb4.asciidoc index bd40f4871..3134ba195 100644 --- a/docs/examples/68b64313bf89ec3f2c645da61999dbb4.asciidoc +++ b/docs/examples/68b64313bf89ec3f2c645da61999dbb4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/nodes-info.asciidoc:221 +// cluster/nodes-info.asciidoc:226 [source, python] ---- diff --git a/docs/examples/2fd458d37aab509fe2d970c0b6e2a10f.asciidoc b/docs/examples/68d7f7d4d268ee98caead5aef19933d6.asciidoc similarity index 98% rename from docs/examples/2fd458d37aab509fe2d970c0b6e2a10f.asciidoc rename to docs/examples/68d7f7d4d268ee98caead5aef19933d6.asciidoc index 54adac201..d486506c0 100644 --- a/docs/examples/2fd458d37aab509fe2d970c0b6e2a10f.asciidoc +++ b/docs/examples/68d7f7d4d268ee98caead5aef19933d6.asciidoc @@ -48,7 +48,7 @@ print(resp) resp1 = client.indices.put_index_template( name="2", index_patterns=[ - "k8s*" + "k9s*" ], composed_of=[ "destination_template" diff --git a/docs/examples/692606cc6d6462becc321d92961a3bac.asciidoc b/docs/examples/692606cc6d6462becc321d92961a3bac.asciidoc index 176019bf1..d237ae7f7 100644 --- a/docs/examples/692606cc6d6462becc321d92961a3bac.asciidoc +++ b/docs/examples/692606cc6d6462becc321d92961a3bac.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// text-structure/apis/test-grok-pattern.asciidoc:54 +// text-structure/apis/test-grok-pattern.asciidoc:60 [source, python] ---- diff --git a/docs/examples/69541f0bb81ab3797926bb2a00607cda.asciidoc b/docs/examples/69541f0bb81ab3797926bb2a00607cda.asciidoc index 72ee5b9f0..88e55f89c 100644 --- a/docs/examples/69541f0bb81ab3797926bb2a00607cda.asciidoc +++ b/docs/examples/69541f0bb81ab3797926bb2a00607cda.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:512 +// search/retriever.asciidoc:703 [source, python] ---- diff --git a/docs/examples/69a08e7bdcc616f3bdcb8ae842d9e30e.asciidoc b/docs/examples/69a08e7bdcc616f3bdcb8ae842d9e30e.asciidoc index 87d427403..0b36e7def 100644 --- a/docs/examples/69a08e7bdcc616f3bdcb8ae842d9e30e.asciidoc +++ b/docs/examples/69a08e7bdcc616f3bdcb8ae842d9e30e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/get.asciidoc:354 +// docs/get.asciidoc:360 [source, python] ---- diff --git a/docs/examples/69c07cfdf8054c301cd6186c5d71aa02.asciidoc b/docs/examples/69c07cfdf8054c301cd6186c5d71aa02.asciidoc index b4add7199..24327ea21 100644 --- a/docs/examples/69c07cfdf8054c301cd6186c5d71aa02.asciidoc +++ b/docs/examples/69c07cfdf8054c301cd6186c5d71aa02.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update-by-query.asciidoc:344 +// docs/update-by-query.asciidoc:350 [source, python] 
---- diff --git a/docs/examples/69f8b0f2a9ba47e11f363d788cee9d6d.asciidoc b/docs/examples/69f8b0f2a9ba47e11f363d788cee9d6d.asciidoc index 0325c8d27..18bd0f1ce 100644 --- a/docs/examples/69f8b0f2a9ba47e11f363d788cee9d6d.asciidoc +++ b/docs/examples/69f8b0f2a9ba47e11f363d788cee9d6d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// migration/apis/deprecation.asciidoc:140 +// migration/apis/deprecation.asciidoc:146 [source, python] ---- diff --git a/docs/examples/6a350a17701e8c8158407191f2718b66.asciidoc b/docs/examples/6a350a17701e8c8158407191f2718b66.asciidoc index a721d4b83..a3290564b 100644 --- a/docs/examples/6a350a17701e8c8158407191f2718b66.asciidoc +++ b/docs/examples/6a350a17701e8c8158407191f2718b66.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ccr/apis/follow/post-unfollow.asciidoc:74 +// ccr/apis/follow/post-unfollow.asciidoc:80 [source, python] ---- diff --git a/docs/examples/6a3a86ff58e5f20950d429cf2832c229.asciidoc b/docs/examples/6a3a86ff58e5f20950d429cf2832c229.asciidoc index a6761640a..624c974fa 100644 --- a/docs/examples/6a3a86ff58e5f20950d429cf2832c229.asciidoc +++ b/docs/examples/6a3a86ff58e5f20950d429cf2832c229.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/apis/get-pipeline.asciidoc:76 +// ingest/apis/get-pipeline.asciidoc:82 [source, python] ---- diff --git a/docs/examples/6aca241c0361d26f134712821e2d09a9.asciidoc b/docs/examples/6aca241c0361d26f134712821e2d09a9.asciidoc index 72da86fba..c0a72833c 100644 --- a/docs/examples/6aca241c0361d26f134712821e2d09a9.asciidoc +++ b/docs/examples/6aca241c0361d26f134712821e2d09a9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/apis/clean-up-repo-api.asciidoc:79 +// snapshot-restore/apis/clean-up-repo-api.asciidoc:85 [source, python] ---- diff --git a/docs/examples/6b0288acb739c4667d41339e5100c327.asciidoc b/docs/examples/6b0288acb739c4667d41339e5100c327.asciidoc index 26a2af93b..421376449 100644 --- a/docs/examples/6b0288acb739c4667d41339e5100c327.asciidoc +++ b/docs/examples/6b0288acb739c4667d41339e5100c327.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/match-query.asciidoc:230 +// query-dsl/match-query.asciidoc:234 [source, python] ---- diff --git a/docs/examples/6b1336ff477f91d4a0db0b06db546ff0.asciidoc b/docs/examples/6b1336ff477f91d4a0db0b06db546ff0.asciidoc index 1dccfee87..162a22abf 100644 --- a/docs/examples/6b1336ff477f91d4a0db0b06db546ff0.asciidoc +++ b/docs/examples/6b1336ff477f91d4a0db0b06db546ff0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/watcher/stop.asciidoc:45 +// rest-api/watcher/stop.asciidoc:51 [source, python] ---- diff --git a/docs/examples/6b3dcde0656d3a96dbcfed1ec814e10a.asciidoc b/docs/examples/6b3dcde0656d3a96dbcfed1ec814e10a.asciidoc index 650690a2c..81ce7ccd1 100644 --- a/docs/examples/6b3dcde0656d3a96dbcfed1ec814e10a.asciidoc +++ b/docs/examples/6b3dcde0656d3a96dbcfed1ec814e10a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// shutdown/apis/shutdown-delete.asciidoc:65 +// shutdown/apis/shutdown-delete.asciidoc:71 [source, python] ---- diff --git a/docs/examples/6b67c6121efb86ee100d40c2646f77b5.asciidoc b/docs/examples/6b67c6121efb86ee100d40c2646f77b5.asciidoc new file mode 100644 index 000000000..bc427ddf0 --- /dev/null +++ b/docs/examples/6b67c6121efb86ee100d40c2646f77b5.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// index-modules/slowlog.asciidoc:219 + +[source, 
python] +---- +resp = client.indices.put_settings( + index="*", + settings={ + "index.search.slowlog.include.user": True, + "index.search.slowlog.threshold.fetch.warn": "30s", + "index.search.slowlog.threshold.query.warn": "30s" + }, +) +print(resp) +---- diff --git a/docs/examples/6b6f5e0ab4ef523fc9a3a4a655848f64.asciidoc b/docs/examples/6b6f5e0ab4ef523fc9a3a4a655848f64.asciidoc index 9c8ff6008..10f198dce 100644 --- a/docs/examples/6b6f5e0ab4ef523fc9a3a4a655848f64.asciidoc +++ b/docs/examples/6b6f5e0ab4ef523fc9a3a4a655848f64.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/sparse-vector-query.asciidoc:43 +// query-dsl/sparse-vector-query.asciidoc:44 [source, python] ---- diff --git a/docs/examples/6b6fd0a5942dfb9762ad2790cf421a80.asciidoc b/docs/examples/6b6fd0a5942dfb9762ad2790cf421a80.asciidoc index 941c5899b..db8d1d9dc 100644 --- a/docs/examples/6b6fd0a5942dfb9762ad2790cf421a80.asciidoc +++ b/docs/examples/6b6fd0a5942dfb9762ad2790cf421a80.asciidoc @@ -12,7 +12,7 @@ resp = client.search_application.put( "template": { "script": { "lang": "mustache", - "source": "\n {\n \"query\": {\n \"bool\": {\n \"must\": [\n {{#query}}\n \n {{/query}}\n ],\n \"filter\": {{#toJson}}_es_filters{{/toJson}}\n }\n },\n \"_source\": {\n \"includes\": [\"title\", \"plot\"]\n },\n \"aggs\": {{#toJson}}_es_aggs{{/toJson}},\n \"from\": {{from}},\n \"size\": {{size}},\n \"sort\": {{#toJson}}_es_sort_fields{{/toJson}}\n }\n ", + "source": "\n {\n \"query\": {\n \"bool\": {\n \"must\": [\n {{#query}}\n {{/query}}\n ],\n \"filter\": {{#toJson}}_es_filters{{/toJson}}\n }\n },\n \"_source\": {\n \"includes\": [\"title\", \"plot\"]\n },\n \"aggs\": {{#toJson}}_es_aggs{{/toJson}},\n \"from\": {{from}},\n \"size\": {{size}},\n \"sort\": {{#toJson}}_es_sort_fields{{/toJson}}\n }\n ", "params": { "query": "", "_es_filters": {}, diff --git a/docs/examples/6b8c5c8145c287c4fc535fa57ccf95a7.asciidoc b/docs/examples/6b8c5c8145c287c4fc535fa57ccf95a7.asciidoc index 5afe00179..39589af02 100644 --- a/docs/examples/6b8c5c8145c287c4fc535fa57ccf95a7.asciidoc +++ b/docs/examples/6b8c5c8145c287c4fc535fa57ccf95a7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/list-connector-sync-jobs-api.asciidoc:65 +// connector/apis/list-connector-sync-jobs-api.asciidoc:71 [source, python] ---- diff --git a/docs/examples/6ba332596f5eb29660c90ab2d480e7dc.asciidoc b/docs/examples/6ba332596f5eb29660c90ab2d480e7dc.asciidoc index a24ccbc71..fbd0709e9 100644 --- a/docs/examples/6ba332596f5eb29660c90ab2d480e7dc.asciidoc +++ b/docs/examples/6ba332596f5eb29660c90ab2d480e7dc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-index-template-v1.asciidoc:183 +// indices/put-index-template-v1.asciidoc:189 [source, python] ---- diff --git a/docs/examples/6c70b022a8a74b887fe46e514feb38c0.asciidoc b/docs/examples/6c70b022a8a74b887fe46e514feb38c0.asciidoc index 4b7511daa..4ad22e7db 100644 --- a/docs/examples/6c70b022a8a74b887fe46e514feb38c0.asciidoc +++ b/docs/examples/6c70b022a8a74b887fe46e514feb38c0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/recovery.asciidoc:12 +// indices/recovery.asciidoc:18 [source, python] ---- diff --git a/docs/examples/6c72460570307f23478100db04a84c8e.asciidoc b/docs/examples/6c72460570307f23478100db04a84c8e.asciidoc index 883ddf576..be86726e6 100644 --- a/docs/examples/6c72460570307f23478100db04a84c8e.asciidoc +++ b/docs/examples/6c72460570307f23478100db04a84c8e.asciidoc @@ -1,5 +1,5 @@ // This file is 
autogenerated, DO NOT EDIT -// indices/get-component-template.asciidoc:86 +// indices/get-component-template.asciidoc:92 [source, python] ---- diff --git a/docs/examples/6c8bf6d4d68b7756f953be4c07655337.asciidoc b/docs/examples/6c8bf6d4d68b7756f953be4c07655337.asciidoc index 4b01b060e..522f1b320 100644 --- a/docs/examples/6c8bf6d4d68b7756f953be4c07655337.asciidoc +++ b/docs/examples/6c8bf6d4d68b7756f953be4c07655337.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/nodes-reload-secure-settings.asciidoc:63 +// cluster/nodes-reload-secure-settings.asciidoc:69 [source, python] ---- diff --git a/docs/examples/6c927313867647e0ef3cd3a37cb410cc.asciidoc b/docs/examples/6c927313867647e0ef3cd3a37cb410cc.asciidoc index 0c0e8d4e0..b34f7c02f 100644 --- a/docs/examples/6c927313867647e0ef3cd3a37cb410cc.asciidoc +++ b/docs/examples/6c927313867647e0ef3cd3a37cb410cc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/invalidate-api-keys.asciidoc:179 +// rest-api/security/invalidate-api-keys.asciidoc:185 [source, python] ---- diff --git a/docs/examples/6ce6cac9df216c52371c2e77e6e07ba1.asciidoc b/docs/examples/6ce6cac9df216c52371c2e77e6e07ba1.asciidoc index 7f5bcdd06..4458d2c85 100644 --- a/docs/examples/6ce6cac9df216c52371c2e77e6e07ba1.asciidoc +++ b/docs/examples/6ce6cac9df216c52371c2e77e6e07ba1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-rules/apis/put-query-ruleset.asciidoc:117 +// query-rules/apis/put-query-ruleset.asciidoc:123 [source, python] ---- diff --git a/docs/examples/6dd2a107bc64fd6f058fb17c21640649.asciidoc b/docs/examples/6dd2a107bc64fd6f058fb17c21640649.asciidoc index 4cf7cbec2..1ec99e3ef 100644 --- a/docs/examples/6dd2a107bc64fd6f058fb17c21640649.asciidoc +++ b/docs/examples/6dd2a107bc64fd6f058fb17c21640649.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/invalidate-tokens.asciidoc:210 +// rest-api/security/invalidate-tokens.asciidoc:216 [source, python] ---- diff --git a/docs/examples/6dd4c02fe3d6b800648a04d3e2d29fc1.asciidoc b/docs/examples/6dd4c02fe3d6b800648a04d3e2d29fc1.asciidoc index 7682dc8ef..17a4a4a4c 100644 --- a/docs/examples/6dd4c02fe3d6b800648a04d3e2d29fc1.asciidoc +++ b/docs/examples/6dd4c02fe3d6b800648a04d3e2d29fc1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/apis/delete-snapshot-api.asciidoc:72 +// snapshot-restore/apis/delete-snapshot-api.asciidoc:78 [source, python] ---- diff --git a/docs/examples/6ddd4e657efbf45def430a6419825796.asciidoc b/docs/examples/6ddd4e657efbf45def430a6419825796.asciidoc index f3e419d1f..1742c8750 100644 --- a/docs/examples/6ddd4e657efbf45def430a6419825796.asciidoc +++ b/docs/examples/6ddd4e657efbf45def430a6419825796.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-azure-ai-studio.asciidoc:179 +// inference/service-azure-ai-studio.asciidoc:185 [source, python] ---- diff --git a/docs/examples/6e6b78e6b689a5d6aa637271b6d084e2.asciidoc b/docs/examples/6e6b78e6b689a5d6aa637271b6d084e2.asciidoc index 798e0b46a..f0612e3b4 100644 --- a/docs/examples/6e6b78e6b689a5d6aa637271b6d084e2.asciidoc +++ b/docs/examples/6e6b78e6b689a5d6aa637271b6d084e2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:328 +// search/retriever.asciidoc:340 [source, python] ---- diff --git a/docs/examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc b/docs/examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc index 
0ffe6b433..4ffc08b07 100644 --- a/docs/examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc +++ b/docs/examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc @@ -1,9 +1,12 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/bulk-update-api-keys.asciidoc:296 +// rest-api/security/bulk-update-api-keys.asciidoc:302 [source, python] ---- -resp = client.security.bulk_update_api_keys( +resp = client.perform_request( + "POST", + "/_security/api_key/_bulk_update", + headers={"Content-Type": "application/json"}, body={ "ids": [ "VuaCfGcBCdbkQm-e5aOx", diff --git a/docs/examples/6f48ab7cbb8a4a46d0e9272c07166eaf.asciidoc b/docs/examples/6f48ab7cbb8a4a46d0e9272c07166eaf.asciidoc index 9942c771f..ae2cff12d 100644 --- a/docs/examples/6f48ab7cbb8a4a46d0e9272c07166eaf.asciidoc +++ b/docs/examples/6f48ab7cbb8a4a46d0e9272c07166eaf.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// sql/apis/sql-translate-api.asciidoc:12 +// sql/apis/sql-translate-api.asciidoc:18 [source, python] ---- diff --git a/docs/examples/6f4cbebfd6d2cee54aa3e7a86a755ef8.asciidoc b/docs/examples/6f4cbebfd6d2cee54aa3e7a86a755ef8.asciidoc index b3acd844e..76430e37d 100644 --- a/docs/examples/6f4cbebfd6d2cee54aa3e7a86a755ef8.asciidoc +++ b/docs/examples/6f4cbebfd6d2cee54aa3e7a86a755ef8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/knn-query.asciidoc:204 +// query-dsl/knn-query.asciidoc:210 [source, python] ---- diff --git a/docs/examples/6f855bc92b4cc6e6a63f95bce1cb4441.asciidoc b/docs/examples/6f855bc92b4cc6e6a63f95bce1cb4441.asciidoc index 139cdcd7b..7efd19751 100644 --- a/docs/examples/6f855bc92b4cc6e6a63f95bce1cb4441.asciidoc +++ b/docs/examples/6f855bc92b4cc6e6a63f95bce1cb4441.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/logstash/get-pipeline.asciidoc:69 +// rest-api/logstash/get-pipeline.asciidoc:75 [source, python] ---- diff --git a/docs/examples/f9ee5d55a73f4c1fe7d507609047aefd.asciidoc b/docs/examples/6fa02c2ad485bbe91f44b321158250f3.asciidoc similarity index 69% rename from docs/examples/f9ee5d55a73f4c1fe7d507609047aefd.asciidoc rename to docs/examples/6fa02c2ad485bbe91f44b321158250f3.asciidoc index d74dee4b3..f5ab1b8f5 100644 --- a/docs/examples/f9ee5d55a73f4c1fe7d507609047aefd.asciidoc +++ b/docs/examples/6fa02c2ad485bbe91f44b321158250f3.asciidoc @@ -16,6 +16,15 @@ resp = client.search( ] } }, + highlight={ + "fields": { + "my_field": { + "matched_fields": [ + "my_field._index_prefix" + ] + } + } + }, ) print(resp) ---- diff --git a/docs/examples/6fa570ae7039171e2ab722344ec1063f.asciidoc b/docs/examples/6fa570ae7039171e2ab722344ec1063f.asciidoc index 787c451f0..e71e7bd27 100644 --- a/docs/examples/6fa570ae7039171e2ab722344ec1063f.asciidoc +++ b/docs/examples/6fa570ae7039171e2ab722344ec1063f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/get-field-mapping.asciidoc:14 +// indices/get-field-mapping.asciidoc:20 [source, python] ---- diff --git a/docs/examples/6fc778e9a888b16b937c5c2a7a1ec140.asciidoc b/docs/examples/6fc778e9a888b16b937c5c2a7a1ec140.asciidoc index 3502d3a1a..509f9ce44 100644 --- a/docs/examples/6fc778e9a888b16b937c5c2a7a1ec140.asciidoc +++ b/docs/examples/6fc778e9a888b16b937c5c2a7a1ec140.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// searchable-snapshots/apis/clear-cache.asciidoc:69 +// searchable-snapshots/apis/clear-cache.asciidoc:75 [source, python] ---- diff --git a/docs/examples/6fe6c095c6995e0f2214f5f3bc85d74e.asciidoc 
b/docs/examples/6fe6c095c6995e0f2214f5f3bc85d74e.asciidoc index cba318af2..ea72bc7db 100644 --- a/docs/examples/6fe6c095c6995e0f2214f5f3bc85d74e.asciidoc +++ b/docs/examples/6fe6c095c6995e0f2214f5f3bc85d74e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/lifecycle/apis/delete-lifecycle.asciidoc:77 +// data-streams/lifecycle/apis/delete-lifecycle.asciidoc:83 [source, python] ---- diff --git a/docs/examples/701f1fffc65e9e51c96aa60261e2eae3.asciidoc b/docs/examples/701f1fffc65e9e51c96aa60261e2eae3.asciidoc index 46263db30..5505f8c93 100644 --- a/docs/examples/701f1fffc65e9e51c96aa60261e2eae3.asciidoc +++ b/docs/examples/701f1fffc65e9e51c96aa60261e2eae3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/update-cross-cluster-api-key.asciidoc:120 +// rest-api/security/update-cross-cluster-api-key.asciidoc:126 [source, python] ---- diff --git a/docs/examples/708e7ec681be41791f232817a07cda82.asciidoc b/docs/examples/708e7ec681be41791f232817a07cda82.asciidoc index 522239825..6fe563481 100644 --- a/docs/examples/708e7ec681be41791f232817a07cda82.asciidoc +++ b/docs/examples/708e7ec681be41791f232817a07cda82.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/apis/get-snapshot-api.asciidoc:532 +// snapshot-restore/apis/get-snapshot-api.asciidoc:538 [source, python] ---- diff --git a/docs/examples/70c736ecb3746dbe839af0e468712805.asciidoc b/docs/examples/70c736ecb3746dbe839af0e468712805.asciidoc index 88e06c678..f30a7d193 100644 --- a/docs/examples/70c736ecb3746dbe839af0e468712805.asciidoc +++ b/docs/examples/70c736ecb3746dbe839af0e468712805.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// transform/apis/delete-transform.asciidoc:53 +// transform/apis/delete-transform.asciidoc:59 [source, python] ---- diff --git a/docs/examples/70cc66bf4054ebf0ad4955cb99d9ab80.asciidoc b/docs/examples/70cc66bf4054ebf0ad4955cb99d9ab80.asciidoc index 6382cbc78..83f65cf1b 100644 --- a/docs/examples/70cc66bf4054ebf0ad4955cb99d9ab80.asciidoc +++ b/docs/examples/70cc66bf4054ebf0ad4955cb99d9ab80.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/update-trained-model-deployment.asciidoc:74 +// ml/trained-models/apis/update-trained-model-deployment.asciidoc:80 [source, python] ---- diff --git a/docs/examples/7106e6317e6368b9863cf64df9c6f0c9.asciidoc b/docs/examples/7106e6317e6368b9863cf64df9c6f0c9.asciidoc index 6e7c9ce55..f24bc51be 100644 --- a/docs/examples/7106e6317e6368b9863cf64df9c6f0c9.asciidoc +++ b/docs/examples/7106e6317e6368b9863cf64df9c6f0c9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// transform/apis/put-transform.asciidoc:378 +// transform/apis/put-transform.asciidoc:384 [source, python] ---- diff --git a/docs/examples/71c629c44bf3c542a0daacbfc253c4b0.asciidoc b/docs/examples/71c629c44bf3c542a0daacbfc253c4b0.asciidoc index bd0b9c3ca..aafa33337 100644 --- a/docs/examples/71c629c44bf3c542a0daacbfc253c4b0.asciidoc +++ b/docs/examples/71c629c44bf3c542a0daacbfc253c4b0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/stats.asciidoc:1895 +// cluster/stats.asciidoc:1907 [source, python] ---- diff --git a/docs/examples/71fa652ddea811eb3c8bf8c5db21e549.asciidoc b/docs/examples/71fa652ddea811eb3c8bf8c5db21e549.asciidoc index 94f43f795..c0932dc5d 100644 --- a/docs/examples/71fa652ddea811eb3c8bf8c5db21e549.asciidoc +++ b/docs/examples/71fa652ddea811eb3c8bf8c5db21e549.asciidoc @@ -1,5 +1,5 @@ // 
This file is autogenerated, DO NOT EDIT -// indices/analyze.asciidoc:224 +// indices/analyze.asciidoc:230 [source, python] ---- diff --git a/docs/examples/72bae0252b74ff6fd9f0702ff008d84a.asciidoc b/docs/examples/72bae0252b74ff6fd9f0702ff008d84a.asciidoc index 733bae91a..676bf2d62 100644 --- a/docs/examples/72bae0252b74ff6fd9f0702ff008d84a.asciidoc +++ b/docs/examples/72bae0252b74ff6fd9f0702ff008d84a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/apis/get-snapshot-api.asciidoc:664 +// snapshot-restore/apis/get-snapshot-api.asciidoc:670 [source, python] ---- diff --git a/docs/examples/72beebe779a258c225dee7b023e60c52.asciidoc b/docs/examples/72beebe779a258c225dee7b023e60c52.asciidoc index 4b0eeca27..1863afd2c 100644 --- a/docs/examples/72beebe779a258c225dee7b023e60c52.asciidoc +++ b/docs/examples/72beebe779a258c225dee7b023e60c52.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/point-in-time-api.asciidoc:146 +// search/point-in-time-api.asciidoc:152 [source, python] ---- diff --git a/docs/examples/730045fae3743c39b612813a42c330c3.asciidoc b/docs/examples/730045fae3743c39b612813a42c330c3.asciidoc new file mode 100644 index 000000000..646e4225f --- /dev/null +++ b/docs/examples/730045fae3743c39b612813a42c330c3.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// mapping/params/index-prefixes.asciidoc:64 + +[source, python] +---- +resp = client.search( + index="my-index-000001", + query={ + "prefix": { + "full_name": { + "value": "ki" + } + } + }, + highlight={ + "fields": { + "full_name": { + "matched_fields": [ + "full_name._index_prefix" + ] + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/73646c12ad33a813ab2280f1dc83500e.asciidoc b/docs/examples/73646c12ad33a813ab2280f1dc83500e.asciidoc index 3ca8489e1..61020a145 100644 --- a/docs/examples/73646c12ad33a813ab2280f1dc83500e.asciidoc +++ b/docs/examples/73646c12ad33a813ab2280f1dc83500e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ccr/apis/follow/put-follow.asciidoc:24 +// ccr/apis/follow/put-follow.asciidoc:30 [source, python] ---- diff --git a/docs/examples/73b07b24ab2c4cd304a57f9cbda8b863.asciidoc b/docs/examples/73b07b24ab2c4cd304a57f9cbda8b863.asciidoc index 6fe97c17f..c0a9ee6d8 100644 --- a/docs/examples/73b07b24ab2c4cd304a57f9cbda8b863.asciidoc +++ b/docs/examples/73b07b24ab2c4cd304a57f9cbda8b863.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// behavioral-analytics/apis/list-analytics-collection.asciidoc:60 +// behavioral-analytics/apis/list-analytics-collection.asciidoc:66 [source, python] ---- diff --git a/docs/examples/73d1a6c5ef90b7e35d43a0bfdc1e158d.asciidoc b/docs/examples/73d1a6c5ef90b7e35d43a0bfdc1e158d.asciidoc index adff218d6..41b4fc221 100644 --- a/docs/examples/73d1a6c5ef90b7e35d43a0bfdc1e158d.asciidoc +++ b/docs/examples/73d1a6c5ef90b7e35d43a0bfdc1e158d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/rollup-index-caps.asciidoc:88 +// rollup/apis/rollup-index-caps.asciidoc:94 [source, python] ---- diff --git a/docs/examples/73ebc89cb32adb389ae16bb088d7c7e6.asciidoc b/docs/examples/73ebc89cb32adb389ae16bb088d7c7e6.asciidoc index 54424173c..524e6230b 100644 --- a/docs/examples/73ebc89cb32adb389ae16bb088d7c7e6.asciidoc +++ b/docs/examples/73ebc89cb32adb389ae16bb088d7c7e6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:84 +// 
troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:242 [source, python] ---- diff --git a/docs/examples/73fa0d6d03cd98ea538fff9e89d99eed.asciidoc b/docs/examples/73fa0d6d03cd98ea538fff9e89d99eed.asciidoc index 7a44e748a..1a87f8842 100644 --- a/docs/examples/73fa0d6d03cd98ea538fff9e89d99eed.asciidoc +++ b/docs/examples/73fa0d6d03cd98ea538fff9e89d99eed.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-service-accounts.asciidoc:57 +// rest-api/security/get-service-accounts.asciidoc:63 [source, python] ---- diff --git a/docs/examples/741180473ba526219578ad0422f4fe81.asciidoc b/docs/examples/741180473ba526219578ad0422f4fe81.asciidoc index 328eb727d..462e68e4d 100644 --- a/docs/examples/741180473ba526219578ad0422f4fe81.asciidoc +++ b/docs/examples/741180473ba526219578ad0422f4fe81.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-features-api.asciidoc:91 +// connector/apis/update-connector-features-api.asciidoc:97 [source, python] ---- diff --git a/docs/examples/7429b16221fe741fd31b0584786dd0b0.asciidoc b/docs/examples/7429b16221fe741fd31b0584786dd0b0.asciidoc index e9d165184..9e552ae3c 100644 --- a/docs/examples/7429b16221fe741fd31b0584786dd0b0.asciidoc +++ b/docs/examples/7429b16221fe741fd31b0584786dd0b0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/post-inference.asciidoc:243 +// inference/post-inference.asciidoc:249 [source, python] ---- diff --git a/docs/examples/7471e97aaaf21c3a200abdd89f15c3cc.asciidoc b/docs/examples/7471e97aaaf21c3a200abdd89f15c3cc.asciidoc index 471f96714..8db72beed 100644 --- a/docs/examples/7471e97aaaf21c3a200abdd89f15c3cc.asciidoc +++ b/docs/examples/7471e97aaaf21c3a200abdd89f15c3cc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/intervals-query.asciidoc:383 +// query-dsl/intervals-query.asciidoc:393 [source, python] ---- diff --git a/docs/examples/7478ff69113fb53f41ea07cdf911fa67.asciidoc b/docs/examples/7478ff69113fb53f41ea07cdf911fa67.asciidoc new file mode 100644 index 000000000..4fce43b61 --- /dev/null +++ b/docs/examples/7478ff69113fb53f41ea07cdf911fa67.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// quickstart/aggs-tutorial.asciidoc:1343 + +[source, python] +---- +resp = client.search( + index="kibana_sample_data_ecommerce", + size=0, + aggs={ + "daily_sales": { + "date_histogram": { + "field": "order_date", + "calendar_interval": "day" + }, + "aggs": { + "daily_revenue": { + "sum": { + "field": "taxful_total_price" + } + }, + "smoothed_revenue": { + "moving_fn": { + "buckets_path": "daily_revenue", + "window": 3, + "script": "MovingFunctions.unweightedAvg(values)" + } + } + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/74a80c28737a0648db0dfe7f049d12f2.asciidoc b/docs/examples/74a80c28737a0648db0dfe7f049d12f2.asciidoc index c94c2b279..604fd2449 100644 --- a/docs/examples/74a80c28737a0648db0dfe7f049d12f2.asciidoc +++ b/docs/examples/74a80c28737a0648db0dfe7f049d12f2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/get.asciidoc:272 +// docs/get.asciidoc:278 [source, python] ---- diff --git a/docs/examples/74b229a6e020113e5749099451979c89.asciidoc b/docs/examples/74b229a6e020113e5749099451979c89.asciidoc deleted file mode 100644 index d4560e688..000000000 --- a/docs/examples/74b229a6e020113e5749099451979c89.asciidoc +++ /dev/null @@ -1,26 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// 
query-dsl/semantic-query.asciidoc:172 - -[source, python] ----- -resp = client.search( - index="test-index", - query={ - "nested": { - "path": "inference_field.inference.chunks", - "query": { - "knn": { - "field": "inference_field.inference.chunks.embeddings", - "query_vector_builder": { - "text_embedding": { - "model_id": "my_inference_id", - "model_text": "mountain lake" - } - } - } - } - } - }, -) -print(resp) ----- diff --git a/docs/examples/74da377bccad43da2b0e276c086d26ba.asciidoc b/docs/examples/74da377bccad43da2b0e276c086d26ba.asciidoc index 731c683e3..7607134c9 100644 --- a/docs/examples/74da377bccad43da2b0e276c086d26ba.asciidoc +++ b/docs/examples/74da377bccad43da2b0e276c086d26ba.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/cluster-info.asciidoc:382 +// cluster/cluster-info.asciidoc:388 [source, python] ---- diff --git a/docs/examples/750ac969f9a05567f5cdf4f93d6244b6.asciidoc b/docs/examples/750ac969f9a05567f5cdf4f93d6244b6.asciidoc index 455c5f508..7e47cc7c7 100644 --- a/docs/examples/750ac969f9a05567f5cdf4f93d6244b6.asciidoc +++ b/docs/examples/750ac969f9a05567f5cdf4f93d6244b6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:248 +// troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:281 [source, python] ---- diff --git a/docs/examples/75957a7d1b67e3d47899c5f18b32cb61.asciidoc b/docs/examples/75957a7d1b67e3d47899c5f18b32cb61.asciidoc index c6b6cad6c..4befbcd95 100644 --- a/docs/examples/75957a7d1b67e3d47899c5f18b32cb61.asciidoc +++ b/docs/examples/75957a7d1b67e3d47899c5f18b32cb61.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/close-job.asciidoc:99 +// ml/anomaly-detection/apis/close-job.asciidoc:105 [source, python] ---- diff --git a/docs/examples/75aba7b1d3a22dce62f26b8b1e6bee58.asciidoc b/docs/examples/75aba7b1d3a22dce62f26b8b1e6bee58.asciidoc index d603f109b..30dd30499 100644 --- a/docs/examples/75aba7b1d3a22dce62f26b8b1e6bee58.asciidoc +++ b/docs/examples/75aba7b1d3a22dce62f26b8b1e6bee58.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/validate.asciidoc:167 +// search/validate.asciidoc:173 [source, python] ---- diff --git a/docs/examples/75e6d66e94e61bd8a555beaaee255c36.asciidoc b/docs/examples/75e6d66e94e61bd8a555beaaee255c36.asciidoc index b8e011dc5..f1e5b8201 100644 --- a/docs/examples/75e6d66e94e61bd8a555beaaee255c36.asciidoc +++ b/docs/examples/75e6d66e94e61bd8a555beaaee255c36.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/rollup-search.asciidoc:170 +// rollup/apis/rollup-search.asciidoc:176 [source, python] ---- diff --git a/docs/examples/76bc87c2592864152768687c2963d1d1.asciidoc b/docs/examples/76bc87c2592864152768687c2963d1d1.asciidoc index 204b2d14b..f0da20a0f 100644 --- a/docs/examples/76bc87c2592864152768687c2963d1d1.asciidoc +++ b/docs/examples/76bc87c2592864152768687c2963d1d1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/update-api-key.asciidoc:148 +// rest-api/security/update-api-key.asciidoc:154 [source, python] ---- diff --git a/docs/examples/76c167d8ab305cb43b594f140c902dfe.asciidoc b/docs/examples/76c167d8ab305cb43b594f140c902dfe.asciidoc index 9a6d854d8..ffc545fa3 100644 --- a/docs/examples/76c167d8ab305cb43b594f140c902dfe.asciidoc +++ b/docs/examples/76c167d8ab305cb43b594f140c902dfe.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// 
indices/shrink-index.asciidoc:162 +// indices/shrink-index.asciidoc:168 [source, python] ---- diff --git a/docs/examples/76dbdd0b2bd48c3c6b1a8d81e23bafd6.asciidoc b/docs/examples/76dbdd0b2bd48c3c6b1a8d81e23bafd6.asciidoc index 88bbfb22e..954224106 100644 --- a/docs/examples/76dbdd0b2bd48c3c6b1a8d81e23bafd6.asciidoc +++ b/docs/examples/76dbdd0b2bd48c3c6b1a8d81e23bafd6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/analyze.asciidoc:143 +// indices/analyze.asciidoc:149 [source, python] ---- diff --git a/docs/examples/76e02434835630cb830724beb92df354.asciidoc b/docs/examples/76e02434835630cb830724beb92df354.asciidoc new file mode 100644 index 000000000..886da7d4d --- /dev/null +++ b/docs/examples/76e02434835630cb830724beb92df354.asciidoc @@ -0,0 +1,47 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/retrievers-examples.asciidoc:1183 + +[source, python] +---- +resp = client.search( + index="retrievers_example", + retriever={ + "rrf": { + "retrievers": [ + { + "knn": { + "field": "vector", + "query_vector": [ + 0.23, + 0.67, + 0.89 + ], + "k": 3, + "num_candidates": 5 + } + }, + { + "text_similarity_reranker": { + "retriever": { + "standard": { + "query": { + "term": { + "topic": "ai" + } + } + } + }, + "field": "text", + "inference_id": "my-rerank-model", + "inference_text": "Can I use generative AI to identify user intent and improve search relevance?" + } + } + ], + "rank_window_size": 10, + "rank_constant": 1 + } + }, + source=False, +) +print(resp) +---- diff --git a/docs/examples/7709a48020a6cefbbe547fb944541cdb.asciidoc b/docs/examples/7709a48020a6cefbbe547fb944541cdb.asciidoc index 7e83448bc..00ea6fa58 100644 --- a/docs/examples/7709a48020a6cefbbe547fb944541cdb.asciidoc +++ b/docs/examples/7709a48020a6cefbbe547fb944541cdb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/dense-vector.asciidoc:423 +// mapping/types/dense-vector.asciidoc:421 [source, python] ---- diff --git a/docs/examples/77447e2966708e92f5e219d43ac3f00d.asciidoc b/docs/examples/77447e2966708e92f5e219d43ac3f00d.asciidoc index 10b268ae4..075cb366a 100644 --- a/docs/examples/77447e2966708e92f5e219d43ac3f00d.asciidoc +++ b/docs/examples/77447e2966708e92f5e219d43ac3f00d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/tasks.asciidoc:226 +// cluster/tasks.asciidoc:232 [source, python] ---- diff --git a/docs/examples/774bfde8793dc4927f7cad2dd91c5b5f.asciidoc b/docs/examples/774bfde8793dc4927f7cad2dd91c5b5f.asciidoc index 40889a7d3..980ce422b 100644 --- a/docs/examples/774bfde8793dc4927f7cad2dd91c5b5f.asciidoc +++ b/docs/examples/774bfde8793dc4927f7cad2dd91c5b5f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/multi-search-template-api.asciidoc:38 +// search/multi-search-template-api.asciidoc:44 [source, python] ---- diff --git a/docs/examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc b/docs/examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc index 12661f21d..f62ff2428 100644 --- a/docs/examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc +++ b/docs/examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc @@ -1,9 +1,12 @@ // This file is autogenerated, DO NOT EDIT -// text-structure/apis/find-message-structure.asciidoc:87 +// text-structure/apis/find-message-structure.asciidoc:93 [source, python] ---- -resp = client.text_structure.find_message_structure( +resp = client.perform_request( + "POST", + "/_text_structure/find_message_structure", + headers={"Content-Type": 
"application/json"}, body={ "messages": [ "[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128", diff --git a/docs/examples/7752b677825523bfb0c38ad9325a6d47.asciidoc b/docs/examples/7752b677825523bfb0c38ad9325a6d47.asciidoc index 75baf555d..1fb038498 100644 --- a/docs/examples/7752b677825523bfb0c38ad9325a6d47.asciidoc +++ b/docs/examples/7752b677825523bfb0c38ad9325a6d47.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/delete-connector-api.asciidoc:70 +// connector/apis/delete-connector-api.asciidoc:79 [source, python] ---- diff --git a/docs/examples/776b553df0e507c96dbdbaedecaca0cc.asciidoc b/docs/examples/776b553df0e507c96dbdbaedecaca0cc.asciidoc index 95f751ddd..e6f55debc 100644 --- a/docs/examples/776b553df0e507c96dbdbaedecaca0cc.asciidoc +++ b/docs/examples/776b553df0e507c96dbdbaedecaca0cc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/infer-trained-model.asciidoc:981 +// ml/trained-models/apis/infer-trained-model.asciidoc:987 [source, python] ---- diff --git a/docs/examples/7781b13b0ffff6026d10c4e3ab4a3a51.asciidoc b/docs/examples/7781b13b0ffff6026d10c4e3ab4a3a51.asciidoc index ed9f75c24..fdb30e0d9 100644 --- a/docs/examples/7781b13b0ffff6026d10c4e3ab4a3a51.asciidoc +++ b/docs/examples/7781b13b0ffff6026d10c4e3ab4a3a51.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// behavioral-analytics/apis/put-analytics-collection.asciidoc:49 +// behavioral-analytics/apis/put-analytics-collection.asciidoc:55 [source, python] ---- diff --git a/docs/examples/77b90f6787195767b6da60d8532714b4.asciidoc b/docs/examples/77b90f6787195767b6da60d8532714b4.asciidoc index 8e749b938..62e3132f7 100644 --- a/docs/examples/77b90f6787195767b6da60d8532714b4.asciidoc +++ b/docs/examples/77b90f6787195767b6da60d8532714b4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-azure-openai.asciidoc:141 +// inference/service-azure-openai.asciidoc:147 [source, python] ---- diff --git a/docs/examples/77c099c97ea6911e2dd6e996da7dcca0.asciidoc b/docs/examples/77c099c97ea6911e2dd6e996da7dcca0.asciidoc index 5536af5cf..c68675eb1 100644 --- a/docs/examples/77c099c97ea6911e2dd6e996da7dcca0.asciidoc +++ b/docs/examples/77c099c97ea6911e2dd6e996da7dcca0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/nodes-hot-threads.asciidoc:72 +// cluster/nodes-hot-threads.asciidoc:78 [source, python] ---- diff --git a/docs/examples/77c50f982906718ecc59aa708aed728f.asciidoc b/docs/examples/77c50f982906718ecc59aa708aed728f.asciidoc index 498e9f892..e36b4be9a 100644 --- a/docs/examples/77c50f982906718ecc59aa708aed728f.asciidoc +++ b/docs/examples/77c50f982906718ecc59aa708aed728f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// scripting/using.asciidoc:294 +// scripting/using.asciidoc:299 [source, python] ---- diff --git a/docs/examples/77ca1a3193f75651e0bf9e8fe5227a04.asciidoc b/docs/examples/77ca1a3193f75651e0bf9e8fe5227a04.asciidoc index 3800f0b25..53ab5fcec 100644 --- a/docs/examples/77ca1a3193f75651e0bf9e8fe5227a04.asciidoc +++ b/docs/examples/77ca1a3193f75651e0bf9e8fe5227a04.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/get-job-model-snapshot-upgrade-stats.asciidoc:121 +// ml/anomaly-detection/apis/get-job-model-snapshot-upgrade-stats.asciidoc:127 [source, python] ---- diff --git 
a/docs/examples/9d47f02a063444da9f098858a1830d28.asciidoc b/docs/examples/77cebba946fe648873a1e7375c13df41.asciidoc similarity index 60% rename from docs/examples/9d47f02a063444da9f098858a1830d28.asciidoc rename to docs/examples/77cebba946fe648873a1e7375c13df41.asciidoc index 0cef3692d..85c688fad 100644 --- a/docs/examples/9d47f02a063444da9f098858a1830d28.asciidoc +++ b/docs/examples/77cebba946fe648873a1e7375c13df41.asciidoc @@ -1,11 +1,12 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:210 +// troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:215 [source, python] ---- resp = client.cluster.put_settings( persistent={ - "cluster.routing.allocation.disk.watermark.low": "30gb" + "cluster.routing.allocation.disk.watermark.low": "90%", + "cluster.routing.allocation.disk.watermark.high": "95%" }, ) print(resp) diff --git a/docs/examples/77e3dcd87d2b2c8e6ec842462b02df1f.asciidoc b/docs/examples/77e3dcd87d2b2c8e6ec842462b02df1f.asciidoc index 80b434a2f..3459e379e 100644 --- a/docs/examples/77e3dcd87d2b2c8e6ec842462b02df1f.asciidoc +++ b/docs/examples/77e3dcd87d2b2c8e6ec842462b02df1f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/clone-index.asciidoc:10 +// indices/clone-index.asciidoc:16 [source, python] ---- diff --git a/docs/examples/78043831fd32004a82930c8ac8a1d809.asciidoc b/docs/examples/78043831fd32004a82930c8ac8a1d809.asciidoc new file mode 100644 index 000000000..f2b32c69a --- /dev/null +++ b/docs/examples/78043831fd32004a82930c8ac8a1d809.asciidoc @@ -0,0 +1,48 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/retrievers-examples.asciidoc:1128 + +[source, python] +---- +resp = client.search( + index="retrievers_example", + retriever={ + "text_similarity_reranker": { + "retriever": { + "rrf": { + "retrievers": [ + { + "standard": { + "query": { + "query_string": { + "query": "(information retrieval) OR (artificial intelligence)", + "default_field": "text" + } + } + } + }, + { + "knn": { + "field": "vector", + "query_vector": [ + 0.23, + 0.67, + 0.89 + ], + "k": 3, + "num_candidates": 5 + } + } + ], + "rank_window_size": 10, + "rank_constant": 1 + } + }, + "field": "text", + "inference_id": "my-rerank-model", + "inference_text": "What are the state of the art applications of AI in information retrieval?" 
+ } + }, + source=False, +) +print(resp) +---- diff --git a/docs/examples/78176cd6f570e1534bb40b19e6e900b6.asciidoc b/docs/examples/78176cd6f570e1534bb40b19e6e900b6.asciidoc index e0ae96d2b..2fd9fd572 100644 --- a/docs/examples/78176cd6f570e1534bb40b19e6e900b6.asciidoc +++ b/docs/examples/78176cd6f570e1534bb40b19e6e900b6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/alias.asciidoc:85 +// cat/alias.asciidoc:93 [source, python] ---- diff --git a/docs/examples/78c4035e4fbf6851140660f6ed2a1fa5.asciidoc b/docs/examples/78c4035e4fbf6851140660f6ed2a1fa5.asciidoc index b980e41a9..130322ecc 100644 --- a/docs/examples/78c4035e4fbf6851140660f6ed2a1fa5.asciidoc +++ b/docs/examples/78c4035e4fbf6851140660f6ed2a1fa5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/stats.asciidoc:115 +// indices/stats.asciidoc:121 [source, python] ---- diff --git a/docs/examples/78c96113ae4ed0054e581b17542528a7.asciidoc b/docs/examples/78c96113ae4ed0054e581b17542528a7.asciidoc index d7e332f84..65574f3f8 100644 --- a/docs/examples/78c96113ae4ed0054e581b17542528a7.asciidoc +++ b/docs/examples/78c96113ae4ed0054e581b17542528a7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/reindex.asciidoc:403 +// docs/reindex.asciidoc:409 [source, python] ---- diff --git a/docs/examples/790684b45bef2bb848ea932f0fd0cfbd.asciidoc b/docs/examples/790684b45bef2bb848ea932f0fd0cfbd.asciidoc new file mode 100644 index 000000000..1ab1bd218 --- /dev/null +++ b/docs/examples/790684b45bef2bb848ea932f0fd0cfbd.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// query-dsl/intervals-query.asciidoc:539 + +[source, python] +---- +resp = client.search( + query={ + "intervals": { + "my_text": { + "all_of": { + "ordered": False, + "max_gaps": 1, + "intervals": [ + { + "match": { + "query": "my favorite food", + "max_gaps": 0, + "ordered": True + } + }, + { + "match": { + "query": "cold porridge", + "max_gaps": 4, + "ordered": True + } + } + ] + } + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/8cad5d95a0e7c103f08be53d0b172558.asciidoc b/docs/examples/79d206a528be704050a437adce2496dd.asciidoc similarity index 53% rename from docs/examples/8cad5d95a0e7c103f08be53d0b172558.asciidoc rename to docs/examples/79d206a528be704050a437adce2496dd.asciidoc index d27833994..25b3aae32 100644 --- a/docs/examples/8cad5d95a0e7c103f08be53d0b172558.asciidoc +++ b/docs/examples/79d206a528be704050a437adce2496dd.asciidoc @@ -1,20 +1,21 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/semantic-text-hybrid-search:20 +// search/retriever.asciidoc:584 [source, python] ---- resp = client.inference.put( - task_type="sparse_embedding", - inference_id="my-elser-endpoint", + task_type="rerank", + inference_id="my-elastic-rerank", inference_config={ - "service": "elser", + "service": "elasticsearch", "service_settings": { + "model_id": ".rerank-v1", + "num_threads": 1, "adaptive_allocations": { "enabled": True, - "min_number_of_allocations": 3, + "min_number_of_allocations": 1, "max_number_of_allocations": 10 - }, - "num_threads": 1 + } } }, ) diff --git a/docs/examples/79f33e05b203eb46eef7958fbc95ef77.asciidoc b/docs/examples/79f33e05b203eb46eef7958fbc95ef77.asciidoc index 01da4fc57..8ddda044a 100644 --- a/docs/examples/79f33e05b203eb46eef7958fbc95ef77.asciidoc +++ b/docs/examples/79f33e05b203eb46eef7958fbc95ef77.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc:87 +// 
ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc:93 [source, python] ---- diff --git a/docs/examples/7a0c633a67244e9703344d036e584d95.asciidoc b/docs/examples/7a0c633a67244e9703344d036e584d95.asciidoc index db177d476..d7325bc54 100644 --- a/docs/examples/7a0c633a67244e9703344d036e584d95.asciidoc +++ b/docs/examples/7a0c633a67244e9703344d036e584d95.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/enable-user-profile.asciidoc:54 +// rest-api/security/enable-user-profile.asciidoc:60 [source, python] ---- diff --git a/docs/examples/7a23a385a63c87cab58fd494870450fd.asciidoc b/docs/examples/7a23a385a63c87cab58fd494870450fd.asciidoc index c38503fb5..4116be624 100644 --- a/docs/examples/7a23a385a63c87cab58fd494870450fd.asciidoc +++ b/docs/examples/7a23a385a63c87cab58fd494870450fd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/create-role-mappings.asciidoc:175 +// rest-api/security/create-role-mappings.asciidoc:181 [source, python] ---- diff --git a/docs/examples/58f6b72009512851843c7b7a20e9504a.asciidoc b/docs/examples/7a2fdfd7b0553d63440af7598f9ad867.asciidoc similarity index 84% rename from docs/examples/58f6b72009512851843c7b7a20e9504a.asciidoc rename to docs/examples/7a2fdfd7b0553d63440af7598f9ad867.asciidoc index 3fd25391d..b6a6cc46d 100644 --- a/docs/examples/58f6b72009512851843c7b7a20e9504a.asciidoc +++ b/docs/examples/7a2fdfd7b0553d63440af7598f9ad867.asciidoc @@ -1,10 +1,10 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/semantic-text.asciidoc:42 +// mapping/types/semantic-text.asciidoc:63 [source, python] ---- resp = client.indices.create( - index="my-index-000002", + index="my-index-000003", mappings={ "properties": { "inference_field": { diff --git a/docs/examples/7a3a7fbd81e5050b42e8c1eca26c7c1d.asciidoc b/docs/examples/7a3a7fbd81e5050b42e8c1eca26c7c1d.asciidoc index a1990bd3f..60b088761 100644 --- a/docs/examples/7a3a7fbd81e5050b42e8c1eca26c7c1d.asciidoc +++ b/docs/examples/7a3a7fbd81e5050b42e8c1eca26c7c1d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/async-search.asciidoc:334 +// search/async-search.asciidoc:340 [source, python] ---- diff --git a/docs/examples/7a8de5606f283f4ef171b015eef6befa.asciidoc b/docs/examples/7a8de5606f283f4ef171b015eef6befa.asciidoc index 3c62552a4..f35c65c58 100644 --- a/docs/examples/7a8de5606f283f4ef171b015eef6befa.asciidoc +++ b/docs/examples/7a8de5606f283f4ef171b015eef6befa.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/stats.asciidoc:143 +// indices/stats.asciidoc:149 [source, python] ---- diff --git a/docs/examples/7ae434b3667c589a8e70fe560f4ee3f9.asciidoc b/docs/examples/7ae434b3667c589a8e70fe560f4ee3f9.asciidoc index 782dd3688..a094945d5 100644 --- a/docs/examples/7ae434b3667c589a8e70fe560f4ee3f9.asciidoc +++ b/docs/examples/7ae434b3667c589a8e70fe560f4ee3f9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update-by-query.asciidoc:12 +// docs/update-by-query.asciidoc:18 [source, python] ---- diff --git a/docs/examples/7b5c231526846f2f7b98d78f3656ae6a.asciidoc b/docs/examples/7b5c231526846f2f7b98d78f3656ae6a.asciidoc index 66cd93a86..62de86215 100644 --- a/docs/examples/7b5c231526846f2f7b98d78f3656ae6a.asciidoc +++ b/docs/examples/7b5c231526846f2f7b98d78f3656ae6a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update.asciidoc:362 +// docs/update.asciidoc:368 [source, python] ---- diff --git 
a/docs/examples/7b864d61767ab283cfd5f9b9ba784b1f.asciidoc b/docs/examples/7b864d61767ab283cfd5f9b9ba784b1f.asciidoc index 510099f4a..fbda20ffb 100644 --- a/docs/examples/7b864d61767ab283cfd5f9b9ba784b1f.asciidoc +++ b/docs/examples/7b864d61767ab283cfd5f9b9ba784b1f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-api-keys.asciidoc:201 +// rest-api/security/get-api-keys.asciidoc:207 [source, python] ---- diff --git a/docs/examples/7ba29f0be2297b54a640b0a17d7ef5ca.asciidoc b/docs/examples/7ba29f0be2297b54a640b0a17d7ef5ca.asciidoc new file mode 100644 index 000000000..1ed294e9c --- /dev/null +++ b/docs/examples/7ba29f0be2297b54a640b0a17d7ef5ca.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// ingest/apis/delete-ip-location-database.asciidoc:16 + +[source, python] +---- +resp = client.perform_request( + "DELETE", + "/_ingest/ip_location/database/my-database-id", +) +print(resp) +---- diff --git a/docs/examples/8593715fcc70315a0816b435551258e0.asciidoc b/docs/examples/7bdc283b96c7a965fae23013647b8578.asciidoc similarity index 79% rename from docs/examples/8593715fcc70315a0816b435551258e0.asciidoc rename to docs/examples/7bdc283b96c7a965fae23013647b8578.asciidoc index 743b90211..b71f4fa2e 100644 --- a/docs/examples/8593715fcc70315a0816b435551258e0.asciidoc +++ b/docs/examples/7bdc283b96c7a965fae23013647b8578.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/semantic-text.asciidoc:185 +// mapping/types/semantic-text.asciidoc:194 [source, python] ---- @@ -7,13 +7,13 @@ resp = client.indices.create( index="test-index", mappings={ "properties": { - "infer_field": { - "type": "semantic_text", - "inference_id": "my-elser-endpoint" - }, "source_field": { "type": "text", "copy_to": "infer_field" + }, + "infer_field": { + "type": "semantic_text", + "inference_id": ".elser-2-elasticsearch" } } }, diff --git a/docs/examples/7cd23457e220c8b64c5b0041d2acc27a.asciidoc b/docs/examples/7cd23457e220c8b64c5b0041d2acc27a.asciidoc index 6685a274c..261211313 100644 --- a/docs/examples/7cd23457e220c8b64c5b0041d2acc27a.asciidoc +++ b/docs/examples/7cd23457e220c8b64c5b0041d2acc27a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// setup/advanced-configuration.asciidoc:124 +// setup/advanced-configuration.asciidoc:123 [source, python] ---- diff --git a/docs/examples/7cd3d8388c51a9f6ee3f730cdaddbb89.asciidoc b/docs/examples/7cd3d8388c51a9f6ee3f730cdaddbb89.asciidoc index 0bbe1f915..d872da8bc 100644 --- a/docs/examples/7cd3d8388c51a9f6ee3f730cdaddbb89.asciidoc +++ b/docs/examples/7cd3d8388c51a9f6ee3f730cdaddbb89.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/update-settings.asciidoc:91 +// indices/update-settings.asciidoc:97 [source, python] ---- diff --git a/docs/examples/7d880157a95f64ad339225d4af71c2de.asciidoc b/docs/examples/7d880157a95f64ad339225d4af71c2de.asciidoc index da85d0ee3..ad48c8c12 100644 --- a/docs/examples/7d880157a95f64ad339225d4af71c2de.asciidoc +++ b/docs/examples/7d880157a95f64ad339225d4af71c2de.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/suggest-user-profile.asciidoc:99 +// rest-api/security/suggest-user-profile.asciidoc:105 [source, python] ---- diff --git a/docs/examples/7dabae9b37d2cbd724f2a069be9e753b.asciidoc b/docs/examples/7dabae9b37d2cbd724f2a069be9e753b.asciidoc index 0c8ed226f..46f020739 100644 --- a/docs/examples/7dabae9b37d2cbd724f2a069be9e753b.asciidoc +++ 
b/docs/examples/7dabae9b37d2cbd724f2a069be9e753b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/reset-job.asciidoc:73 +// ml/anomaly-detection/apis/reset-job.asciidoc:79 [source, python] ---- diff --git a/docs/examples/7daff6b7e668ab8a762b8ab5dff7a167.asciidoc b/docs/examples/7daff6b7e668ab8a762b8ab5dff7a167.asciidoc index 70e4049e8..9b796b318 100644 --- a/docs/examples/7daff6b7e668ab8a762b8ab5dff7a167.asciidoc +++ b/docs/examples/7daff6b7e668ab8a762b8ab5dff7a167.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/sparse-vector-query.asciidoc:255 +// query-dsl/sparse-vector-query.asciidoc:257 [source, python] ---- diff --git a/docs/examples/7dd0d9cc6c5982a2c003d301e90feeba.asciidoc b/docs/examples/7dd0d9cc6c5982a2c003d301e90feeba.asciidoc new file mode 100644 index 000000000..4a8fcc25b --- /dev/null +++ b/docs/examples/7dd0d9cc6c5982a2c003d301e90feeba.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// quickstart/aggs-tutorial.asciidoc:824 + +[source, python] +---- +resp = client.search( + index="kibana_sample_data_ecommerce", + size=0, + aggs={ + "daily_sales": { + "date_histogram": { + "field": "order_date", + "calendar_interval": "day", + "format": "yyyy-MM-dd" + }, + "aggs": { + "revenue": { + "sum": { + "field": "taxful_total_price" + } + }, + "unique_customers": { + "cardinality": { + "field": "customer_id" + } + }, + "avg_basket_size": { + "avg": { + "field": "total_quantity" + } + } + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/7de7e647c1c9cbe0a1df0d104fc0a947.asciidoc b/docs/examples/7de7e647c1c9cbe0a1df0d104fc0a947.asciidoc index 493a2bbd7..c5759f3ec 100644 --- a/docs/examples/7de7e647c1c9cbe0a1df0d104fc0a947.asciidoc +++ b/docs/examples/7de7e647c1c9cbe0a1df0d104fc0a947.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/repository-s3.asciidoc:20 +// snapshot-restore/repository-s3.asciidoc:23 [source, python] ---- diff --git a/docs/examples/7df191cc7f814e410a4ac7261065e6ef.asciidoc b/docs/examples/7df191cc7f814e410a4ac7261065e6ef.asciidoc index 6550ab9e4..9bc5dcb34 100644 --- a/docs/examples/7df191cc7f814e410a4ac7261065e6ef.asciidoc +++ b/docs/examples/7df191cc7f814e410a4ac7261065e6ef.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update-by-query.asciidoc:468 +// docs/update-by-query.asciidoc:474 [source, python] ---- diff --git a/docs/examples/7e126e2751311db60cfcbb22c9c41caa.asciidoc b/docs/examples/7e126e2751311db60cfcbb22c9c41caa.asciidoc index 3a10e81ef..9a2fe5082 100644 --- a/docs/examples/7e126e2751311db60cfcbb22c9c41caa.asciidoc +++ b/docs/examples/7e126e2751311db60cfcbb22c9c41caa.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/shards.asciidoc:386 +// cat/shards.asciidoc:395 [source, python] ---- diff --git a/docs/examples/7e484b8b41f9dbc2bcf1f340db197c1d.asciidoc b/docs/examples/7e484b8b41f9dbc2bcf1f340db197c1d.asciidoc index acbcbaafd..f75c537ad 100644 --- a/docs/examples/7e484b8b41f9dbc2bcf1f340db197c1d.asciidoc +++ b/docs/examples/7e484b8b41f9dbc2bcf1f340db197c1d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/reindex.asciidoc:25 +// docs/reindex.asciidoc:31 [source, python] ---- diff --git a/docs/examples/7e4cb3de3e3c75646b60f9f81ddc59cc.asciidoc b/docs/examples/7e4cb3de3e3c75646b60f9f81ddc59cc.asciidoc index f14130819..2d67112d4 100644 --- a/docs/examples/7e4cb3de3e3c75646b60f9f81ddc59cc.asciidoc +++ 
b/docs/examples/7e4cb3de3e3c75646b60f9f81ddc59cc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/clear-trained-model-deployment-cache.asciidoc:43 +// ml/trained-models/apis/clear-trained-model-deployment-cache.asciidoc:49 [source, python] ---- diff --git a/docs/examples/7e5faa551f2c95ffd627da352563d450.asciidoc b/docs/examples/7e5faa551f2c95ffd627da352563d450.asciidoc index 00324ab22..bb9c725f8 100644 --- a/docs/examples/7e5faa551f2c95ffd627da352563d450.asciidoc +++ b/docs/examples/7e5faa551f2c95ffd627da352563d450.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/create-role-mappings.asciidoc:269 +// rest-api/security/create-role-mappings.asciidoc:275 [source, python] ---- diff --git a/docs/examples/7e74d1a54e816e8f40cfdaa01b070788.asciidoc b/docs/examples/7e74d1a54e816e8f40cfdaa01b070788.asciidoc index fccbcf662..a2911efe7 100644 --- a/docs/examples/7e74d1a54e816e8f40cfdaa01b070788.asciidoc +++ b/docs/examples/7e74d1a54e816e8f40cfdaa01b070788.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/rrf.asciidoc:244 +// search/rrf.asciidoc:250 [source, python] ---- diff --git a/docs/examples/7e77509ab646276ff78f58bb38bec8dd.asciidoc b/docs/examples/7e77509ab646276ff78f58bb38bec8dd.asciidoc index 9424c3552..03278ad8b 100644 --- a/docs/examples/7e77509ab646276ff78f58bb38bec8dd.asciidoc +++ b/docs/examples/7e77509ab646276ff78f58bb38bec8dd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-rules/apis/delete-query-ruleset.asciidoc:69 +// query-rules/apis/delete-query-ruleset.asciidoc:75 [source, python] ---- diff --git a/docs/examples/7ebeb6cf26be5b5ecdfd408bd0fc3215.asciidoc b/docs/examples/7ebeb6cf26be5b5ecdfd408bd0fc3215.asciidoc index 52fc6bcb5..cd921232c 100644 --- a/docs/examples/7ebeb6cf26be5b5ecdfd408bd0fc3215.asciidoc +++ b/docs/examples/7ebeb6cf26be5b5ecdfd408bd0fc3215.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/profile.asciidoc:1243 +// search/profile.asciidoc:1248 [source, python] ---- diff --git a/docs/examples/7ebfb30b3ece855c1b783d9210939469.asciidoc b/docs/examples/7ebfb30b3ece855c1b783d9210939469.asciidoc index 3b4263e19..c3f26825d 100644 --- a/docs/examples/7ebfb30b3ece855c1b783d9210939469.asciidoc +++ b/docs/examples/7ebfb30b3ece855c1b783d9210939469.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/flush-job.asciidoc:102 +// ml/anomaly-detection/apis/flush-job.asciidoc:108 [source, python] ---- diff --git a/docs/examples/7f2d511cb64743c006225e5933a14bb4.asciidoc b/docs/examples/7f2d511cb64743c006225e5933a14bb4.asciidoc index fd3323a81..4412a2a08 100644 --- a/docs/examples/7f2d511cb64743c006225e5933a14bb4.asciidoc +++ b/docs/examples/7f2d511cb64743c006225e5933a14bb4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-across-clusters.asciidoc:72 +// esql/esql-across-clusters.asciidoc:67 [source, python] ---- diff --git a/docs/examples/7f37031fb40b68a61255b7c71d7eed0b.asciidoc b/docs/examples/7f37031fb40b68a61255b7c71d7eed0b.asciidoc index 1bd88d1ea..451938e0c 100644 --- a/docs/examples/7f37031fb40b68a61255b7c71d7eed0b.asciidoc +++ b/docs/examples/7f37031fb40b68a61255b7c71d7eed0b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/watcher/execute-watch.asciidoc:299 +// rest-api/watcher/execute-watch.asciidoc:305 [source, python] ---- diff --git a/docs/examples/7f56755fb6c42f7e6203339a6d0cb6e6.asciidoc 
b/docs/examples/7f56755fb6c42f7e6203339a6d0cb6e6.asciidoc index 08132ba5d..18e25a546 100644 --- a/docs/examples/7f56755fb6c42f7e6203339a6d0cb6e6.asciidoc +++ b/docs/examples/7f56755fb6c42f7e6203339a6d0cb6e6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/match-query.asciidoc:279 +// query-dsl/match-query.asciidoc:283 [source, python] ---- diff --git a/docs/examples/7fb921376cbf66bf9f381bcdd62030ba.asciidoc b/docs/examples/7fb921376cbf66bf9f381bcdd62030ba.asciidoc index 9dd99bfbb..73cd9c68f 100644 --- a/docs/examples/7fb921376cbf66bf9f381bcdd62030ba.asciidoc +++ b/docs/examples/7fb921376cbf66bf9f381bcdd62030ba.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// scripting/apis/get-script-contexts-api.asciidoc:10 +// scripting/apis/get-script-contexts-api.asciidoc:16 [source, python] ---- diff --git a/docs/examples/7fd5883564d183603e60b37d286ac7e2.asciidoc b/docs/examples/7fd5883564d183603e60b37d286ac7e2.asciidoc index 924cb88d3..552dcecb9 100644 --- a/docs/examples/7fd5883564d183603e60b37d286ac7e2.asciidoc +++ b/docs/examples/7fd5883564d183603e60b37d286ac7e2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/delete-expired-data.asciidoc:64 +// ml/anomaly-detection/apis/delete-expired-data.asciidoc:70 [source, python] ---- diff --git a/docs/examples/800861c15bb33ca01a46fb97dde7537a.asciidoc b/docs/examples/800861c15bb33ca01a46fb97dde7537a.asciidoc index 95fda5362..08a7b3942 100644 --- a/docs/examples/800861c15bb33ca01a46fb97dde7537a.asciidoc +++ b/docs/examples/800861c15bb33ca01a46fb97dde7537a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/get-filter.asciidoc:66 +// ml/anomaly-detection/apis/get-filter.asciidoc:72 [source, python] ---- diff --git a/docs/examples/f4d0ef2e0f76babee83d999fe35127f2.asciidoc b/docs/examples/80135e8c644e34cc70ce8a4e7915d1a2.asciidoc similarity index 89% rename from docs/examples/f4d0ef2e0f76babee83d999fe35127f2.asciidoc rename to docs/examples/80135e8c644e34cc70ce8a4e7915d1a2.asciidoc index 9a29988d0..888fe890e 100644 --- a/docs/examples/f4d0ef2e0f76babee83d999fe35127f2.asciidoc +++ b/docs/examples/80135e8c644e34cc70ce8a4e7915d1a2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/processors/attachment.asciidoc:267 +// ingest/processors/attachment.asciidoc:315 [source, python] ---- @@ -12,7 +12,7 @@ resp = client.ingest.put_pipeline( "field": "data", "indexed_chars": 11, "indexed_chars_field": "max_size", - "remove_binary": False + "remove_binary": True } } ], diff --git a/docs/examples/8051766cadded0892290bc2cc06e145c.asciidoc b/docs/examples/8051766cadded0892290bc2cc06e145c.asciidoc index 289db73ae..dea7d94c0 100644 --- a/docs/examples/8051766cadded0892290bc2cc06e145c.asciidoc +++ b/docs/examples/8051766cadded0892290bc2cc06e145c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/watcher/ack-watch.asciidoc:245 +// rest-api/watcher/ack-watch.asciidoc:251 [source, python] ---- diff --git a/docs/examples/808f4db1e2361be77dd6816c1f818139.asciidoc b/docs/examples/808f4db1e2361be77dd6816c1f818139.asciidoc index da86a9850..675a184bc 100644 --- a/docs/examples/808f4db1e2361be77dd6816c1f818139.asciidoc +++ b/docs/examples/808f4db1e2361be77dd6816c1f818139.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/shard-stores.asciidoc:13 +// indices/shard-stores.asciidoc:19 [source, python] ---- diff --git 
a/docs/examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc b/docs/examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc index 2f39bae89..702838d56 100644 --- a/docs/examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc +++ b/docs/examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc @@ -1,9 +1,12 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/bulk-update-api-keys.asciidoc:176 +// rest-api/security/bulk-update-api-keys.asciidoc:182 [source, python] ---- -resp = client.security.bulk_update_api_keys( +resp = client.perform_request( + "POST", + "/_security/api_key/_bulk_update", + headers={"Content-Type": "application/json"}, body={ "ids": [ "VuaCfGcBCdbkQm-e5aOx", diff --git a/docs/examples/80edd2124a822d9f9bf22ecc49d2c2e9.asciidoc b/docs/examples/80edd2124a822d9f9bf22ecc49d2c2e9.asciidoc index b9d0a1b4f..12c0a097e 100644 --- a/docs/examples/80edd2124a822d9f9bf22ecc49d2c2e9.asciidoc +++ b/docs/examples/80edd2124a822d9f9bf22ecc49d2c2e9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// synonyms/apis/get-synonym-rule.asciidoc:67 +// synonyms/apis/get-synonym-rule.asciidoc:72 [source, python] ---- diff --git a/docs/examples/8141b60ad245ece2ff5e8d0817400ee5.asciidoc b/docs/examples/8141b60ad245ece2ff5e8d0817400ee5.asciidoc index 1184d3f7f..b86225d86 100644 --- a/docs/examples/8141b60ad245ece2ff5e8d0817400ee5.asciidoc +++ b/docs/examples/8141b60ad245ece2ff5e8d0817400ee5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// eql/eql-search-api.asciidoc:631 +// eql/eql-search-api.asciidoc:684 [source, python] ---- diff --git a/docs/examples/8141cdaddbe7d794f09f9ee84e46194c.asciidoc b/docs/examples/8141cdaddbe7d794f09f9ee84e46194c.asciidoc index 3a3c00fb9..13fd6a661 100644 --- a/docs/examples/8141cdaddbe7d794f09f9ee84e46194c.asciidoc +++ b/docs/examples/8141cdaddbe7d794f09f9ee84e46194c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/count.asciidoc:67 +// cat/count.asciidoc:73 [source, python] ---- diff --git a/docs/examples/81612c2537386e031b7eb604f6756a71.asciidoc b/docs/examples/81612c2537386e031b7eb604f6756a71.asciidoc index 3f3cf6154..18f8a3bc8 100644 --- a/docs/examples/81612c2537386e031b7eb604f6756a71.asciidoc +++ b/docs/examples/81612c2537386e031b7eb604f6756a71.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/clone-index.asciidoc:117 +// indices/clone-index.asciidoc:123 [source, python] ---- diff --git a/docs/examples/81aad155ff23b1b396833b1182c9d46b.asciidoc b/docs/examples/81aad155ff23b1b396833b1182c9d46b.asciidoc index 99962351b..eb41e3131 100644 --- a/docs/examples/81aad155ff23b1b396833b1182c9d46b.asciidoc +++ b/docs/examples/81aad155ff23b1b396833b1182c9d46b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/disk-usage-exceeded.asciidoc:31 +// troubleshooting/common-issues/disk-usage-exceeded.asciidoc:35 [source, python] ---- diff --git a/docs/examples/821422f8a03dc98d024a15fc737fe9eb.asciidoc b/docs/examples/821422f8a03dc98d024a15fc737fe9eb.asciidoc index c6a34d582..c6c064054 100644 --- a/docs/examples/821422f8a03dc98d024a15fc737fe9eb.asciidoc +++ b/docs/examples/821422f8a03dc98d024a15fc737fe9eb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/delete-trained-models-aliases.asciidoc:51 +// ml/trained-models/apis/delete-trained-models-aliases.asciidoc:57 [source, python] ---- diff --git a/docs/examples/824fded1f9db28906ae7e85ae8de9bd0.asciidoc 
b/docs/examples/824fded1f9db28906ae7e85ae8de9bd0.asciidoc index 8492ee835..70cd4540b 100644 --- a/docs/examples/824fded1f9db28906ae7e85ae8de9bd0.asciidoc +++ b/docs/examples/824fded1f9db28906ae7e85ae8de9bd0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ccr/apis/follow/post-resume-follow.asciidoc:84 +// ccr/apis/follow/post-resume-follow.asciidoc:90 [source, python] ---- diff --git a/docs/examples/827b7e9308ea288f18aea00a5accc38e.asciidoc b/docs/examples/827b7e9308ea288f18aea00a5accc38e.asciidoc index cc91ffc4d..fb48fb59b 100644 --- a/docs/examples/827b7e9308ea288f18aea00a5accc38e.asciidoc +++ b/docs/examples/827b7e9308ea288f18aea00a5accc38e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/get-component-template.asciidoc:40 +// indices/get-component-template.asciidoc:46 [source, python] ---- diff --git a/docs/examples/82844ef45e11c0eece100d3109db3182.asciidoc b/docs/examples/82844ef45e11c0eece100d3109db3182.asciidoc index 8886c646e..b601d54d0 100644 --- a/docs/examples/82844ef45e11c0eece100d3109db3182.asciidoc +++ b/docs/examples/82844ef45e11c0eece100d3109db3182.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-amazon-bedrock.asciidoc:174 +// inference/service-amazon-bedrock.asciidoc:180 [source, python] ---- diff --git a/docs/examples/828f0045747fde4888a947bb99e190e3.asciidoc b/docs/examples/828f0045747fde4888a947bb99e190e3.asciidoc index c14704b5a..5e62a1545 100644 --- a/docs/examples/828f0045747fde4888a947bb99e190e3.asciidoc +++ b/docs/examples/828f0045747fde4888a947bb99e190e3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:601 +// search/retriever.asciidoc:792 [source, python] ---- diff --git a/docs/examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc b/docs/examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc new file mode 100644 index 000000000..f5e9f0231 --- /dev/null +++ b/docs/examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// inference/chat-completion-inference.asciidoc:318 + +[source, python] +---- +resp = client.perform_request( + "POST", + "/_inference/chat_completion/openai-completion/_stream", + headers={"Content-Type": "application/json"}, + body={ + "messages": [ + { + "role": "assistant", + "content": "Let's find out what the weather is", + "tool_calls": [ + { + "id": "call_KcAjWtAww20AihPHphUh46Gd", + "type": "function", + "function": { + "name": "get_current_weather", + "arguments": "{\"location\":\"Boston, MA\"}" + } + } + ] + }, + { + "role": "tool", + "content": "The weather is cold", + "tool_call_id": "call_KcAjWtAww20AihPHphUh46Gd" + } + ] + }, +) +print(resp) +---- diff --git a/docs/examples/82d6de3081de7b0664f44adf2942675a.asciidoc b/docs/examples/82d6de3081de7b0664f44adf2942675a.asciidoc index 74c43c0f7..b14f9ce36 100644 --- a/docs/examples/82d6de3081de7b0664f44adf2942675a.asciidoc +++ b/docs/examples/82d6de3081de7b0664f44adf2942675a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// behavioral-analytics/apis/list-analytics-collection.asciidoc:85 +// behavioral-analytics/apis/list-analytics-collection.asciidoc:91 [source, python] ---- diff --git a/docs/examples/8330b2ea6317769e52d0647ba434b354.asciidoc b/docs/examples/8330b2ea6317769e52d0647ba434b354.asciidoc index 6ee95743d..08a669440 100644 --- a/docs/examples/8330b2ea6317769e52d0647ba434b354.asciidoc +++ b/docs/examples/8330b2ea6317769e52d0647ba434b354.asciidoc @@ -1,5 +1,5 @@ // This 
file is autogenerated, DO NOT EDIT -// docs/multi-get.asciidoc:262 +// docs/multi-get.asciidoc:268 [source, python] ---- diff --git a/docs/examples/8357aa6099089940589ae3e97e7bcffa.asciidoc b/docs/examples/8357aa6099089940589ae3e97e7bcffa.asciidoc index 0ae02fd4c..867b8527a 100644 --- a/docs/examples/8357aa6099089940589ae3e97e7bcffa.asciidoc +++ b/docs/examples/8357aa6099089940589ae3e97e7bcffa.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/downsampling-ilm.asciidoc:381 +// data-streams/downsampling-dsl.asciidoc:362 [source, python] ---- diff --git a/docs/examples/838a4eabebba4c06100fb37dc30c7722.asciidoc b/docs/examples/838a4eabebba4c06100fb37dc30c7722.asciidoc index 07d36ca6b..4c2576b1a 100644 --- a/docs/examples/838a4eabebba4c06100fb37dc30c7722.asciidoc +++ b/docs/examples/838a4eabebba4c06100fb37dc30c7722.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/rollup-search.asciidoc:78 +// rollup/apis/rollup-search.asciidoc:84 [source, python] ---- diff --git a/docs/examples/839710129a165cf93c6e329abedf9089.asciidoc b/docs/examples/839710129a165cf93c6e329abedf9089.asciidoc index 393e65035..9bb354833 100644 --- a/docs/examples/839710129a165cf93c6e329abedf9089.asciidoc +++ b/docs/examples/839710129a165cf93c6e329abedf9089.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/update-cross-cluster-api-key.asciidoc:83 +// rest-api/security/update-cross-cluster-api-key.asciidoc:89 [source, python] ---- diff --git a/docs/examples/839a4b2930856790e34cc9dfeb983284.asciidoc b/docs/examples/839a4b2930856790e34cc9dfeb983284.asciidoc index bbb931afe..0e13d90e2 100644 --- a/docs/examples/839a4b2930856790e34cc9dfeb983284.asciidoc +++ b/docs/examples/839a4b2930856790e34cc9dfeb983284.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/downsampling.asciidoc:90 +// data-streams/downsampling.asciidoc:129 [source, python] ---- diff --git a/docs/examples/83b94f9e7b3a9abca8e165ea56927714.asciidoc b/docs/examples/83b94f9e7b3a9abca8e165ea56927714.asciidoc index 30cf5041e..b17d0c5b2 100644 --- a/docs/examples/83b94f9e7b3a9abca8e165ea56927714.asciidoc +++ b/docs/examples/83b94f9e7b3a9abca8e165ea56927714.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/rollover-index.asciidoc:380 +// indices/rollover-index.asciidoc:386 [source, python] ---- diff --git a/docs/examples/841d8b766902c8e3ae85c228a31383ac.asciidoc b/docs/examples/841d8b766902c8e3ae85c228a31383ac.asciidoc index 17e5ecf63..89b2d222d 100644 --- a/docs/examples/841d8b766902c8e3ae85c228a31383ac.asciidoc +++ b/docs/examples/841d8b766902c8e3ae85c228a31383ac.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// sql/apis/get-async-sql-search-status-api.asciidoc:12 +// sql/apis/get-async-sql-search-status-api.asciidoc:18 [source, python] ---- diff --git a/docs/examples/84237aa9da49ab4b4c4e2b21d2548df2.asciidoc b/docs/examples/84237aa9da49ab4b4c4e2b21d2548df2.asciidoc index ca3a5b11e..a181069b1 100644 --- a/docs/examples/84237aa9da49ab4b4c4e2b21d2548df2.asciidoc +++ b/docs/examples/84237aa9da49ab4b4c4e2b21d2548df2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/apis/verify-repo-integrity-api.asciidoc:25 +// snapshot-restore/apis/verify-repo-integrity-api.asciidoc:31 [source, python] ---- diff --git a/docs/examples/84c61160ca815e29e9973ba1380219dd.asciidoc b/docs/examples/84c61160ca815e29e9973ba1380219dd.asciidoc index 64f0af253..99b6078e2 100644 
--- a/docs/examples/84c61160ca815e29e9973ba1380219dd.asciidoc +++ b/docs/examples/84c61160ca815e29e9973ba1380219dd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// searchable-snapshots/apis/shard-stats.asciidoc:73 +// searchable-snapshots/apis/shard-stats.asciidoc:79 [source, python] ---- diff --git a/docs/examples/84c69fb07050f0e89720007a6507a221.asciidoc b/docs/examples/84c69fb07050f0e89720007a6507a221.asciidoc index e89a8c84b..0a74815fc 100644 --- a/docs/examples/84c69fb07050f0e89720007a6507a221.asciidoc +++ b/docs/examples/84c69fb07050f0e89720007a6507a221.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/high-cpu-usage.asciidoc:114 +// troubleshooting/common-issues/high-cpu-usage.asciidoc:118 [source, python] ---- diff --git a/docs/examples/853fc710cea79fb4e1a85fb6d149f9c5.asciidoc b/docs/examples/853fc710cea79fb4e1a85fb6d149f9c5.asciidoc index 70473d84b..e41858318 100644 --- a/docs/examples/853fc710cea79fb4e1a85fb6d149f9c5.asciidoc +++ b/docs/examples/853fc710cea79fb4e1a85fb6d149f9c5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:640 +// search/retriever.asciidoc:831 [source, python] ---- diff --git a/docs/examples/8582e918a6275472d2eba2e95f1dbe77.asciidoc b/docs/examples/8582e918a6275472d2eba2e95f1dbe77.asciidoc index 1f744bb67..5f0baae9a 100644 --- a/docs/examples/8582e918a6275472d2eba2e95f1dbe77.asciidoc +++ b/docs/examples/8582e918a6275472d2eba2e95f1dbe77.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/disk-usage-exceeded.asciidoc:61 +// troubleshooting/common-issues/disk-usage-exceeded.asciidoc:65 [source, python] ---- diff --git a/docs/examples/85e2719d9fd6d2c2d47d28d39f2e3f7e.asciidoc b/docs/examples/85e2719d9fd6d2c2d47d28d39f2e3f7e.asciidoc index 385a7529e..3aab09fac 100644 --- a/docs/examples/85e2719d9fd6d2c2d47d28d39f2e3f7e.asciidoc +++ b/docs/examples/85e2719d9fd6d2c2d47d28d39f2e3f7e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// migration/apis/feature-migration.asciidoc:47 +// migration/apis/feature-migration.asciidoc:53 [source, python] ---- diff --git a/docs/examples/85f2839beeb71edb66988e5c82188be0.asciidoc b/docs/examples/85f2839beeb71edb66988e5c82188be0.asciidoc index 09bc916c5..c42c597e7 100644 --- a/docs/examples/85f2839beeb71edb66988e5c82188be0.asciidoc +++ b/docs/examples/85f2839beeb71edb66988e5c82188be0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// licensing/update-license.asciidoc:63 +// licensing/update-license.asciidoc:69 [source, python] ---- diff --git a/docs/examples/85f9fc6f98e8573efed9b034e853d5ae.asciidoc b/docs/examples/85f9fc6f98e8573efed9b034e853d5ae.asciidoc index b7610f139..107825f54 100644 --- a/docs/examples/85f9fc6f98e8573efed9b034e853d5ae.asciidoc +++ b/docs/examples/85f9fc6f98e8573efed9b034e853d5ae.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-elasticsearch.asciidoc:248 +// inference/service-elasticsearch.asciidoc:289 [source, python] ---- diff --git a/docs/examples/8619bd17bbfe33490b1f277007f654db.asciidoc b/docs/examples/8619bd17bbfe33490b1f277007f654db.asciidoc index 03d8b42f7..d48245664 100644 --- a/docs/examples/8619bd17bbfe33490b1f277007f654db.asciidoc +++ b/docs/examples/8619bd17bbfe33490b1f277007f654db.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-cohere.asciidoc:208 +// inference/service-cohere.asciidoc:214 [source, 
python] ---- diff --git a/docs/examples/861f5f61409dc87f3671293b87839ff7.asciidoc b/docs/examples/861f5f61409dc87f3671293b87839ff7.asciidoc index 23dd310c0..5993138ca 100644 --- a/docs/examples/861f5f61409dc87f3671293b87839ff7.asciidoc +++ b/docs/examples/861f5f61409dc87f3671293b87839ff7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/stats.asciidoc:1533 +// cluster/stats.asciidoc:1542 [source, python] ---- diff --git a/docs/examples/86280dcb49aa89083be4b2644daf1b7c.asciidoc b/docs/examples/86280dcb49aa89083be4b2644daf1b7c.asciidoc index 17ed48338..871123f58 100644 --- a/docs/examples/86280dcb49aa89083be4b2644daf1b7c.asciidoc +++ b/docs/examples/86280dcb49aa89083be4b2644daf1b7c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/get-job.asciidoc:234 +// ml/anomaly-detection/apis/get-job.asciidoc:240 [source, python] ---- diff --git a/docs/examples/8684589e31d96ab229e8c4feb4d704bb.asciidoc b/docs/examples/8684589e31d96ab229e8c4feb4d704bb.asciidoc index 56b0b8967..cac6bcbf9 100644 --- a/docs/examples/8684589e31d96ab229e8c4feb4d704bb.asciidoc +++ b/docs/examples/8684589e31d96ab229e8c4feb4d704bb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/apis/enrich/get-enrich-policy.asciidoc:124 +// ingest/apis/enrich/get-enrich-policy.asciidoc:130 [source, python] ---- diff --git a/docs/examples/8699d35269a47ba867fa8cc766287413.asciidoc b/docs/examples/8699d35269a47ba867fa8cc766287413.asciidoc index 6606c3b1f..5ad7fc06e 100644 --- a/docs/examples/8699d35269a47ba867fa8cc766287413.asciidoc +++ b/docs/examples/8699d35269a47ba867fa8cc766287413.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// licensing/start-basic.asciidoc:42 +// licensing/start-basic.asciidoc:48 [source, python] ---- diff --git a/docs/examples/8739fad1fb2323950b673acf0c9f2ff5.asciidoc b/docs/examples/8739fad1fb2323950b673acf0c9f2ff5.asciidoc index 2a0845659..bc5186e11 100644 --- a/docs/examples/8739fad1fb2323950b673acf0c9f2ff5.asciidoc +++ b/docs/examples/8739fad1fb2323950b673acf0c9f2ff5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/open-close.asciidoc:120 +// indices/open-close.asciidoc:126 [source, python] ---- diff --git a/docs/examples/87416e6a1ca2da324dbed6deb05303eb.asciidoc b/docs/examples/87416e6a1ca2da324dbed6deb05303eb.asciidoc index 98b216fe6..3a6004b3e 100644 --- a/docs/examples/87416e6a1ca2da324dbed6deb05303eb.asciidoc +++ b/docs/examples/87416e6a1ca2da324dbed6deb05303eb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/count.asciidoc:105 +// search/count.asciidoc:112 [source, python] ---- diff --git a/docs/examples/87469f8b7e9b965408479d276c3ce8aa.asciidoc b/docs/examples/87469f8b7e9b965408479d276c3ce8aa.asciidoc index fcd5ca517..7d6a699e2 100644 --- a/docs/examples/87469f8b7e9b965408479d276c3ce8aa.asciidoc +++ b/docs/examples/87469f8b7e9b965408479d276c3ce8aa.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// behavioral-analytics/apis/list-analytics-collection.asciidoc:105 +// behavioral-analytics/apis/list-analytics-collection.asciidoc:111 [source, python] ---- diff --git a/docs/examples/87733deeea4b441b595d19a0f97346f0.asciidoc b/docs/examples/87733deeea4b441b595d19a0f97346f0.asciidoc index 1f2c7ceaa..58f99b552 100644 --- a/docs/examples/87733deeea4b441b595d19a0f97346f0.asciidoc +++ b/docs/examples/87733deeea4b441b595d19a0f97346f0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// 
health/health.asciidoc:473 +// health/health.asciidoc:479 [source, python] ---- diff --git a/docs/examples/87b0b496747ad6c1e4ab4b462128fa1c.asciidoc b/docs/examples/87b0b496747ad6c1e4ab4b462128fa1c.asciidoc index 15aa7398b..39ef07bad 100644 --- a/docs/examples/87b0b496747ad6c1e4ab4b462128fa1c.asciidoc +++ b/docs/examples/87b0b496747ad6c1e4ab4b462128fa1c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/nodeattrs.asciidoc:113 +// cat/nodeattrs.asciidoc:119 [source, python] ---- diff --git a/docs/examples/87c3e9963400a3e4b296ef8d1c86fae3.asciidoc b/docs/examples/87c3e9963400a3e4b296ef8d1c86fae3.asciidoc index 326753968..c582bf9a0 100644 --- a/docs/examples/87c3e9963400a3e4b296ef8d1c86fae3.asciidoc +++ b/docs/examples/87c3e9963400a3e4b296ef8d1c86fae3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/clear-roles-cache.asciidoc:49 +// rest-api/security/clear-roles-cache.asciidoc:55 [source, python] ---- diff --git a/docs/examples/87c42ef733a50954e4d757fc0a08decc.asciidoc b/docs/examples/87c42ef733a50954e4d757fc0a08decc.asciidoc index 699f6f86e..a696167e8 100644 --- a/docs/examples/87c42ef733a50954e4d757fc0a08decc.asciidoc +++ b/docs/examples/87c42ef733a50954e4d757fc0a08decc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-api-keys.asciidoc:255 +// rest-api/security/get-api-keys.asciidoc:261 [source, python] ---- diff --git a/docs/examples/88554b79dba8fd79991855a692b69ff9.asciidoc b/docs/examples/88554b79dba8fd79991855a692b69ff9.asciidoc index c96cce159..5b9b18135 100644 --- a/docs/examples/88554b79dba8fd79991855a692b69ff9.asciidoc +++ b/docs/examples/88554b79dba8fd79991855a692b69ff9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// graph/explore.asciidoc:309 +// graph/explore.asciidoc:315 [source, python] ---- diff --git a/docs/examples/88cecae3f0363fc186d955dd8616b5d4.asciidoc b/docs/examples/88cecae3f0363fc186d955dd8616b5d4.asciidoc index 8910550de..293270942 100644 --- a/docs/examples/88cecae3f0363fc186d955dd8616b5d4.asciidoc +++ b/docs/examples/88cecae3f0363fc186d955dd8616b5d4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// eql/get-async-eql-status-api.asciidoc:83 +// eql/get-async-eql-status-api.asciidoc:90 [source, python] ---- diff --git a/docs/examples/88cf60d3310a56d8ae12704abc05b565.asciidoc b/docs/examples/88cf60d3310a56d8ae12704abc05b565.asciidoc index ce84bd692..e2a7fb005 100644 --- a/docs/examples/88cf60d3310a56d8ae12704abc05b565.asciidoc +++ b/docs/examples/88cf60d3310a56d8ae12704abc05b565.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// licensing/get-trial-status.asciidoc:40 +// licensing/get-trial-status.asciidoc:46 [source, python] ---- diff --git a/docs/examples/89b72dd7f747f6297c2b089e8bc807be.asciidoc b/docs/examples/89b72dd7f747f6297c2b089e8bc807be.asciidoc index 4b43638cc..e5818f9d3 100644 --- a/docs/examples/89b72dd7f747f6297c2b089e8bc807be.asciidoc +++ b/docs/examples/89b72dd7f747f6297c2b089e8bc807be.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/apis/put-repo-api.asciidoc:10 +// snapshot-restore/apis/put-repo-api.asciidoc:16 [source, python] ---- diff --git a/docs/examples/89d2a3748dc14c6d5d4c6f94b9b03938.asciidoc b/docs/examples/89d2a3748dc14c6d5d4c6f94b9b03938.asciidoc index 53f24fc7b..fe773300a 100644 --- a/docs/examples/89d2a3748dc14c6d5d4c6f94b9b03938.asciidoc +++ b/docs/examples/89d2a3748dc14c6d5d4c6f94b9b03938.asciidoc @@ -1,5 +1,5 @@ // This 
file is autogenerated, DO NOT EDIT -// indices/split-index.asciidoc:44 +// indices/split-index.asciidoc:50 [source, python] ---- diff --git a/docs/examples/89f8eac24f3ec6a7668d580aaf0eeefa.asciidoc b/docs/examples/89f8eac24f3ec6a7668d580aaf0eeefa.asciidoc index d608db517..ae121d5b9 100644 --- a/docs/examples/89f8eac24f3ec6a7668d580aaf0eeefa.asciidoc +++ b/docs/examples/89f8eac24f3ec6a7668d580aaf0eeefa.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/analyze.asciidoc:286 +// indices/analyze.asciidoc:292 [source, python] ---- diff --git a/docs/examples/c8fa8d7e029792d539464fede18ce258.asciidoc b/docs/examples/8a0b5f759de3f27f0801c1176e616117.asciidoc similarity index 75% rename from docs/examples/c8fa8d7e029792d539464fede18ce258.asciidoc rename to docs/examples/8a0b5f759de3f27f0801c1176e616117.asciidoc index e75f79036..4a66835cb 100644 --- a/docs/examples/c8fa8d7e029792d539464fede18ce258.asciidoc +++ b/docs/examples/8a0b5f759de3f27f0801c1176e616117.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/semantic-search-semantic-text.asciidoc:72 +// search/search-your-data/semantic-search-semantic-text.asciidoc:36 [source, python] ---- @@ -8,8 +8,7 @@ resp = client.indices.create( mappings={ "properties": { "content": { - "type": "semantic_text", - "inference_id": "my-elser-endpoint" + "type": "semantic_text" } } }, diff --git a/docs/examples/8a12cd824404d74f098d854716a26899.asciidoc b/docs/examples/8a12cd824404d74f098d854716a26899.asciidoc index b8edf1b62..a97fdd91e 100644 --- a/docs/examples/8a12cd824404d74f098d854716a26899.asciidoc +++ b/docs/examples/8a12cd824404d74f098d854716a26899.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/delete-datafeed.asciidoc:43 +// ml/anomaly-detection/apis/delete-datafeed.asciidoc:49 [source, python] ---- diff --git a/docs/examples/8a1f6cffa653800282c0ae160ee375bc.asciidoc b/docs/examples/8a1f6cffa653800282c0ae160ee375bc.asciidoc index e9aae2f90..5c195ab8f 100644 --- a/docs/examples/8a1f6cffa653800282c0ae160ee375bc.asciidoc +++ b/docs/examples/8a1f6cffa653800282c0ae160ee375bc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update.asciidoc:155 +// docs/update.asciidoc:161 [source, python] ---- diff --git a/docs/examples/8aa17bd25a3f2d634e5253b4b72fec4c.asciidoc b/docs/examples/8aa17bd25a3f2d634e5253b4b72fec4c.asciidoc index d6e4a61fb..750d1778b 100644 --- a/docs/examples/8aa17bd25a3f2d634e5253b4b72fec4c.asciidoc +++ b/docs/examples/8aa17bd25a3f2d634e5253b4b72fec4c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/df-analytics/apis/explain-dfanalytics.asciidoc:120 +// ml/df-analytics/apis/explain-dfanalytics.asciidoc:126 [source, python] ---- diff --git a/docs/examples/8b144b3eb20872595fd7cbc6c245c7c8.asciidoc b/docs/examples/8b144b3eb20872595fd7cbc6c245c7c8.asciidoc index fcb54f662..8573b7472 100644 --- a/docs/examples/8b144b3eb20872595fd7cbc6c245c7c8.asciidoc +++ b/docs/examples/8b144b3eb20872595fd7cbc6c245c7c8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/query-role.asciidoc:127 +// rest-api/security/query-role.asciidoc:133 [source, python] ---- diff --git a/docs/examples/8b301122cbf42be6eafeda714a36559e.asciidoc b/docs/examples/8b301122cbf42be6eafeda714a36559e.asciidoc index 5d3c43e24..f1161a356 100644 --- a/docs/examples/8b301122cbf42be6eafeda714a36559e.asciidoc +++ b/docs/examples/8b301122cbf42be6eafeda714a36559e.asciidoc @@ -1,5 +1,5 @@ 
// This file is autogenerated, DO NOT EDIT -// rest-api/logstash/put-pipeline.asciidoc:74 +// rest-api/logstash/put-pipeline.asciidoc:80 [source, python] ---- diff --git a/docs/examples/8b3a94495127efd9d56b2cd7f3eecdca.asciidoc b/docs/examples/8b3a94495127efd9d56b2cd7f3eecdca.asciidoc index 2d6c0a251..8e1b601bc 100644 --- a/docs/examples/8b3a94495127efd9d56b2cd7f3eecdca.asciidoc +++ b/docs/examples/8b3a94495127efd9d56b2cd7f3eecdca.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-role-mappings.asciidoc:64 +// rest-api/security/get-role-mappings.asciidoc:70 [source, python] ---- diff --git a/docs/examples/8b8b6aac2111b2d8b93758ac737e6543.asciidoc b/docs/examples/8b8b6aac2111b2d8b93758ac737e6543.asciidoc index 55f7d68aa..31bfc8362 100644 --- a/docs/examples/8b8b6aac2111b2d8b93758ac737e6543.asciidoc +++ b/docs/examples/8b8b6aac2111b2d8b93758ac737e6543.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/fields/synthetic-source.asciidoc:231 +// mapping/fields/synthetic-source.asciidoc:224 [source, python] ---- diff --git a/docs/examples/8bf1e7a6d529547906ba8b1d6501fa0c.asciidoc b/docs/examples/8bf1e7a6d529547906ba8b1d6501fa0c.asciidoc index 1c44b7e5c..0770bb31a 100644 --- a/docs/examples/8bf1e7a6d529547906ba8b1d6501fa0c.asciidoc +++ b/docs/examples/8bf1e7a6d529547906ba8b1d6501fa0c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/set-connector-sync-job-error-api.asciidoc:57 +// connector/apis/set-connector-sync-job-error-api.asciidoc:63 [source, python] ---- diff --git a/docs/examples/8c5d48252cd6d1ee26a2bb817f89c78e.asciidoc b/docs/examples/8c5d48252cd6d1ee26a2bb817f89c78e.asciidoc index 63cd82aaa..9be0798d3 100644 --- a/docs/examples/8c5d48252cd6d1ee26a2bb817f89c78e.asciidoc +++ b/docs/examples/8c5d48252cd6d1ee26a2bb817f89c78e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/delete-filter.asciidoc:40 +// ml/anomaly-detection/apis/delete-filter.asciidoc:46 [source, python] ---- diff --git a/docs/examples/8c639d3eef5c2de29e12bd9c6a42d3d4.asciidoc b/docs/examples/8c639d3eef5c2de29e12bd9c6a42d3d4.asciidoc new file mode 100644 index 000000000..124e138f6 --- /dev/null +++ b/docs/examples/8c639d3eef5c2de29e12bd9c6a42d3d4.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// quickstart/aggs-tutorial.asciidoc:738 + +[source, python] +---- +resp = client.search( + index="kibana_sample_data_ecommerce", + size=0, + aggs={ + "categories": { + "terms": { + "field": "category.keyword", + "size": 5, + "order": { + "total_revenue": "desc" + } + }, + "aggs": { + "total_revenue": { + "sum": { + "field": "taxful_total_price" + } + }, + "avg_order_value": { + "avg": { + "field": "taxful_total_price" + } + }, + "total_items": { + "sum": { + "field": "total_quantity" + } + } + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/8c6f3bb8abae9ff1d21e776f16ad1c86.asciidoc b/docs/examples/8c6f3bb8abae9ff1d21e776f16ad1c86.asciidoc index 592b9e8bf..95ffe4194 100644 --- a/docs/examples/8c6f3bb8abae9ff1d21e776f16ad1c86.asciidoc +++ b/docs/examples/8c6f3bb8abae9ff1d21e776f16ad1c86.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/df-analytics/apis/put-dfanalytics.asciidoc:574 +// ml/df-analytics/apis/put-dfanalytics.asciidoc:580 [source, python] ---- diff --git a/docs/examples/8c9081dc738d1290fd76071b283fcaec.asciidoc b/docs/examples/8c9081dc738d1290fd76071b283fcaec.asciidoc index bb9df6b30..b062fd8f1 100644 --- 
a/docs/examples/8c9081dc738d1290fd76071b283fcaec.asciidoc +++ b/docs/examples/8c9081dc738d1290fd76071b283fcaec.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/get.asciidoc:92 +// docs/get.asciidoc:98 [source, python] ---- diff --git a/docs/examples/8cd00a3aba7c3c158277bc032aac2830.asciidoc b/docs/examples/8cd00a3aba7c3c158277bc032aac2830.asciidoc index 7140c60f2..68ae5624a 100644 --- a/docs/examples/8cd00a3aba7c3c158277bc032aac2830.asciidoc +++ b/docs/examples/8cd00a3aba7c3c158277bc032aac2830.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/bulk.asciidoc:610 +// docs/bulk.asciidoc:620 [source, python] ---- diff --git a/docs/examples/8d4dda5d988d568f4f4210a6387e026f.asciidoc b/docs/examples/8d4dda5d988d568f4f4210a6387e026f.asciidoc index dcfbb881e..60df33f6c 100644 --- a/docs/examples/8d4dda5d988d568f4f4210a6387e026f.asciidoc +++ b/docs/examples/8d4dda5d988d568f4f4210a6387e026f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/saml-logout-api.asciidoc:66 +// rest-api/security/saml-logout-api.asciidoc:72 [source, python] ---- diff --git a/docs/examples/8d9b04f2a97f4229dec9e620126de049.asciidoc b/docs/examples/8d9b04f2a97f4229dec9e620126de049.asciidoc index e175422ed..fdcc89bdb 100644 --- a/docs/examples/8d9b04f2a97f4229dec9e620126de049.asciidoc +++ b/docs/examples/8d9b04f2a97f4229dec9e620126de049.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/repository-s3.asciidoc:602 +// snapshot-restore/repository-s3.asciidoc:609 [source, python] ---- diff --git a/docs/examples/8de6fed6ba2b94ce6a12ce076be2b4d7.asciidoc b/docs/examples/8de6fed6ba2b94ce6a12ce076be2b4d7.asciidoc index 30b9b54bb..13ad215ea 100644 --- a/docs/examples/8de6fed6ba2b94ce6a12ce076be2b4d7.asciidoc +++ b/docs/examples/8de6fed6ba2b94ce6a12ce076be2b4d7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/segments.asciidoc:126 +// cat/segments.asciidoc:132 [source, python] ---- diff --git a/docs/examples/8e208098a0156c4c92afe0a06960b230.asciidoc b/docs/examples/8e208098a0156c4c92afe0a06960b230.asciidoc index defa6892f..65f5e3558 100644 --- a/docs/examples/8e208098a0156c4c92afe0a06960b230.asciidoc +++ b/docs/examples/8e208098a0156c4c92afe0a06960b230.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/saml-authenticate-api.asciidoc:83 +// rest-api/security/saml-authenticate-api.asciidoc:89 [source, python] ---- diff --git a/docs/examples/8e2bbef535fef688d397e60e09aefa7f.asciidoc b/docs/examples/8e2bbef535fef688d397e60e09aefa7f.asciidoc index ad334ccf3..d240a9f5e 100644 --- a/docs/examples/8e2bbef535fef688d397e60e09aefa7f.asciidoc +++ b/docs/examples/8e2bbef535fef688d397e60e09aefa7f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/hotspotting.asciidoc:202 +// troubleshooting/common-issues/hotspotting.asciidoc:206 [source, python] ---- diff --git a/docs/examples/8e43bb5b7946143e69d397bb81d87df0.asciidoc b/docs/examples/8e43bb5b7946143e69d397bb81d87df0.asciidoc index db3591bdb..702b5f448 100644 --- a/docs/examples/8e43bb5b7946143e69d397bb81d87df0.asciidoc +++ b/docs/examples/8e43bb5b7946143e69d397bb81d87df0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ccr/apis/follow/get-follow-stats.asciidoc:219 +// ccr/apis/follow/get-follow-stats.asciidoc:225 [source, python] ---- diff --git a/docs/examples/8e89fee0be6a436c4e3d7c152659c47e.asciidoc 
b/docs/examples/8e89fee0be6a436c4e3d7c152659c47e.asciidoc index 30432d61e..d8069a2de 100644 --- a/docs/examples/8e89fee0be6a436c4e3d7c152659c47e.asciidoc +++ b/docs/examples/8e89fee0be6a436c4e3d7c152659c47e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-scheduling-api.asciidoc:90 +// connector/apis/update-connector-scheduling-api.asciidoc:96 [source, python] ---- diff --git a/docs/examples/8e9e7dc5fad2b2b8e74ab4dc225d9c53.asciidoc b/docs/examples/8e9e7dc5fad2b2b8e74ab4dc225d9c53.asciidoc index d6d14075f..f2e57ee11 100644 --- a/docs/examples/8e9e7dc5fad2b2b8e74ab4dc225d9c53.asciidoc +++ b/docs/examples/8e9e7dc5fad2b2b8e74ab4dc225d9c53.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/common/apis/set-upgrade-mode.asciidoc:96 +// ml/common/apis/set-upgrade-mode.asciidoc:102 [source, python] ---- diff --git a/docs/examples/8f4a7f68f2ca3698abdf20026a2d8c5f.asciidoc b/docs/examples/8f4a7f68f2ca3698abdf20026a2d8c5f.asciidoc index 73ac49610..c292f8323 100644 --- a/docs/examples/8f4a7f68f2ca3698abdf20026a2d8c5f.asciidoc +++ b/docs/examples/8f4a7f68f2ca3698abdf20026a2d8c5f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/high-cpu-usage.asciidoc:77 +// troubleshooting/common-issues/high-cpu-usage.asciidoc:81 [source, python] ---- diff --git a/docs/examples/90083d93e46fad2524755b8d4d1306fc.asciidoc b/docs/examples/90083d93e46fad2524755b8d4d1306fc.asciidoc index dfe32a284..9a88ab0c4 100644 --- a/docs/examples/90083d93e46fad2524755b8d4d1306fc.asciidoc +++ b/docs/examples/90083d93e46fad2524755b8d4d1306fc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/set-connector-sync-job-stats-api.asciidoc:75 +// connector/apis/set-connector-sync-job-stats-api.asciidoc:81 [source, python] ---- diff --git a/docs/examples/9054187cbab5c9e1c4ca2a4dba6a5db0.asciidoc b/docs/examples/9054187cbab5c9e1c4ca2a4dba6a5db0.asciidoc index d6939a443..d0e29e412 100644 --- a/docs/examples/9054187cbab5c9e1c4ca2a4dba6a5db0.asciidoc +++ b/docs/examples/9054187cbab5c9e1c4ca2a4dba6a5db0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/info.asciidoc:51 +// rest-api/info.asciidoc:57 [source, python] ---- diff --git a/docs/examples/90631797c7fbda43902abf2cc0ea8304.asciidoc b/docs/examples/90631797c7fbda43902abf2cc0ea8304.asciidoc index a4049b5aa..f98a2eab2 100644 --- a/docs/examples/90631797c7fbda43902abf2cc0ea8304.asciidoc +++ b/docs/examples/90631797c7fbda43902abf2cc0ea8304.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// modules/indices/request_cache.asciidoc:146 +// shard-request-cache.asciidoc:132 [source, python] ---- diff --git a/docs/examples/90c087560ea6c0b7405f710971c86ef0.asciidoc b/docs/examples/90c087560ea6c0b7405f710971c86ef0.asciidoc index f582f5f81..ce7afe167 100644 --- a/docs/examples/90c087560ea6c0b7405f710971c86ef0.asciidoc +++ b/docs/examples/90c087560ea6c0b7405f710971c86ef0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc:113 +// ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc:119 [source, python] ---- diff --git a/docs/examples/90e06d5ec5e454832d8fbd2e73ec2248.asciidoc b/docs/examples/90e06d5ec5e454832d8fbd2e73ec2248.asciidoc index f7a08a261..8799359cf 100644 --- a/docs/examples/90e06d5ec5e454832d8fbd2e73ec2248.asciidoc +++ b/docs/examples/90e06d5ec5e454832d8fbd2e73ec2248.asciidoc @@ -1,5 +1,5 @@ // This file is 
autogenerated, DO NOT EDIT -// autoscaling/apis/delete-autoscaling-policy.asciidoc:79 +// autoscaling/apis/delete-autoscaling-policy.asciidoc:85 [source, python] ---- diff --git a/docs/examples/9116ee8a5b00cc877291ed5559563f24.asciidoc b/docs/examples/9116ee8a5b00cc877291ed5559563f24.asciidoc index 0259220c5..283bb7417 100644 --- a/docs/examples/9116ee8a5b00cc877291ed5559563f24.asciidoc +++ b/docs/examples/9116ee8a5b00cc877291ed5559563f24.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/watcher/ack-watch.asciidoc:62 +// rest-api/watcher/ack-watch.asciidoc:68 [source, python] ---- diff --git a/docs/examples/9129dec88d35571b3166c6677297f03b.asciidoc b/docs/examples/9129dec88d35571b3166c6677297f03b.asciidoc index bf613cbd6..7a1da47bb 100644 --- a/docs/examples/9129dec88d35571b3166c6677297f03b.asciidoc +++ b/docs/examples/9129dec88d35571b3166c6677297f03b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// transform/apis/get-transform.asciidoc:109 +// transform/apis/get-transform.asciidoc:115 [source, python] ---- diff --git a/docs/examples/9138550002cb26ab64918cce427963b8.asciidoc b/docs/examples/9138550002cb26ab64918cce427963b8.asciidoc index 10a10eebc..166525f22 100644 --- a/docs/examples/9138550002cb26ab64918cce427963b8.asciidoc +++ b/docs/examples/9138550002cb26ab64918cce427963b8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-index-template.asciidoc:271 +// indices/put-index-template.asciidoc:277 [source, python] ---- diff --git a/docs/examples/9143be4f137574271953a7a8107e175b.asciidoc b/docs/examples/9143be4f137574271953a7a8107e175b.asciidoc index 56b693906..2bbfd3079 100644 --- a/docs/examples/9143be4f137574271953a7a8107e175b.asciidoc +++ b/docs/examples/9143be4f137574271953a7a8107e175b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-user-profile.asciidoc:63 +// rest-api/security/get-user-profile.asciidoc:69 [source, python] ---- diff --git a/docs/examples/9169d19a80175ec94f80865d0f9bef4c.asciidoc b/docs/examples/9169d19a80175ec94f80865d0f9bef4c.asciidoc index 17091127e..6e0eedc2a 100644 --- a/docs/examples/9169d19a80175ec94f80865d0f9bef4c.asciidoc +++ b/docs/examples/9169d19a80175ec94f80865d0f9bef4c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:279 +// search/retriever.asciidoc:291 [source, python] ---- diff --git a/docs/examples/91c01fcad9bf341d039a15dfc593dcd7.asciidoc b/docs/examples/91c01fcad9bf341d039a15dfc593dcd7.asciidoc index cbbebebab..8f764f138 100644 --- a/docs/examples/91c01fcad9bf341d039a15dfc593dcd7.asciidoc +++ b/docs/examples/91c01fcad9bf341d039a15dfc593dcd7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/field-caps.asciidoc:305 +// search/field-caps.asciidoc:310 [source, python] ---- diff --git a/docs/examples/91c925fc71abe0ddfe52457e9130363b.asciidoc b/docs/examples/91c925fc71abe0ddfe52457e9130363b.asciidoc index 6673f2caa..1a1361ffc 100644 --- a/docs/examples/91c925fc71abe0ddfe52457e9130363b.asciidoc +++ b/docs/examples/91c925fc71abe0ddfe52457e9130363b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/grant-api-keys.asciidoc:172 +// rest-api/security/grant-api-keys.asciidoc:178 [source, python] ---- diff --git a/docs/examples/91e106a2affbc8df32cd940684a779ed.asciidoc b/docs/examples/91e106a2affbc8df32cd940684a779ed.asciidoc new file mode 100644 index 000000000..acc46409b --- /dev/null +++ 
b/docs/examples/91e106a2affbc8df32cd940684a779ed.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// ingest/apis/put-ip-location-database.asciidoc:22 + +[source, python] +---- +resp = client.perform_request( + "PUT", + "/_ingest/ip_location/database/my-database-1", + headers={"Content-Type": "application/json"}, + body={ + "name": "GeoIP2-Domain", + "maxmind": { + "account_id": "1234567" + } + }, +) +print(resp) +---- diff --git a/docs/examples/9200ed8d5f798a158def4c526e41269e.asciidoc b/docs/examples/9200ed8d5f798a158def4c526e41269e.asciidoc index 935b9089d..4514ba050 100644 --- a/docs/examples/9200ed8d5f798a158def4c526e41269e.asciidoc +++ b/docs/examples/9200ed8d5f798a158def4c526e41269e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/field-caps.asciidoc:186 +// search/field-caps.asciidoc:191 [source, python] ---- diff --git a/docs/examples/9225841fdcddaf83ebdb90c2b0399e20.asciidoc b/docs/examples/9225841fdcddaf83ebdb90c2b0399e20.asciidoc index e2f8526f9..51ebb8041 100644 --- a/docs/examples/9225841fdcddaf83ebdb90c2b0399e20.asciidoc +++ b/docs/examples/9225841fdcddaf83ebdb90c2b0399e20.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/get-trained-models-stats.asciidoc:405 +// ml/trained-models/apis/get-trained-models-stats.asciidoc:412 [source, python] ---- diff --git a/docs/examples/9250ac57ec81d5192e8ad4c462438489.asciidoc b/docs/examples/9250ac57ec81d5192e8ad4c462438489.asciidoc new file mode 100644 index 000000000..192a0dcf3 --- /dev/null +++ b/docs/examples/9250ac57ec81d5192e8ad4c462438489.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// inference/service-jinaai.asciidoc:204 + +[source, python] +---- +resp = client.bulk( + index="jinaai-index", + operations=[ + { + "index": { + "_index": "jinaai-index", + "_id": "1" + } + }, + { + "content": "Sarah Johnson is a talented marine biologist working at the Oceanographic Institute. Her groundbreaking research on coral reef ecosystems has garnered international attention and numerous accolades." + }, + { + "index": { + "_index": "jinaai-index", + "_id": "2" + } + }, + { + "content": "She spends months at a time diving in remote locations, meticulously documenting the intricate relationships between various marine species. " + }, + { + "index": { + "_index": "jinaai-index", + "_id": "3" + } + }, + { + "content": "Her dedication to preserving these delicate underwater environments has inspired a new generation of conservationists." 
+ } + ], +) +print(resp) +---- diff --git a/docs/examples/926c0134aeaad53bd0f3bdad9c430217.asciidoc b/docs/examples/926c0134aeaad53bd0f3bdad9c430217.asciidoc index 8eed56f99..b40547250 100644 --- a/docs/examples/926c0134aeaad53bd0f3bdad9c430217.asciidoc +++ b/docs/examples/926c0134aeaad53bd0f3bdad9c430217.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/reindex.asciidoc:763 +// docs/reindex.asciidoc:769 [source, python] ---- diff --git a/docs/examples/9270964d35d172ea5b193c5fc7a473dd.asciidoc b/docs/examples/9270964d35d172ea5b193c5fc7a473dd.asciidoc index b95bb8eaa..96a0d9589 100644 --- a/docs/examples/9270964d35d172ea5b193c5fc7a473dd.asciidoc +++ b/docs/examples/9270964d35d172ea5b193c5fc7a473dd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/templates.asciidoc:61 +// cat/templates.asciidoc:67 [source, python] ---- diff --git a/docs/examples/9298aaf8232a819e79b3bf8471245e98.asciidoc b/docs/examples/9298aaf8232a819e79b3bf8471245e98.asciidoc index 5dc7235b5..e926cb058 100644 --- a/docs/examples/9298aaf8232a819e79b3bf8471245e98.asciidoc +++ b/docs/examples/9298aaf8232a819e79b3bf8471245e98.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/get-job-stats.asciidoc:375 +// ml/anomaly-detection/apis/get-job-stats.asciidoc:381 [source, python] ---- diff --git a/docs/examples/92fe53019958ba466d1272da0834cf53.asciidoc b/docs/examples/92fe53019958ba466d1272da0834cf53.asciidoc index d7be84156..1b810cb98 100644 --- a/docs/examples/92fe53019958ba466d1272da0834cf53.asciidoc +++ b/docs/examples/92fe53019958ba466d1272da0834cf53.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/stats.asciidoc:11 +// indices/stats.asciidoc:17 [source, python] ---- diff --git a/docs/examples/931817b168e055ecf738785c721125dd.asciidoc b/docs/examples/931817b168e055ecf738785c721125dd.asciidoc new file mode 100644 index 000000000..76c4ea122 --- /dev/null +++ b/docs/examples/931817b168e055ecf738785c721125dd.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// ingest/processors/inference.asciidoc:750 + +[source, python] +---- +resp = client.ingest.put_pipeline( + id="query_helper_pipeline", + processors=[ + { + "script": { + "source": "ctx.prompt = 'Please generate an elasticsearch search query on index `articles_index` for the following natural language query. Dates are in the field `@timestamp`, document types are in the field `type` (options are `news`, `publication`), categories in the field `category` and can be multiple (options are `medicine`, `pharmaceuticals`, `technology`), and document names are in the field `title` which should use a fuzzy match. 
Ignore fields which cannot be determined from the natural language query context: ' + ctx.content" + } + }, + { + "inference": { + "model_id": "openai_chat_completions", + "input_output": { + "input_field": "prompt", + "output_field": "query" + } + } + }, + { + "remove": { + "field": "prompt" + } + } + ], +) +print(resp) +---- diff --git a/docs/examples/934aa38c3adcc4cf74ea40cd8736876c.asciidoc b/docs/examples/934aa38c3adcc4cf74ea40cd8736876c.asciidoc index 6516c85e6..f500cbfae 100644 --- a/docs/examples/934aa38c3adcc4cf74ea40cd8736876c.asciidoc +++ b/docs/examples/934aa38c3adcc4cf74ea40cd8736876c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/create-index.asciidoc:172 +// indices/create-index.asciidoc:178 [source, python] ---- diff --git a/docs/examples/935566d5426d44ade486a49ec5289741.asciidoc b/docs/examples/935566d5426d44ade486a49ec5289741.asciidoc index 920cadc2f..fade6a47d 100644 --- a/docs/examples/935566d5426d44ade486a49ec5289741.asciidoc +++ b/docs/examples/935566d5426d44ade486a49ec5289741.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/semantic-text-hybrid-search:113 +// search/search-your-data/semantic-text-hybrid-search:76 [source, python] ---- diff --git a/docs/examples/9382f022086c692ba05efb0acae65946.asciidoc b/docs/examples/9382f022086c692ba05efb0acae65946.asciidoc index 782786730..02ad2f12e 100644 --- a/docs/examples/9382f022086c692ba05efb0acae65946.asciidoc +++ b/docs/examples/9382f022086c692ba05efb0acae65946.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/fields/synthetic-source.asciidoc:70 +// mapping/fields/synthetic-source.asciidoc:63 [source, python] ---- diff --git a/docs/examples/93fb59d3204f37af952198b331fb6bb7.asciidoc b/docs/examples/93fb59d3204f37af952198b331fb6bb7.asciidoc index 93b46d640..23150a3d5 100644 --- a/docs/examples/93fb59d3204f37af952198b331fb6bb7.asciidoc +++ b/docs/examples/93fb59d3204f37af952198b331fb6bb7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/tasks.asciidoc:217 +// cluster/tasks.asciidoc:223 [source, python] ---- diff --git a/docs/examples/940e8c2c7ff92d71f489bdb7183c1ce6.asciidoc b/docs/examples/940e8c2c7ff92d71f489bdb7183c1ce6.asciidoc index 61efcb462..c57f84122 100644 --- a/docs/examples/940e8c2c7ff92d71f489bdb7183c1ce6.asciidoc +++ b/docs/examples/940e8c2c7ff92d71f489bdb7183c1ce6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/segments.asciidoc:110 +// indices/segments.asciidoc:116 [source, python] ---- diff --git a/docs/examples/9410af79177dd1df9b7b16229a581e18.asciidoc b/docs/examples/9410af79177dd1df9b7b16229a581e18.asciidoc index e41d2fa89..58e584450 100644 --- a/docs/examples/9410af79177dd1df9b7b16229a581e18.asciidoc +++ b/docs/examples/9410af79177dd1df9b7b16229a581e18.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/change-password.asciidoc:70 +// rest-api/security/change-password.asciidoc:76 [source, python] ---- diff --git a/docs/examples/941c8d05486200e835d97642e4ee05d5.asciidoc b/docs/examples/941c8d05486200e835d97642e4ee05d5.asciidoc index a43bd9f9b..211d8a492 100644 --- a/docs/examples/941c8d05486200e835d97642e4ee05d5.asciidoc +++ b/docs/examples/941c8d05486200e835d97642e4ee05d5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/termvectors.asciidoc:177 +// docs/termvectors.asciidoc:183 [source, python] ---- diff --git a/docs/examples/94246f45025ed394cd6415ed8d7a0588.asciidoc 
b/docs/examples/94246f45025ed394cd6415ed8d7a0588.asciidoc index e0925bba8..9071c0bdb 100644 --- a/docs/examples/94246f45025ed394cd6415ed8d7a0588.asciidoc +++ b/docs/examples/94246f45025ed394cd6415ed8d7a0588.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/delete-job.asciidoc:79 +// rollup/apis/delete-job.asciidoc:85 [source, python] ---- diff --git a/docs/examples/944806221eb89f5af2298ccdf2902277.asciidoc b/docs/examples/944806221eb89f5af2298ccdf2902277.asciidoc index 4fa39ab7e..be677f846 100644 --- a/docs/examples/944806221eb89f5af2298ccdf2902277.asciidoc +++ b/docs/examples/944806221eb89f5af2298ccdf2902277.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/rollup-caps.asciidoc:163 +// rollup/apis/rollup-caps.asciidoc:169 [source, python] ---- diff --git a/docs/examples/946522c26d02bebf5c527ba28e55c724.asciidoc b/docs/examples/946522c26d02bebf5c527ba28e55c724.asciidoc index 1ed6de0de..c817120d6 100644 --- a/docs/examples/946522c26d02bebf5c527ba28e55c724.asciidoc +++ b/docs/examples/946522c26d02bebf5c527ba28e55c724.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update-by-query.asciidoc:352 +// docs/update-by-query.asciidoc:358 [source, python] ---- diff --git a/docs/examples/948418e0ef1b7e7cfee2f11be715d7d2.asciidoc b/docs/examples/948418e0ef1b7e7cfee2f11be715d7d2.asciidoc new file mode 100644 index 000000000..3abdfaedb --- /dev/null +++ b/docs/examples/948418e0ef1b7e7cfee2f11be715d7d2.asciidoc @@ -0,0 +1,111 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/retrievers-examples.asciidoc:465 + +[source, python] +---- +resp = client.indices.create( + index="retrievers_example_nested", + settings={ + "number_of_shards": 1 + }, + mappings={ + "properties": { + "nested_field": { + "type": "nested", + "properties": { + "paragraph_id": { + "type": "keyword" + }, + "nested_vector": { + "type": "dense_vector", + "dims": 3, + "similarity": "l2_norm", + "index": True, + "index_options": { + "type": "flat" + } + } + } + }, + "topic": { + "type": "keyword" + } + } + }, +) +print(resp) + +resp1 = client.index( + index="retrievers_example_nested", + id="1", + document={ + "nested_field": [ + { + "paragraph_id": "1a", + "nested_vector": [ + -1.12, + -0.59, + 0.78 + ] + }, + { + "paragraph_id": "1b", + "nested_vector": [ + -0.12, + 1.56, + 0.42 + ] + }, + { + "paragraph_id": "1c", + "nested_vector": [ + 1, + -1, + 0 + ] + } + ], + "topic": [ + "ai" + ] + }, +) +print(resp1) + +resp2 = client.index( + index="retrievers_example_nested", + id="2", + document={ + "nested_field": [ + { + "paragraph_id": "2a", + "nested_vector": [ + 0.23, + 1.24, + 0.65 + ] + } + ], + "topic": [ + "information_retrieval" + ] + }, +) +print(resp2) + +resp3 = client.index( + index="retrievers_example_nested", + id="3", + document={ + "topic": [ + "ai" + ] + }, +) +print(resp3) + +resp4 = client.indices.refresh( + index="retrievers_example_nested", +) +print(resp4) +---- diff --git a/docs/examples/950f1230536422567f99a205ff4165ec.asciidoc b/docs/examples/950f1230536422567f99a205ff4165ec.asciidoc index 9c6457477..eea712620 100644 --- a/docs/examples/950f1230536422567f99a205ff4165ec.asciidoc +++ b/docs/examples/950f1230536422567f99a205ff4165ec.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/rollover-index.asciidoc:399 +// indices/rollover-index.asciidoc:405 [source, python] ---- diff --git a/docs/examples/95414139c7b1203e3c2d99a354415801.asciidoc 
b/docs/examples/95414139c7b1203e3c2d99a354415801.asciidoc index 54d9b2266..773758b44 100644 --- a/docs/examples/95414139c7b1203e3c2d99a354415801.asciidoc +++ b/docs/examples/95414139c7b1203e3c2d99a354415801.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/recovery.asciidoc:83 +// cat/recovery.asciidoc:89 [source, python] ---- diff --git a/docs/examples/95c1b376652533c352bbf793c74d1b08.asciidoc b/docs/examples/95c1b376652533c352bbf793c74d1b08.asciidoc index 386ee81d0..a0778d4b3 100644 --- a/docs/examples/95c1b376652533c352bbf793c74d1b08.asciidoc +++ b/docs/examples/95c1b376652533c352bbf793c74d1b08.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/query-role.asciidoc:227 +// rest-api/security/query-role.asciidoc:233 [source, python] ---- diff --git a/docs/examples/9608820dbeac261ba53fb89bb9400560.asciidoc b/docs/examples/9608820dbeac261ba53fb89bb9400560.asciidoc index 2f6de15c4..16bed42e9 100644 --- a/docs/examples/9608820dbeac261ba53fb89bb9400560.asciidoc +++ b/docs/examples/9608820dbeac261ba53fb89bb9400560.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-api-keys.asciidoc:233 +// rest-api/security/get-api-keys.asciidoc:239 [source, python] ---- diff --git a/docs/examples/9684e5fa8c22a07a372feb6fc1f5f7c0.asciidoc b/docs/examples/9684e5fa8c22a07a372feb6fc1f5f7c0.asciidoc index 50f6c76d4..9c42ef493 100644 --- a/docs/examples/9684e5fa8c22a07a372feb6fc1f5f7c0.asciidoc +++ b/docs/examples/9684e5fa8c22a07a372feb6fc1f5f7c0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/has-privileges.asciidoc:69 +// rest-api/security/has-privileges.asciidoc:75 [source, python] ---- diff --git a/docs/examples/968fb5b92aa65af09544f7c002b0953e.asciidoc b/docs/examples/968fb5b92aa65af09544f7c002b0953e.asciidoc index 134fe03d9..3746bf530 100644 --- a/docs/examples/968fb5b92aa65af09544f7c002b0953e.asciidoc +++ b/docs/examples/968fb5b92aa65af09544f7c002b0953e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/semantic-search-semantic-text.asciidoc:185 +// search/search-your-data/semantic-search-semantic-text.asciidoc:144 [source, python] ---- diff --git a/docs/examples/19f1f9f25933f8e7aba59a10881c648b.asciidoc b/docs/examples/96e88611f99e6834bd64b58dc8a282c1.asciidoc similarity index 67% rename from docs/examples/19f1f9f25933f8e7aba59a10881c648b.asciidoc rename to docs/examples/96e88611f99e6834bd64b58dc8a282c1.asciidoc index 0b123e38b..61477cef7 100644 --- a/docs/examples/19f1f9f25933f8e7aba59a10881c648b.asciidoc +++ b/docs/examples/96e88611f99e6834bd64b58dc8a282c1.asciidoc @@ -1,15 +1,15 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/semantic-text.asciidoc:21 +// mapping/types/semantic-text.asciidoc:42 [source, python] ---- resp = client.indices.create( - index="my-index-000001", + index="my-index-000002", mappings={ "properties": { "inference_field": { "type": "semantic_text", - "inference_id": "my-elser-endpoint" + "inference_id": "my-openai-endpoint" } } }, diff --git a/docs/examples/971fd23adb81bb5842c7750e0379336a.asciidoc b/docs/examples/971fd23adb81bb5842c7750e0379336a.asciidoc index ffc1c6da8..502b26032 100644 --- a/docs/examples/971fd23adb81bb5842c7750e0379336a.asciidoc +++ b/docs/examples/971fd23adb81bb5842c7750e0379336a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:528 +// search/retriever.asciidoc:719 [source, python] ---- diff --git 
a/docs/examples/973a3ff47fc4ce036ecd9bd363fef9f7.asciidoc b/docs/examples/973a3ff47fc4ce036ecd9bd363fef9f7.asciidoc index 635584804..599db64b4 100644 --- a/docs/examples/973a3ff47fc4ce036ecd9bd363fef9f7.asciidoc +++ b/docs/examples/973a3ff47fc4ce036ecd9bd363fef9f7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/reindex.asciidoc:843 +// docs/reindex.asciidoc:849 [source, python] ---- diff --git a/docs/examples/975b4b92464d52068516aa2f0f955cc1.asciidoc b/docs/examples/975b4b92464d52068516aa2f0f955cc1.asciidoc index 322e7aacd..367434a52 100644 --- a/docs/examples/975b4b92464d52068516aa2f0f955cc1.asciidoc +++ b/docs/examples/975b4b92464d52068516aa2f0f955cc1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/segments.asciidoc:119 +// indices/segments.asciidoc:125 [source, python] ---- diff --git a/docs/examples/97a3216af3d4b4d805d467d9c715cb3e.asciidoc b/docs/examples/97a3216af3d4b4d805d467d9c715cb3e.asciidoc index efa54ac9f..8cad74cda 100644 --- a/docs/examples/97a3216af3d4b4d805d467d9c715cb3e.asciidoc +++ b/docs/examples/97a3216af3d4b4d805d467d9c715cb3e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/get-desired-balance.asciidoc:21 +// cluster/get-desired-balance.asciidoc:27 [source, python] ---- diff --git a/docs/examples/97babc8d19ef0866774576716eb6d19e.asciidoc b/docs/examples/97babc8d19ef0866774576716eb6d19e.asciidoc index f950aedd0..c86db87c1 100644 --- a/docs/examples/97babc8d19ef0866774576716eb6d19e.asciidoc +++ b/docs/examples/97babc8d19ef0866774576716eb6d19e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update-by-query.asciidoc:775 +// docs/update-by-query.asciidoc:781 [source, python] ---- diff --git a/docs/examples/97c6c07f46f4177f0565a04bc50924a3.asciidoc b/docs/examples/97c6c07f46f4177f0565a04bc50924a3.asciidoc new file mode 100644 index 000000000..d6d19299d --- /dev/null +++ b/docs/examples/97c6c07f46f4177f0565a04bc50924a3.asciidoc @@ -0,0 +1,41 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/retrievers-examples.asciidoc:105 + +[source, python] +---- +resp = client.search( + index="retrievers_example", + retriever={ + "rrf": { + "retrievers": [ + { + "standard": { + "query": { + "query_string": { + "query": "(information retrieval) OR (artificial intelligence)", + "default_field": "text" + } + } + } + }, + { + "knn": { + "field": "vector", + "query_vector": [ + 0.23, + 0.67, + 0.89 + ], + "k": 3, + "num_candidates": 5 + } + } + ], + "rank_window_size": 10, + "rank_constant": 1 + } + }, + source=False, +) +print(resp) +---- diff --git a/docs/examples/97ea5ab17213cb1faaf6f3ea13607098.asciidoc b/docs/examples/97ea5ab17213cb1faaf6f3ea13607098.asciidoc index 7d382579c..2df9987cc 100644 --- a/docs/examples/97ea5ab17213cb1faaf6f3ea13607098.asciidoc +++ b/docs/examples/97ea5ab17213cb1faaf6f3ea13607098.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/watcher/start.asciidoc:43 +// rest-api/watcher/start.asciidoc:49 [source, python] ---- diff --git a/docs/examples/97f5df84efec655f479fad78bc392d4d.asciidoc b/docs/examples/97f5df84efec655f479fad78bc392d4d.asciidoc index 0caa87fcd..7a0dba9f4 100644 --- a/docs/examples/97f5df84efec655f479fad78bc392d4d.asciidoc +++ b/docs/examples/97f5df84efec655f479fad78bc392d4d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/profile.asciidoc:830 +// search/profile.asciidoc:835 [source, python] ---- diff --git 
a/docs/examples/986f892bfa4dfdf1da8455fdf84a4b0c.asciidoc b/docs/examples/986f892bfa4dfdf1da8455fdf84a4b0c.asciidoc index 07f78c0cd..3e5ded035 100644 --- a/docs/examples/986f892bfa4dfdf1da8455fdf84a4b0c.asciidoc +++ b/docs/examples/986f892bfa4dfdf1da8455fdf84a4b0c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-alibabacloud-ai-search.asciidoc:222 +// inference/service-alibabacloud-ai-search.asciidoc:228 [source, python] ---- diff --git a/docs/examples/98855f4bda8726d5d123aeebf7869e47.asciidoc b/docs/examples/98855f4bda8726d5d123aeebf7869e47.asciidoc index 87e68355f..1acb209f3 100644 --- a/docs/examples/98855f4bda8726d5d123aeebf7869e47.asciidoc +++ b/docs/examples/98855f4bda8726d5d123aeebf7869e47.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/nodeattrs.asciidoc:82 +// cat/nodeattrs.asciidoc:88 [source, python] ---- diff --git a/docs/examples/99803d7b111b862c0c82e9908e549b16.asciidoc b/docs/examples/99803d7b111b862c0c82e9908e549b16.asciidoc index 3c30cc23d..a33de7a36 100644 --- a/docs/examples/99803d7b111b862c0c82e9908e549b16.asciidoc +++ b/docs/examples/99803d7b111b862c0c82e9908e549b16.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-mistral.asciidoc:107 +// inference/service-mistral.asciidoc:113 [source, python] ---- diff --git a/docs/examples/99c1cfe60f3ccf5bf3abd24c31ed9034.asciidoc b/docs/examples/99c1cfe60f3ccf5bf3abd24c31ed9034.asciidoc index a3bf61263..672e32dbc 100644 --- a/docs/examples/99c1cfe60f3ccf5bf3abd24c31ed9034.asciidoc +++ b/docs/examples/99c1cfe60f3ccf5bf3abd24c31ed9034.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc:14 +// ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc:20 [source, python] ---- diff --git a/docs/examples/99fb82d49ac477e6a9dfdd71f9465374.asciidoc b/docs/examples/99fb82d49ac477e6a9dfdd71f9465374.asciidoc new file mode 100644 index 000000000..df9b99ed6 --- /dev/null +++ b/docs/examples/99fb82d49ac477e6a9dfdd71f9465374.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// ingest/apis/delete-ip-location-database.asciidoc:58 + +[source, python] +---- +resp = client.perform_request( + "DELETE", + "/_ingest/ip_location/database/example-database-id", +) +print(resp) +---- diff --git a/docs/examples/9a05cc10eea1251e23b82a4549913536.asciidoc b/docs/examples/9a05cc10eea1251e23b82a4549913536.asciidoc index fde02f78f..fd2de14ab 100644 --- a/docs/examples/9a05cc10eea1251e23b82a4549913536.asciidoc +++ b/docs/examples/9a05cc10eea1251e23b82a4549913536.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/hotspotting.asciidoc:104 +// troubleshooting/common-issues/hotspotting.asciidoc:108 [source, python] ---- diff --git a/docs/examples/9a09d33ec11e20b6081cae882282ca60.asciidoc b/docs/examples/9a09d33ec11e20b6081cae882282ca60.asciidoc index 352c19931..e006ded1c 100644 --- a/docs/examples/9a09d33ec11e20b6081cae882282ca60.asciidoc +++ b/docs/examples/9a09d33ec11e20b6081cae882282ca60.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/clear-privileges-cache.asciidoc:57 +// rest-api/security/clear-privileges-cache.asciidoc:63 [source, python] ---- diff --git a/docs/examples/9a203aae3e1412d919546276fb52a5ca.asciidoc b/docs/examples/9a203aae3e1412d919546276fb52a5ca.asciidoc index 8c93ab505..3d9cb9247 100644 --- a/docs/examples/9a203aae3e1412d919546276fb52a5ca.asciidoc +++ 
b/docs/examples/9a203aae3e1412d919546276fb52a5ca.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-cohere.asciidoc:190 +// inference/service-cohere.asciidoc:196 [source, python] ---- diff --git a/docs/examples/9a49b7572d571e00e20dbebdd30f9368.asciidoc b/docs/examples/9a49b7572d571e00e20dbebdd30f9368.asciidoc index d5531c3be..5810962a6 100644 --- a/docs/examples/9a49b7572d571e00e20dbebdd30f9368.asciidoc +++ b/docs/examples/9a49b7572d571e00e20dbebdd30f9368.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-vector-tile-api.asciidoc:114 +// search/search-vector-tile-api.asciidoc:119 [source, python] ---- diff --git a/docs/examples/9a4d5e41c52c20635d1fd9c6e13f6c7a.asciidoc b/docs/examples/9a4d5e41c52c20635d1fd9c6e13f6c7a.asciidoc index 5d558d09f..6a952d37e 100644 --- a/docs/examples/9a4d5e41c52c20635d1fd9c6e13f6c7a.asciidoc +++ b/docs/examples/9a4d5e41c52c20635d1fd9c6e13f6c7a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/reindex.asciidoc:827 +// docs/reindex.asciidoc:833 [source, python] ---- diff --git a/docs/examples/9a743b6575c6fe5acdf46024a7fda8a1.asciidoc b/docs/examples/9a743b6575c6fe5acdf46024a7fda8a1.asciidoc index 6cab67aae..435bc627d 100644 --- a/docs/examples/9a743b6575c6fe5acdf46024a7fda8a1.asciidoc +++ b/docs/examples/9a743b6575c6fe5acdf46024a7fda8a1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// scripting/using.asciidoc:537 +// scripting/using.asciidoc:542 [source, python] ---- diff --git a/docs/examples/9ab351893dae65ec97fd8cb6832950fb.asciidoc b/docs/examples/9ab351893dae65ec97fd8cb6832950fb.asciidoc index 6b763a1a6..f3393b1a2 100644 --- a/docs/examples/9ab351893dae65ec97fd8cb6832950fb.asciidoc +++ b/docs/examples/9ab351893dae65ec97fd8cb6832950fb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/knn-search.asciidoc:1124 +// search/search-your-data/knn-search.asciidoc:1288 [source, python] ---- diff --git a/docs/examples/9ad0864bcd665b63551e944653d32423.asciidoc b/docs/examples/9ad0864bcd665b63551e944653d32423.asciidoc index a6d007b6a..9615a7520 100644 --- a/docs/examples/9ad0864bcd665b63551e944653d32423.asciidoc +++ b/docs/examples/9ad0864bcd665b63551e944653d32423.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/semantic-text-hybrid-search:155 +// search/search-your-data/semantic-text-hybrid-search:118 [source, python] ---- diff --git a/docs/examples/9ae268058c0ea32ef8926568e011c728.asciidoc b/docs/examples/9ae268058c0ea32ef8926568e011c728.asciidoc index d885d8879..381b76a4b 100644 --- a/docs/examples/9ae268058c0ea32ef8926568e011c728.asciidoc +++ b/docs/examples/9ae268058c0ea32ef8926568e011c728.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-features-api.asciidoc:123 +// connector/apis/update-connector-features-api.asciidoc:129 [source, python] ---- diff --git a/docs/examples/9aedc45f83e022732789e8d796f5a43c.asciidoc b/docs/examples/9aedc45f83e022732789e8d796f5a43c.asciidoc index 3d8050779..0636b0a05 100644 --- a/docs/examples/9aedc45f83e022732789e8d796f5a43c.asciidoc +++ b/docs/examples/9aedc45f83e022732789e8d796f5a43c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/reroute.asciidoc:195 +// cluster/reroute.asciidoc:200 [source, python] ---- diff --git a/docs/examples/9af44592fb2e78fb17ad3e834bbef7a7.asciidoc b/docs/examples/9af44592fb2e78fb17ad3e834bbef7a7.asciidoc index 
5e62697f9..444b4782d 100644 --- a/docs/examples/9af44592fb2e78fb17ad3e834bbef7a7.asciidoc +++ b/docs/examples/9af44592fb2e78fb17ad3e834bbef7a7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/apis/geoip-stats.asciidoc:11 +// ingest/apis/geoip-stats.asciidoc:17 [source, python] ---- diff --git a/docs/examples/9afa0844883b7471883aa378a8dd10b4.asciidoc b/docs/examples/9afa0844883b7471883aa378a8dd10b4.asciidoc index 15d83c81e..b8c42b55b 100644 --- a/docs/examples/9afa0844883b7471883aa378a8dd10b4.asciidoc +++ b/docs/examples/9afa0844883b7471883aa378a8dd10b4.asciidoc @@ -1,11 +1,12 @@ // This file is autogenerated, DO NOT EDIT -// behavioral-analytics/apis/post-analytics-collection-event.asciidoc:69 +// behavioral-analytics/apis/post-analytics-collection-event.asciidoc:75 [source, python] ---- -resp = client.search_application.post_behavioral_analytics_event( - collection_name="my_analytics_collection", - event_type="search_click", +resp = client.perform_request( + "POST", + "/_application/analytics/my_analytics_collection/event/search_click", + headers={"Content-Type": "application/json"}, body={ "session": { "id": "1797ca95-91c9-4e2e-b1bd-9c38e6f386a9" diff --git a/docs/examples/9b0f34d122a4b348dc86df7410d6ebb6.asciidoc b/docs/examples/9b0f34d122a4b348dc86df7410d6ebb6.asciidoc index 34e4efa64..40e47d094 100644 --- a/docs/examples/9b0f34d122a4b348dc86df7410d6ebb6.asciidoc +++ b/docs/examples/9b0f34d122a4b348dc86df7410d6ebb6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/cancel-connector-sync-job-api.asciidoc:51 +// connector/apis/cancel-connector-sync-job-api.asciidoc:57 [source, python] ---- diff --git a/docs/examples/9b30a69fec54cf01f7af1b04a6e15239.asciidoc b/docs/examples/9b30a69fec54cf01f7af1b04a6e15239.asciidoc index 215dcff86..29f4fbd77 100644 --- a/docs/examples/9b30a69fec54cf01f7af1b04a6e15239.asciidoc +++ b/docs/examples/9b30a69fec54cf01f7af1b04a6e15239.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ccr/apis/get-ccr-stats.asciidoc:103 +// ccr/apis/get-ccr-stats.asciidoc:109 [source, python] ---- diff --git a/docs/examples/9b345e0bfd45f3a37194585ec9193478.asciidoc b/docs/examples/9b345e0bfd45f3a37194585ec9193478.asciidoc index 5cab8fba7..d7527c836 100644 --- a/docs/examples/9b345e0bfd45f3a37194585ec9193478.asciidoc +++ b/docs/examples/9b345e0bfd45f3a37194585ec9193478.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/forcemerge.asciidoc:173 +// indices/forcemerge.asciidoc:179 [source, python] ---- diff --git a/docs/examples/9bae72e974bdeb56007d9104e73eff92.asciidoc b/docs/examples/9bae72e974bdeb56007d9104e73eff92.asciidoc index 3651a3480..d59016490 100644 --- a/docs/examples/9bae72e974bdeb56007d9104e73eff92.asciidoc +++ b/docs/examples/9bae72e974bdeb56007d9104e73eff92.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update.asciidoc:182 +// docs/update.asciidoc:188 [source, python] ---- diff --git a/docs/examples/9bb24fe09e3d1c73a71d00b994ba8cfb.asciidoc b/docs/examples/9bb24fe09e3d1c73a71d00b994ba8cfb.asciidoc index 438842e0f..b586d25dc 100644 --- a/docs/examples/9bb24fe09e3d1c73a71d00b994ba8cfb.asciidoc +++ b/docs/examples/9bb24fe09e3d1c73a71d00b994ba8cfb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/shards.asciidoc:343 +// cat/shards.asciidoc:352 [source, python] ---- diff --git a/docs/examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc b/docs/examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc index 
95150009b..227cb5a9e 100644 --- a/docs/examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc +++ b/docs/examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc @@ -1,9 +1,12 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/oidc-authenticate-api.asciidoc:68 +// rest-api/security/oidc-authenticate-api.asciidoc:74 [source, python] ---- -resp = client.security.oidc_authenticate( +resp = client.perform_request( + "POST", + "/_security/oidc/authenticate", + headers={"Content-Type": "application/json"}, body={ "redirect_uri": "https://oidc-kibana.elastic.co:5603/api/security/oidc/callback?code=jtI3Ntt8v3_XvcLzCFGq&state=4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I", "state": "4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I", diff --git a/docs/examples/9c021836acf7c0370e289f611325868d.asciidoc b/docs/examples/9c021836acf7c0370e289f611325868d.asciidoc index 81f2363e6..2f6b15f0c 100644 --- a/docs/examples/9c021836acf7c0370e289f611325868d.asciidoc +++ b/docs/examples/9c021836acf7c0370e289f611325868d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-configuration-api.asciidoc:309 +// connector/apis/update-connector-configuration-api.asciidoc:315 [source, python] ---- diff --git a/docs/examples/9c7c8051592b6af3adb5d7c490849068.asciidoc b/docs/examples/9c7c8051592b6af3adb5d7c490849068.asciidoc index b7dba1f67..d021306cc 100644 --- a/docs/examples/9c7c8051592b6af3adb5d7c490849068.asciidoc +++ b/docs/examples/9c7c8051592b6af3adb5d7c490849068.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/put-datafeed.asciidoc:162 +// ml/anomaly-detection/apis/put-datafeed.asciidoc:168 [source, python] ---- diff --git a/docs/examples/9cbb097e5498a9fde39e3b1d3b62a4d2.asciidoc b/docs/examples/9cbb097e5498a9fde39e3b1d3b62a4d2.asciidoc index 70dfa8833..583aefcb6 100644 --- a/docs/examples/9cbb097e5498a9fde39e3b1d3b62a4d2.asciidoc +++ b/docs/examples/9cbb097e5498a9fde39e3b1d3b62a4d2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/infer-trained-model.asciidoc:1046 +// ml/trained-models/apis/infer-trained-model.asciidoc:1052 [source, python] ---- diff --git a/docs/examples/9cc64ab2f60f995f5dbfaca67aa6dd41.asciidoc b/docs/examples/9cc64ab2f60f995f5dbfaca67aa6dd41.asciidoc index 57fbd39dd..b30c1fe6a 100644 --- a/docs/examples/9cc64ab2f60f995f5dbfaca67aa6dd41.asciidoc +++ b/docs/examples/9cc64ab2f60f995f5dbfaca67aa6dd41.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-query-api.asciidoc:10 +// esql/esql-query-api.asciidoc:16 [source, python] ---- diff --git a/docs/examples/9cc952d4a03264b700136cbc45abc8c6.asciidoc b/docs/examples/9cc952d4a03264b700136cbc45abc8c6.asciidoc new file mode 100644 index 000000000..9bb8f645b --- /dev/null +++ b/docs/examples/9cc952d4a03264b700136cbc45abc8c6.asciidoc @@ -0,0 +1,38 @@ +// This file is autogenerated, DO NOT EDIT +// mapping/types/rank-vectors.asciidoc:42 + +[source, python] +---- +resp = client.indices.create( + index="my-rank-vectors-byte", + mappings={ + "properties": { + "my_vector": { + "type": "rank_vectors", + "element_type": "byte" + } + } + }, +) +print(resp) + +resp1 = client.index( + index="my-rank-vectors-byte", + id="1", + document={ + "my_vector": [ + [ + 1, + 2, + 3 + ], + [ + 4, + 5, + 6 + ] + ] + }, +) +print(resp1) +---- diff --git a/docs/examples/9cd37d0ccbc66ad47ddb626564b27cc8.asciidoc b/docs/examples/9cd37d0ccbc66ad47ddb626564b27cc8.asciidoc index 21eab8e92..3305bc3e2 100644 --- 
a/docs/examples/9cd37d0ccbc66ad47ddb626564b27cc8.asciidoc +++ b/docs/examples/9cd37d0ccbc66ad47ddb626564b27cc8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/watcher/execute-watch.asciidoc:327 +// rest-api/watcher/execute-watch.asciidoc:333 [source, python] ---- diff --git a/docs/examples/9cf6c7012a4f2bb562bc256aa28c3409.asciidoc b/docs/examples/9cf6c7012a4f2bb562bc256aa28c3409.asciidoc index bcc021bbb..f03f6e8ad 100644 --- a/docs/examples/9cf6c7012a4f2bb562bc256aa28c3409.asciidoc +++ b/docs/examples/9cf6c7012a4f2bb562bc256aa28c3409.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/watcher/execute-watch.asciidoc:314 +// rest-api/watcher/execute-watch.asciidoc:320 [source, python] ---- diff --git a/docs/examples/9d66cb59711f24e6b4ff85608c9b5a1b.asciidoc b/docs/examples/9d66cb59711f24e6b4ff85608c9b5a1b.asciidoc index dfe0eb762..46b5bc2ff 100644 --- a/docs/examples/9d66cb59711f24e6b4ff85608c9b5a1b.asciidoc +++ b/docs/examples/9d66cb59711f24e6b4ff85608c9b5a1b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/task-queue-backlog.asciidoc:51 +// troubleshooting/common-issues/task-queue-backlog.asciidoc:73 [source, python] ---- diff --git a/docs/examples/9d79645ab3a9da3f63c54a1516214a5a.asciidoc b/docs/examples/9d79645ab3a9da3f63c54a1516214a5a.asciidoc index b6f73d2cb..1895c27ca 100644 --- a/docs/examples/9d79645ab3a9da3f63c54a1516214a5a.asciidoc +++ b/docs/examples/9d79645ab3a9da3f63c54a1516214a5a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// health/health.asciidoc:465 +// health/health.asciidoc:471 [source, python] ---- diff --git a/docs/examples/9e563b8d5a7845f644db8d5bbf453eb6.asciidoc b/docs/examples/9e563b8d5a7845f644db8d5bbf453eb6.asciidoc index d1b60ef2e..63cadd7c0 100644 --- a/docs/examples/9e563b8d5a7845f644db8d5bbf453eb6.asciidoc +++ b/docs/examples/9e563b8d5a7845f644db8d5bbf453eb6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// synonyms/apis/put-synonyms-set.asciidoc:61 +// synonyms/apis/put-synonyms-set.asciidoc:67 [source, python] ---- diff --git a/docs/examples/9e9717d9108ae1425bfacf71c7c44539.asciidoc b/docs/examples/9e9717d9108ae1425bfacf71c7c44539.asciidoc index fc588c589..db6f3e51e 100644 --- a/docs/examples/9e9717d9108ae1425bfacf71c7c44539.asciidoc +++ b/docs/examples/9e9717d9108ae1425bfacf71c7c44539.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat.asciidoc:121 +// cat.asciidoc:127 [source, python] ---- diff --git a/docs/examples/9f99be2d58c48a6bf8e892aa24604197.asciidoc b/docs/examples/9f99be2d58c48a6bf8e892aa24604197.asciidoc index 37a5c6840..cd8037882 100644 --- a/docs/examples/9f99be2d58c48a6bf8e892aa24604197.asciidoc +++ b/docs/examples/9f99be2d58c48a6bf8e892aa24604197.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/df-analytics/apis/update-dfanalytics.asciidoc:92 +// ml/df-analytics/apis/update-dfanalytics.asciidoc:98 [source, python] ---- diff --git a/docs/examples/9fda516a5dc60ba477b970eaad4429db.asciidoc b/docs/examples/9fda516a5dc60ba477b970eaad4429db.asciidoc index c7e75d94a..293ced05a 100644 --- a/docs/examples/9fda516a5dc60ba477b970eaad4429db.asciidoc +++ b/docs/examples/9fda516a5dc60ba477b970eaad4429db.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/lifecycle/apis/get-lifecycle.asciidoc:142 +// data-streams/lifecycle/apis/get-lifecycle.asciidoc:148 [source, python] ---- diff --git 
a/docs/examples/9feff356f302ea4915347ab71cc4887a.asciidoc b/docs/examples/9feff356f302ea4915347ab71cc4887a.asciidoc index 31a0cbc95..d9508e5ae 100644 --- a/docs/examples/9feff356f302ea4915347ab71cc4887a.asciidoc +++ b/docs/examples/9feff356f302ea4915347ab71cc4887a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/simulate-template.asciidoc:235 +// indices/simulate-template.asciidoc:241 [source, python] ---- diff --git a/docs/examples/9ff9b2a73419a6c82f17a358b4991499.asciidoc b/docs/examples/9ff9b2a73419a6c82f17a358b4991499.asciidoc index 6a64b2870..01ddcfd66 100644 --- a/docs/examples/9ff9b2a73419a6c82f17a358b4991499.asciidoc +++ b/docs/examples/9ff9b2a73419a6c82f17a358b4991499.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/point-in-time-api.asciidoc:159 +// search/point-in-time-api.asciidoc:165 [source, python] ---- diff --git a/docs/examples/a00311843b5f8f3e9f7d511334a828b1.asciidoc b/docs/examples/a00311843b5f8f3e9f7d511334a828b1.asciidoc index 21debb682..5802d6251 100644 --- a/docs/examples/a00311843b5f8f3e9f7d511334a828b1.asciidoc +++ b/docs/examples/a00311843b5f8f3e9f7d511334a828b1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/rollup-caps.asciidoc:91 +// rollup/apis/rollup-caps.asciidoc:97 [source, python] ---- diff --git a/docs/examples/a0f4e902d18460337684d74ea932fbe9.asciidoc b/docs/examples/a0f4e902d18460337684d74ea932fbe9.asciidoc index 80ed4b788..3865fe048 100644 --- a/docs/examples/a0f4e902d18460337684d74ea932fbe9.asciidoc +++ b/docs/examples/a0f4e902d18460337684d74ea932fbe9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update.asciidoc:257 +// docs/update.asciidoc:263 [source, python] ---- diff --git a/docs/examples/a159e1ce0cba7a35ce44db9bebad22f3.asciidoc b/docs/examples/a159e1ce0cba7a35ce44db9bebad22f3.asciidoc index 79193519b..4690f73d9 100644 --- a/docs/examples/a159e1ce0cba7a35ce44db9bebad22f3.asciidoc +++ b/docs/examples/a159e1ce0cba7a35ce44db9bebad22f3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// slm/apis/slm-get.asciidoc:126 +// slm/apis/slm-get.asciidoc:132 [source, python] ---- diff --git a/docs/examples/a162eb50853331c80596f5994e9d1c38.asciidoc b/docs/examples/a162eb50853331c80596f5994e9d1c38.asciidoc index 2866e98b4..f8772be64 100644 --- a/docs/examples/a162eb50853331c80596f5994e9d1c38.asciidoc +++ b/docs/examples/a162eb50853331c80596f5994e9d1c38.asciidoc @@ -3,8 +3,10 @@ [source, python] ---- -resp = client.search_application.render_query( - name="my_search_application", +resp = client.perform_request( + "POST", + "/_application/search_application/my_search_application/_render_query", + headers={"Content-Type": "application/json"}, body={ "params": { "query_string": "rock climbing" diff --git a/docs/examples/a1d0603b24a5b048f0959975d8057534.asciidoc b/docs/examples/a1d0603b24a5b048f0959975d8057534.asciidoc index 750fec900..840358da8 100644 --- a/docs/examples/a1d0603b24a5b048f0959975d8057534.asciidoc +++ b/docs/examples/a1d0603b24a5b048f0959975d8057534.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/termvectors.asciidoc:354 +// docs/termvectors.asciidoc:360 [source, python] ---- diff --git a/docs/examples/a1dda7e7c01be96a4acf7b725d70385f.asciidoc b/docs/examples/a1dda7e7c01be96a4acf7b725d70385f.asciidoc index a603b404b..743681aa0 100644 --- a/docs/examples/a1dda7e7c01be96a4acf7b725d70385f.asciidoc +++ b/docs/examples/a1dda7e7c01be96a4acf7b725d70385f.asciidoc @@ -1,5 +1,5 @@ // 
This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:448 +// search/retriever.asciidoc:639 [source, python] ---- diff --git a/docs/examples/a1f70bc71b763b58206814c40a7440e7.asciidoc b/docs/examples/a1f70bc71b763b58206814c40a7440e7.asciidoc index 88c3ac6fb..103eacdfc 100644 --- a/docs/examples/a1f70bc71b763b58206814c40a7440e7.asciidoc +++ b/docs/examples/a1f70bc71b763b58206814c40a7440e7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/watcher/update-settings.asciidoc:41 +// rest-api/watcher/update-settings.asciidoc:47 [source, python] ---- diff --git a/docs/examples/a2b2ce031120dac49b5120b26eea8758.asciidoc b/docs/examples/a2b2ce031120dac49b5120b26eea8758.asciidoc index 4a33279d1..0612348a0 100644 --- a/docs/examples/a2b2ce031120dac49b5120b26eea8758.asciidoc +++ b/docs/examples/a2b2ce031120dac49b5120b26eea8758.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/indices.asciidoc:113 +// cat/indices.asciidoc:119 [source, python] ---- diff --git a/docs/examples/a2c3e284354e8d49cf51bb8dd5ef3613.asciidoc b/docs/examples/a2c3e284354e8d49cf51bb8dd5ef3613.asciidoc index 90db588fa..fcf84bb43 100644 --- a/docs/examples/a2c3e284354e8d49cf51bb8dd5ef3613.asciidoc +++ b/docs/examples/a2c3e284354e8d49cf51bb8dd5ef3613.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// transform/apis/upgrade-transforms.asciidoc:97 +// transform/apis/upgrade-transforms.asciidoc:103 [source, python] ---- diff --git a/docs/examples/a3646b59da66b9ab68bdbc8dc2e6a9be.asciidoc b/docs/examples/a3646b59da66b9ab68bdbc8dc2e6a9be.asciidoc index 67acc59e9..7d18ab764 100644 --- a/docs/examples/a3646b59da66b9ab68bdbc8dc2e6a9be.asciidoc +++ b/docs/examples/a3646b59da66b9ab68bdbc8dc2e6a9be.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:144 +// search/retriever.asciidoc:154 [source, python] ---- diff --git a/docs/examples/a3a2856ac2338a624a1fa5f31aec4db4.asciidoc b/docs/examples/a3a2856ac2338a624a1fa5f31aec4db4.asciidoc index 638ba69a9..193ddffd0 100644 --- a/docs/examples/a3a2856ac2338a624a1fa5f31aec4db4.asciidoc +++ b/docs/examples/a3a2856ac2338a624a1fa5f31aec4db4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-api-keys.asciidoc:92 +// rest-api/security/get-api-keys.asciidoc:98 [source, python] ---- diff --git a/docs/examples/a3a64d568fe93a22b042a8b31b9905b0.asciidoc b/docs/examples/a3a64d568fe93a22b042a8b31b9905b0.asciidoc index f03b8da79..2a51f08db 100644 --- a/docs/examples/a3a64d568fe93a22b042a8b31b9905b0.asciidoc +++ b/docs/examples/a3a64d568fe93a22b042a8b31b9905b0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/apis/simulate-pipeline.asciidoc:303 +// ingest/apis/simulate-pipeline.asciidoc:309 [source, python] ---- diff --git a/docs/examples/a3ce0cfe2176f3d8a36959a5916995f0.asciidoc b/docs/examples/a3ce0cfe2176f3d8a36959a5916995f0.asciidoc index d332c6b9c..c48e04756 100644 --- a/docs/examples/a3ce0cfe2176f3d8a36959a5916995f0.asciidoc +++ b/docs/examples/a3ce0cfe2176f3d8a36959a5916995f0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/tasks.asciidoc:277 +// cluster/tasks.asciidoc:283 [source, python] ---- diff --git a/docs/examples/a3cfd350c73a104b99a998c6be931408.asciidoc b/docs/examples/a3cfd350c73a104b99a998c6be931408.asciidoc index b2548ae41..35bf10f1f 100644 --- a/docs/examples/a3cfd350c73a104b99a998c6be931408.asciidoc +++ b/docs/examples/a3cfd350c73a104b99a998c6be931408.asciidoc @@ 
-1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/state.asciidoc:158 +// cluster/state.asciidoc:164 [source, python] ---- diff --git a/docs/examples/a3d943ac9d45b4eff4aa0c679b4eceb3.asciidoc b/docs/examples/a3d943ac9d45b4eff4aa0c679b4eceb3.asciidoc index 9ac1d1d70..2969b0a95 100644 --- a/docs/examples/a3d943ac9d45b4eff4aa0c679b4eceb3.asciidoc +++ b/docs/examples/a3d943ac9d45b4eff4aa0c679b4eceb3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/dangling-index-import.asciidoc:13 +// indices/dangling-index-import.asciidoc:19 [source, python] ---- diff --git a/docs/examples/a3e79d6c626a490341c5b731acbb4a5d.asciidoc b/docs/examples/a3e79d6c626a490341c5b731acbb4a5d.asciidoc index 9c4fef882..8694e9d34 100644 --- a/docs/examples/a3e79d6c626a490341c5b731acbb4a5d.asciidoc +++ b/docs/examples/a3e79d6c626a490341c5b731acbb4a5d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/get.asciidoc:307 +// docs/get.asciidoc:313 [source, python] ---- diff --git a/docs/examples/a45605347d6438e7aecdf3b37198616d.asciidoc b/docs/examples/a45605347d6438e7aecdf3b37198616d.asciidoc index 7f083b5a2..e9fbcec25 100644 --- a/docs/examples/a45605347d6438e7aecdf3b37198616d.asciidoc +++ b/docs/examples/a45605347d6438e7aecdf3b37198616d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ilm/apis/move-to-step.asciidoc:150 +// ilm/apis/move-to-step.asciidoc:156 [source, python] ---- diff --git a/docs/examples/a4a3c3cd09efa75168dab90105afb2e9.asciidoc b/docs/examples/a4a3c3cd09efa75168dab90105afb2e9.asciidoc index bcd4b081f..fd57a63b9 100644 --- a/docs/examples/a4a3c3cd09efa75168dab90105afb2e9.asciidoc +++ b/docs/examples/a4a3c3cd09efa75168dab90105afb2e9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/get-inference.asciidoc:68 +// inference/get-inference.asciidoc:74 [source, python] ---- diff --git a/docs/examples/a520168c1c8b454a8f102d6a13027c73.asciidoc b/docs/examples/a520168c1c8b454a8f102d6a13027c73.asciidoc index 41527cb83..51a025513 100644 --- a/docs/examples/a520168c1c8b454a8f102d6a13027c73.asciidoc +++ b/docs/examples/a520168c1c8b454a8f102d6a13027c73.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ccr/apis/follow/get-follow-info.asciidoc:143 +// ccr/apis/follow/get-follow-info.asciidoc:149 [source, python] ---- diff --git a/docs/examples/a547bb926c25f670078b98fbe67de3cc.asciidoc b/docs/examples/a547bb926c25f670078b98fbe67de3cc.asciidoc index 335b78c15..a98628f53 100644 --- a/docs/examples/a547bb926c25f670078b98fbe67de3cc.asciidoc +++ b/docs/examples/a547bb926c25f670078b98fbe67de3cc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// synonyms/apis/delete-synonym-rule.asciidoc:103 +// synonyms/apis/delete-synonym-rule.asciidoc:108 [source, python] ---- diff --git a/docs/examples/a5dfcfd1cfb3558e7912456669c92eee.asciidoc b/docs/examples/a5dfcfd1cfb3558e7912456669c92eee.asciidoc index c1c77e7c9..f9c66d797 100644 --- a/docs/examples/a5dfcfd1cfb3558e7912456669c92eee.asciidoc +++ b/docs/examples/a5dfcfd1cfb3558e7912456669c92eee.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/saml-prepare-authentication-api.asciidoc:79 +// rest-api/security/saml-prepare-authentication-api.asciidoc:85 [source, python] ---- diff --git a/docs/examples/a5e2b3588258430f2e595abda98e3943.asciidoc b/docs/examples/a5e2b3588258430f2e595abda98e3943.asciidoc index c3786c06d..dde1af6c9 100644 --- 
a/docs/examples/a5e2b3588258430f2e595abda98e3943.asciidoc +++ b/docs/examples/a5e2b3588258430f2e595abda98e3943.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/clear-cache.asciidoc:54 +// rest-api/security/clear-cache.asciidoc:60 [source, python] ---- diff --git a/docs/examples/a5e793d82a4455cf4105dac82a156617.asciidoc b/docs/examples/a5e793d82a4455cf4105dac82a156617.asciidoc index 36414e53b..001ced277 100644 --- a/docs/examples/a5e793d82a4455cf4105dac82a156617.asciidoc +++ b/docs/examples/a5e793d82a4455cf4105dac82a156617.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/validate.asciidoc:208 +// search/validate.asciidoc:214 [source, python] ---- diff --git a/docs/examples/a5f9eb40087921e67d820775acf71522.asciidoc b/docs/examples/a5f9eb40087921e67d820775acf71522.asciidoc index 77b52e796..c85c3543d 100644 --- a/docs/examples/a5f9eb40087921e67d820775acf71522.asciidoc +++ b/docs/examples/a5f9eb40087921e67d820775acf71522.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:212 +// indices/put-mapping.asciidoc:218 [source, python] ---- diff --git a/docs/examples/a6204edaa0bcf7b82a89ab4f6bda0914.asciidoc b/docs/examples/a6204edaa0bcf7b82a89ab4f6bda0914.asciidoc index b1ada6b8c..97ef9d659 100644 --- a/docs/examples/a6204edaa0bcf7b82a89ab4f6bda0914.asciidoc +++ b/docs/examples/a6204edaa0bcf7b82a89ab4f6bda0914.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/open-job.asciidoc:68 +// ml/anomaly-detection/apis/open-job.asciidoc:74 [source, python] ---- diff --git a/docs/examples/a692b4c0ca7825c467880b346841f5a5.asciidoc b/docs/examples/a692b4c0ca7825c467880b346841f5a5.asciidoc index 24eacbc92..4069ff6d3 100644 --- a/docs/examples/a692b4c0ca7825c467880b346841f5a5.asciidoc +++ b/docs/examples/a692b4c0ca7825c467880b346841f5a5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:156 +// indices/put-mapping.asciidoc:162 [source, python] ---- diff --git a/docs/examples/a69b1ce5cc9528fb3639185eaf241ae3.asciidoc b/docs/examples/a69b1ce5cc9528fb3639185eaf241ae3.asciidoc index ef8c2ddb1..d143f7f3c 100644 --- a/docs/examples/a69b1ce5cc9528fb3639185eaf241ae3.asciidoc +++ b/docs/examples/a69b1ce5cc9528fb3639185eaf241ae3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/clear-scroll-api.asciidoc:25 +// search/clear-scroll-api.asciidoc:31 [source, python] ---- diff --git a/docs/examples/a6bb306ca250cf651f19cae808b97012.asciidoc b/docs/examples/a6bb306ca250cf651f19cae808b97012.asciidoc index 9358bbc19..a0f30908c 100644 --- a/docs/examples/a6bb306ca250cf651f19cae808b97012.asciidoc +++ b/docs/examples/a6bb306ca250cf651f19cae808b97012.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/get-index.asciidoc:11 +// indices/get-index.asciidoc:17 [source, python] ---- diff --git a/docs/examples/a6be6c1cb4a556866fdccb0dee2f1dea.asciidoc b/docs/examples/a6be6c1cb4a556866fdccb0dee2f1dea.asciidoc index f4930b061..6ca864c52 100644 --- a/docs/examples/a6be6c1cb4a556866fdccb0dee2f1dea.asciidoc +++ b/docs/examples/a6be6c1cb4a556866fdccb0dee2f1dea.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/index-template-exists-v1.asciidoc:17 +// indices/index-template-exists-v1.asciidoc:23 [source, python] ---- diff --git a/docs/examples/a6fdd0100cd362df54af6c95d1055c96.asciidoc b/docs/examples/a6fdd0100cd362df54af6c95d1055c96.asciidoc index 
fe4ad1147..80137273d 100644 --- a/docs/examples/a6fdd0100cd362df54af6c95d1055c96.asciidoc +++ b/docs/examples/a6fdd0100cd362df54af6c95d1055c96.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/get-mapping.asciidoc:11 +// indices/get-mapping.asciidoc:17 [source, python] ---- diff --git a/docs/examples/a72613de3774571ba24def4b495161b5.asciidoc b/docs/examples/a72613de3774571ba24def4b495161b5.asciidoc index 1742374e6..790292f7e 100644 --- a/docs/examples/a72613de3774571ba24def4b495161b5.asciidoc +++ b/docs/examples/a72613de3774571ba24def4b495161b5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:422 +// indices/put-mapping.asciidoc:428 [source, python] ---- diff --git a/docs/examples/a75765e3fb130421dde6c3c2f12e8acb.asciidoc b/docs/examples/a75765e3fb130421dde6c3c2f12e8acb.asciidoc index aff792fea..34aaf2c4c 100644 --- a/docs/examples/a75765e3fb130421dde6c3c2f12e8acb.asciidoc +++ b/docs/examples/a75765e3fb130421dde6c3c2f12e8acb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/claim-connector-sync-job-api.asciidoc:63 +// connector/apis/claim-connector-sync-job-api.asciidoc:69 [source, python] ---- diff --git a/docs/examples/a78dfb844d385405d4b0fb0e09b4a5a4.asciidoc b/docs/examples/a78dfb844d385405d4b0fb0e09b4a5a4.asciidoc index 93e49ac7c..470fb6c3a 100644 --- a/docs/examples/a78dfb844d385405d4b0fb0e09b4a5a4.asciidoc +++ b/docs/examples/a78dfb844d385405d4b0fb0e09b4a5a4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update.asciidoc:205 +// docs/update.asciidoc:211 [source, python] ---- diff --git a/docs/examples/a7cf31f4b907e4c00132aca75f55790c.asciidoc b/docs/examples/a7cf31f4b907e4c00132aca75f55790c.asciidoc index 189272ce5..8638b6fa9 100644 --- a/docs/examples/a7cf31f4b907e4c00132aca75f55790c.asciidoc +++ b/docs/examples/a7cf31f4b907e4c00132aca75f55790c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/apis/delete-pipeline.asciidoc:73 +// ingest/apis/delete-pipeline.asciidoc:79 [source, python] ---- diff --git a/docs/examples/a811b82ba4632bdd9065829085188bc9.asciidoc b/docs/examples/a811b82ba4632bdd9065829085188bc9.asciidoc index ad8fa8a52..109001514 100644 --- a/docs/examples/a811b82ba4632bdd9065829085188bc9.asciidoc +++ b/docs/examples/a811b82ba4632bdd9065829085188bc9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/apis/get-snapshot-api.asciidoc:44 +// snapshot-restore/apis/get-snapshot-api.asciidoc:50 [source, python] ---- diff --git a/docs/examples/a861a89f52008610e813b9f073951c58.asciidoc b/docs/examples/a861a89f52008610e813b9f073951c58.asciidoc index 7d9b3e32a..6cf9dfbf6 100644 --- a/docs/examples/a861a89f52008610e813b9f073951c58.asciidoc +++ b/docs/examples/a861a89f52008610e813b9f073951c58.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/stats.asciidoc:129 +// indices/stats.asciidoc:135 [source, python] ---- diff --git a/docs/examples/a960b43e720b4934edb74ab4b085ca77.asciidoc b/docs/examples/a960b43e720b4934edb74ab4b085ca77.asciidoc index d55601076..b48cffe85 100644 --- a/docs/examples/a960b43e720b4934edb74ab4b085ca77.asciidoc +++ b/docs/examples/a960b43e720b4934edb74ab4b085ca77.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/list-connectors-api.asciidoc:79 +// connector/apis/list-connectors-api.asciidoc:88 [source, python] ---- diff --git a/docs/examples/a985e6b7b2ead9c3f30a9bc97d8b598e.asciidoc 
b/docs/examples/a985e6b7b2ead9c3f30a9bc97d8b598e.asciidoc index a03a4fe75..11daa71b0 100644 --- a/docs/examples/a985e6b7b2ead9c3f30a9bc97d8b598e.asciidoc +++ b/docs/examples/a985e6b7b2ead9c3f30a9bc97d8b598e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/field-caps.asciidoc:196 +// search/field-caps.asciidoc:201 [source, python] ---- diff --git a/docs/examples/a98692a565904ec0783884d81a7b71fc.asciidoc b/docs/examples/a98692a565904ec0783884d81a7b71fc.asciidoc index d28575ab0..35ca56e9a 100644 --- a/docs/examples/a98692a565904ec0783884d81a7b71fc.asciidoc +++ b/docs/examples/a98692a565904ec0783884d81a7b71fc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/health.asciidoc:81 +// cat/health.asciidoc:87 [source, python] ---- diff --git a/docs/examples/a9c08023354aa9b9023807962df71d13.asciidoc b/docs/examples/a9c08023354aa9b9023807962df71d13.asciidoc index b80b858fe..9e7df3d34 100644 --- a/docs/examples/a9c08023354aa9b9023807962df71d13.asciidoc +++ b/docs/examples/a9c08023354aa9b9023807962df71d13.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/forcemerge.asciidoc:183 +// indices/forcemerge.asciidoc:189 [source, python] ---- diff --git a/docs/examples/a9dd9595e96c307b8c798beaeb571521.asciidoc b/docs/examples/a9dd9595e96c307b8c798beaeb571521.asciidoc index dd6329c4f..25524cba2 100644 --- a/docs/examples/a9dd9595e96c307b8c798beaeb571521.asciidoc +++ b/docs/examples/a9dd9595e96c307b8c798beaeb571521.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/upgrade-job-model-snapshot.asciidoc:77 +// ml/anomaly-detection/apis/upgrade-job-model-snapshot.asciidoc:83 [source, python] ---- diff --git a/docs/examples/a9f14efc26fdd3c37a71f06c310163d9.asciidoc b/docs/examples/a9f14efc26fdd3c37a71f06c310163d9.asciidoc new file mode 100644 index 000000000..e3f0a60c7 --- /dev/null +++ b/docs/examples/a9f14efc26fdd3c37a71f06c310163d9.asciidoc @@ -0,0 +1,27 @@ +// This file is autogenerated, DO NOT EDIT +// search/retriever.asciidoc:605 + +[source, python] +---- +resp = client.search( + retriever={ + "text_similarity_reranker": { + "retriever": { + "standard": { + "query": { + "match": { + "text": "How often does the moon hide the sun?" 
+ } + } + } + }, + "field": "text", + "inference_id": "my-elastic-rerank", + "inference_text": "How often does the moon hide the sun?", + "rank_window_size": 100, + "min_score": 0.5 + } + }, +) +print(resp) +---- diff --git a/docs/examples/a9fe70387d9c96a07830e1859c57efbb.asciidoc b/docs/examples/a9fe70387d9c96a07830e1859c57efbb.asciidoc index 013117b0e..fd3951ad4 100644 --- a/docs/examples/a9fe70387d9c96a07830e1859c57efbb.asciidoc +++ b/docs/examples/a9fe70387d9c96a07830e1859c57efbb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/create-index.asciidoc:148 +// indices/create-index.asciidoc:154 [source, python] ---- diff --git a/docs/examples/aa6282d4bc92c753c4bd7a5b166abece.asciidoc b/docs/examples/aa6282d4bc92c753c4bd7a5b166abece.asciidoc index 10ebcd52e..2bde61912 100644 --- a/docs/examples/aa6282d4bc92c753c4bd7a5b166abece.asciidoc +++ b/docs/examples/aa6282d4bc92c753c4bd7a5b166abece.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/start-trained-model-deployment.asciidoc:160 +// ml/trained-models/apis/start-trained-model-deployment.asciidoc:166 [source, python] ---- diff --git a/docs/examples/aa676d54a59dee87ecd28bcc1edce59b.asciidoc b/docs/examples/aa676d54a59dee87ecd28bcc1edce59b.asciidoc index 1e934487f..d6bbbd75a 100644 --- a/docs/examples/aa676d54a59dee87ecd28bcc1edce59b.asciidoc +++ b/docs/examples/aa676d54a59dee87ecd28bcc1edce59b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-alibabacloud-ai-search.asciidoc:186 +// inference/service-alibabacloud-ai-search.asciidoc:192 [source, python] ---- diff --git a/docs/examples/aa7cf5df36b867aee5e3314ac4b4fa68.asciidoc b/docs/examples/aa7cf5df36b867aee5e3314ac4b4fa68.asciidoc index 9b46dc0dd..2008fab55 100644 --- a/docs/examples/aa7cf5df36b867aee5e3314ac4b4fa68.asciidoc +++ b/docs/examples/aa7cf5df36b867aee5e3314ac4b4fa68.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// slm/apis/slm-put.asciidoc:119 +// slm/apis/slm-put.asciidoc:124 [source, python] ---- diff --git a/docs/examples/aa7f62279b487989440d423c1ed4a1c0.asciidoc b/docs/examples/aa7f62279b487989440d423c1ed4a1c0.asciidoc index 3a7f885af..d54d81586 100644 --- a/docs/examples/aa7f62279b487989440d423c1ed4a1c0.asciidoc +++ b/docs/examples/aa7f62279b487989440d423c1ed4a1c0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/apis/restore-snapshot-api.asciidoc:88 +// snapshot-restore/apis/restore-snapshot-api.asciidoc:94 [source, python] ---- diff --git a/docs/examples/aa814309ad5f1630886ba75255b444f5.asciidoc b/docs/examples/aa814309ad5f1630886ba75255b444f5.asciidoc index d542f9477..9ced73fdc 100644 --- a/docs/examples/aa814309ad5f1630886ba75255b444f5.asciidoc +++ b/docs/examples/aa814309ad5f1630886ba75255b444f5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/task-queue-backlog.asciidoc:80 +// troubleshooting/common-issues/task-queue-backlog.asciidoc:104 [source, python] ---- diff --git a/docs/examples/aab3de5a8a3fefbe012fc2ed50dfe4d6.asciidoc b/docs/examples/aab3de5a8a3fefbe012fc2ed50dfe4d6.asciidoc index d84bb077f..c991347e3 100644 --- a/docs/examples/aab3de5a8a3fefbe012fc2ed50dfe4d6.asciidoc +++ b/docs/examples/aab3de5a8a3fefbe012fc2ed50dfe4d6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// searchable-snapshots/apis/node-cache-stats.asciidoc:96 +// searchable-snapshots/apis/node-cache-stats.asciidoc:102 [source, python] ---- diff 
--git a/docs/examples/aaba346e0becdf12db13658296e0b8a1.asciidoc b/docs/examples/aaba346e0becdf12db13658296e0b8a1.asciidoc index 7a2fb484b..1969a302d 100644 --- a/docs/examples/aaba346e0becdf12db13658296e0b8a1.asciidoc +++ b/docs/examples/aaba346e0becdf12db13658296e0b8a1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ilm/error-handling.asciidoc:41 +// ilm/error-handling.asciidoc:42 [source, python] ---- diff --git a/docs/examples/ab8b4537fad80107bc88f633d4039a52.asciidoc b/docs/examples/ab8b4537fad80107bc88f633d4039a52.asciidoc index 74d6fa5df..f1e5f2da2 100644 --- a/docs/examples/ab8b4537fad80107bc88f633d4039a52.asciidoc +++ b/docs/examples/ab8b4537fad80107bc88f633d4039a52.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/create-index.asciidoc:210 +// indices/create-index.asciidoc:216 [source, python] ---- diff --git a/docs/examples/ab8de34fcfc0277901cb39618ecfc9d5.asciidoc b/docs/examples/ab8de34fcfc0277901cb39618ecfc9d5.asciidoc index 9b2ff6473..e3ea8cd47 100644 --- a/docs/examples/ab8de34fcfc0277901cb39618ecfc9d5.asciidoc +++ b/docs/examples/ab8de34fcfc0277901cb39618ecfc9d5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/allocation-explain.asciidoc:102 +// cluster/allocation-explain.asciidoc:108 [source, python] ---- diff --git a/docs/examples/abd4fc3ce7784413a56fe2dcfe2809b5.asciidoc b/docs/examples/abd4fc3ce7784413a56fe2dcfe2809b5.asciidoc index 94b61315a..2c6b64a28 100644 --- a/docs/examples/abd4fc3ce7784413a56fe2dcfe2809b5.asciidoc +++ b/docs/examples/abd4fc3ce7784413a56fe2dcfe2809b5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update-by-query.asciidoc:748 +// docs/update-by-query.asciidoc:754 [source, python] ---- diff --git a/docs/examples/abdbc81e799e28c833556b1c29f03ba6.asciidoc b/docs/examples/abdbc81e799e28c833556b1c29f03ba6.asciidoc index e2b8bbbe6..a71cdd46d 100644 --- a/docs/examples/abdbc81e799e28c833556b1c29f03ba6.asciidoc +++ b/docs/examples/abdbc81e799e28c833556b1c29f03ba6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-users.asciidoc:113 +// rest-api/security/get-users.asciidoc:118 [source, python] ---- diff --git a/docs/examples/ac22cc2b0f4ad659055feed2852a2d59.asciidoc b/docs/examples/ac22cc2b0f4ad659055feed2852a2d59.asciidoc new file mode 100644 index 000000000..2023f2b91 --- /dev/null +++ b/docs/examples/ac22cc2b0f4ad659055feed2852a2d59.asciidoc @@ -0,0 +1,39 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/retrievers-examples.asciidoc:1235 + +[source, python] +---- +resp = client.search( + index="retrievers_example", + retriever={ + "text_similarity_reranker": { + "retriever": { + "text_similarity_reranker": { + "retriever": { + "knn": { + "field": "vector", + "query_vector": [ + 0.23, + 0.67, + 0.89 + ], + "k": 3, + "num_candidates": 5 + } + }, + "rank_window_size": 100, + "field": "text", + "inference_id": "my-rerank-model", + "inference_text": "What are the state of the art applications of AI in information retrieval?" 
+ } + }, + "rank_window_size": 10, + "field": "text", + "inference_id": "my-other-more-expensive-rerank-model", + "inference_text": "Applications of Large Language Models in technology and their impact on user satisfaction" + } + }, + source=False, +) +print(resp) +---- diff --git a/docs/examples/ac497917ef707538198a8458ae3d5c6b.asciidoc b/docs/examples/ac497917ef707538198a8458ae3d5c6b.asciidoc index 63187d6b3..d0c229186 100644 --- a/docs/examples/ac497917ef707538198a8458ae3d5c6b.asciidoc +++ b/docs/examples/ac497917ef707538198a8458ae3d5c6b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/match-query.asciidoc:161 +// query-dsl/match-query.asciidoc:165 [source, python] ---- diff --git a/docs/examples/ac85e05c0bf2fd5099fbcb9c492f447e.asciidoc b/docs/examples/ac85e05c0bf2fd5099fbcb9c492f447e.asciidoc index b53368172..ffc3002f0 100644 --- a/docs/examples/ac85e05c0bf2fd5099fbcb9c492f447e.asciidoc +++ b/docs/examples/ac85e05c0bf2fd5099fbcb9c492f447e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/update-settings.asciidoc:68 +// cluster/update-settings.asciidoc:73 [source, python] ---- diff --git a/docs/examples/acb850c08f51226eadb75be09e336076.asciidoc b/docs/examples/acb850c08f51226eadb75be09e336076.asciidoc index 959f8d18b..8eb4dcb33 100644 --- a/docs/examples/acb850c08f51226eadb75be09e336076.asciidoc +++ b/docs/examples/acb850c08f51226eadb75be09e336076.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/async-search.asciidoc:253 +// search/async-search.asciidoc:259 [source, python] ---- diff --git a/docs/examples/ad2b8aed84c67cdc295917b47a12d3dc.asciidoc b/docs/examples/ad2b8aed84c67cdc295917b47a12d3dc.asciidoc index 3744722f8..ed67692fc 100644 --- a/docs/examples/ad2b8aed84c67cdc295917b47a12d3dc.asciidoc +++ b/docs/examples/ad2b8aed84c67cdc295917b47a12d3dc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/knn-query.asciidoc:42 +// query-dsl/knn-query.asciidoc:43 [source, python] ---- diff --git a/docs/examples/ad3b159657d4bcb373623fdc61acc3bf.asciidoc b/docs/examples/ad3b159657d4bcb373623fdc61acc3bf.asciidoc index e6101c4b9..82e77bf11 100644 --- a/docs/examples/ad3b159657d4bcb373623fdc61acc3bf.asciidoc +++ b/docs/examples/ad3b159657d4bcb373623fdc61acc3bf.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/count.asciidoc:10 +// search/count.asciidoc:16 [source, python] ---- diff --git a/docs/examples/ad88e46bb06739991498dee248850223.asciidoc b/docs/examples/ad88e46bb06739991498dee248850223.asciidoc index 7b3e08bcb..83c9b17fd 100644 --- a/docs/examples/ad88e46bb06739991498dee248850223.asciidoc +++ b/docs/examples/ad88e46bb06739991498dee248850223.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/thread_pool.asciidoc:136 +// cat/thread_pool.asciidoc:142 [source, python] ---- diff --git a/docs/examples/ad92a1a8bb1b0f26d1536fe8ba4ffd17.asciidoc b/docs/examples/ad92a1a8bb1b0f26d1536fe8ba4ffd17.asciidoc index dd83f695e..4f80d30ef 100644 --- a/docs/examples/ad92a1a8bb1b0f26d1536fe8ba4ffd17.asciidoc +++ b/docs/examples/ad92a1a8bb1b0f26d1536fe8ba4ffd17.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/render-search-template-api.asciidoc:33 +// search/render-search-template-api.asciidoc:39 [source, python] ---- diff --git a/docs/examples/7b9691bd34a02dd859562eb927f175e0.asciidoc b/docs/examples/ad9889fd8a4b5930e312a51f3bc996dc.asciidoc similarity index 83% rename from 
docs/examples/7b9691bd34a02dd859562eb927f175e0.asciidoc rename to docs/examples/ad9889fd8a4b5930e312a51f3bc996dc.asciidoc index 0fd593aaf..1a8d6ba81 100644 --- a/docs/examples/7b9691bd34a02dd859562eb927f175e0.asciidoc +++ b/docs/examples/ad9889fd8a4b5930e312a51f3bc996dc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-elasticsearch.asciidoc:132 +// inference/service-elasticsearch.asciidoc:140 [source, python] ---- @@ -12,7 +12,7 @@ resp = client.inference.put( "adaptive_allocations": { "enabled": True, "min_number_of_allocations": 1, - "max_number_of_allocations": 10 + "max_number_of_allocations": 4 }, "num_threads": 1, "model_id": ".elser_model_2" diff --git a/docs/examples/adc18ca0c344d81d68ec3b9422b54ff5.asciidoc b/docs/examples/adc18ca0c344d81d68ec3b9422b54ff5.asciidoc index c4abb1185..bdd6602d9 100644 --- a/docs/examples/adc18ca0c344d81d68ec3b9422b54ff5.asciidoc +++ b/docs/examples/adc18ca0c344d81d68ec3b9422b54ff5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/multi-search.asciidoc:312 +// search/multi-search.asciidoc:318 [source, python] ---- diff --git a/docs/examples/adf36e2d8fc05c3719c91912481c4e19.asciidoc b/docs/examples/adf36e2d8fc05c3719c91912481c4e19.asciidoc index e78abe0de..3a7314c80 100644 --- a/docs/examples/adf36e2d8fc05c3719c91912481c4e19.asciidoc +++ b/docs/examples/adf36e2d8fc05c3719c91912481c4e19.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/enable-users.asciidoc:44 +// rest-api/security/enable-users.asciidoc:50 [source, python] ---- diff --git a/docs/examples/ae4aa368617637a390074535df86e64b.asciidoc b/docs/examples/ae4aa368617637a390074535df86e64b.asciidoc index 86c8a2122..586e248d5 100644 --- a/docs/examples/ae4aa368617637a390074535df86e64b.asciidoc +++ b/docs/examples/ae4aa368617637a390074535df86e64b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/common/apis/set-upgrade-mode.asciidoc:74 +// ml/common/apis/set-upgrade-mode.asciidoc:80 [source, python] ---- diff --git a/docs/examples/ae82eb17c23cb8e5761cb6240a5ed0a6.asciidoc b/docs/examples/ae82eb17c23cb8e5761cb6240a5ed0a6.asciidoc index 92158eaad..99c88443b 100644 --- a/docs/examples/ae82eb17c23cb8e5761cb6240a5ed0a6.asciidoc +++ b/docs/examples/ae82eb17c23cb8e5761cb6240a5ed0a6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/df-analytics/apis/put-dfanalytics.asciidoc:787 +// ml/df-analytics/apis/put-dfanalytics.asciidoc:793 [source, python] ---- diff --git a/docs/examples/ae9ccfaa146731ab9176df90670db1c2.asciidoc b/docs/examples/ae9ccfaa146731ab9176df90670db1c2.asciidoc index 1651f5822..0e01584d8 100644 --- a/docs/examples/ae9ccfaa146731ab9176df90670db1c2.asciidoc +++ b/docs/examples/ae9ccfaa146731ab9176df90670db1c2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/bulk.asciidoc:499 +// docs/bulk.asciidoc:509 [source, python] ---- diff --git a/docs/examples/aeaa97939a05f5b2f3f2c43b771f35e3.asciidoc b/docs/examples/aeaa97939a05f5b2f3f2c43b771f35e3.asciidoc index bdc24bf70..4801a2961 100644 --- a/docs/examples/aeaa97939a05f5b2f3f2c43b771f35e3.asciidoc +++ b/docs/examples/aeaa97939a05f5b2f3f2c43b771f35e3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/termvectors.asciidoc:310 +// docs/termvectors.asciidoc:316 [source, python] ---- diff --git a/docs/examples/af18f5c5fb2364ae23c6a14431820aba.asciidoc b/docs/examples/af18f5c5fb2364ae23c6a14431820aba.asciidoc index f89b0e83c..6607a5085 100644 --- 
a/docs/examples/af18f5c5fb2364ae23c6a14431820aba.asciidoc +++ b/docs/examples/af18f5c5fb2364ae23c6a14431820aba.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/apis/enrich/get-enrich-policy.asciidoc:88 +// ingest/apis/enrich/get-enrich-policy.asciidoc:94 [source, python] ---- diff --git a/docs/examples/af517b6936fa41d124d68b107b2efdc3.asciidoc b/docs/examples/af517b6936fa41d124d68b107b2efdc3.asciidoc index 2835b2069..fff63c281 100644 --- a/docs/examples/af517b6936fa41d124d68b107b2efdc3.asciidoc +++ b/docs/examples/af517b6936fa41d124d68b107b2efdc3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ilm/apis/delete-lifecycle.asciidoc:76 +// ilm/apis/delete-lifecycle.asciidoc:82 [source, python] ---- diff --git a/docs/examples/af607715d0693587dd12962266359a96.asciidoc b/docs/examples/af607715d0693587dd12962266359a96.asciidoc index c05664775..afd7cc471 100644 --- a/docs/examples/af607715d0693587dd12962266359a96.asciidoc +++ b/docs/examples/af607715d0693587dd12962266359a96.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/repository-s3.asciidoc:225 +// snapshot-restore/repository-s3.asciidoc:232 [source, python] ---- diff --git a/docs/examples/af91019991bee136df5460e2fd4ac72a.asciidoc b/docs/examples/af91019991bee136df5460e2fd4ac72a.asciidoc index 30977ee75..207f8fade 100644 --- a/docs/examples/af91019991bee136df5460e2fd4ac72a.asciidoc +++ b/docs/examples/af91019991bee136df5460e2fd4ac72a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/rollover-index.asciidoc:237 +// indices/rollover-index.asciidoc:243 [source, python] ---- diff --git a/docs/examples/af970eb8b93cdea52209e1256eba9d8c.asciidoc b/docs/examples/af970eb8b93cdea52209e1256eba9d8c.asciidoc index 04a36f456..ff65558a3 100644 --- a/docs/examples/af970eb8b93cdea52209e1256eba9d8c.asciidoc +++ b/docs/examples/af970eb8b93cdea52209e1256eba9d8c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/shard-stores.asciidoc:124 +// indices/shard-stores.asciidoc:130 [source, python] ---- diff --git a/docs/examples/afbea723c4ba0d50c67d04ebb73a4101.asciidoc b/docs/examples/afbea723c4ba0d50c67d04ebb73a4101.asciidoc index 97e849ed6..3e12c53f2 100644 --- a/docs/examples/afbea723c4ba0d50c67d04ebb73a4101.asciidoc +++ b/docs/examples/afbea723c4ba0d50c67d04ebb73a4101.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search-application/apis/delete-search-application.asciidoc:70 +// search-application/apis/delete-search-application.asciidoc:75 [source, python] ---- diff --git a/docs/examples/afc0a9cffc0100797a3f093094394763.asciidoc b/docs/examples/afc0a9cffc0100797a3f093094394763.asciidoc index a7d9d4dd4..46d056150 100644 --- a/docs/examples/afc0a9cffc0100797a3f093094394763.asciidoc +++ b/docs/examples/afc0a9cffc0100797a3f093094394763.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/saml-invalidate-api.asciidoc:82 +// rest-api/security/saml-invalidate-api.asciidoc:88 [source, python] ---- diff --git a/docs/examples/afe30f159937b38d74c869570cfcd369.asciidoc b/docs/examples/afe30f159937b38d74c869570cfcd369.asciidoc index 844a2f7cd..72df106f6 100644 --- a/docs/examples/afe30f159937b38d74c869570cfcd369.asciidoc +++ b/docs/examples/afe30f159937b38d74c869570cfcd369.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/apis/restore-snapshot-api.asciidoc:268 +// snapshot-restore/apis/restore-snapshot-api.asciidoc:274 [source, 
python] ---- diff --git a/docs/examples/afe87a2850326e0328fbebbefec2e839.asciidoc b/docs/examples/afe87a2850326e0328fbebbefec2e839.asciidoc index 8a84f7103..689fa2c78 100644 --- a/docs/examples/afe87a2850326e0328fbebbefec2e839.asciidoc +++ b/docs/examples/afe87a2850326e0328fbebbefec2e839.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-shards.asciidoc:171 +// search/search-shards.asciidoc:177 [source, python] ---- diff --git a/docs/examples/b02e4907c9936c1adc16ccce9d49900d.asciidoc b/docs/examples/b02e4907c9936c1adc16ccce9d49900d.asciidoc index bb7fc6ce1..f88cd7811 100644 --- a/docs/examples/b02e4907c9936c1adc16ccce9d49900d.asciidoc +++ b/docs/examples/b02e4907c9936c1adc16ccce9d49900d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/health.asciidoc:159 +// cluster/health.asciidoc:165 [source, python] ---- diff --git a/docs/examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc b/docs/examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc index 2ac514f11..671165a4a 100644 --- a/docs/examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc +++ b/docs/examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc @@ -3,9 +3,9 @@ [source, python] ---- -resp = client.search_application.render_query( - name="my_search_application", - body=None, +resp = client.perform_request( + "POST", + "/_application/search_application/my_search_application/_render_query", ) print(resp) ---- diff --git a/docs/examples/b0fa301cd3c6b9db128e34114f0c1e8f.asciidoc b/docs/examples/b0fa301cd3c6b9db128e34114f0c1e8f.asciidoc index 720cd29ba..23a519fbc 100644 --- a/docs/examples/b0fa301cd3c6b9db128e34114f0c1e8f.asciidoc +++ b/docs/examples/b0fa301cd3c6b9db128e34114f0c1e8f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update.asciidoc:105 +// docs/update.asciidoc:111 [source, python] ---- diff --git a/docs/examples/b11a0675e49df0709be693297ca73a2c.asciidoc b/docs/examples/b11a0675e49df0709be693297ca73a2c.asciidoc index 73aa660e5..d86bed6c9 100644 --- a/docs/examples/b11a0675e49df0709be693297ca73a2c.asciidoc +++ b/docs/examples/b11a0675e49df0709be693297ca73a2c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/info.asciidoc:197 +// rest-api/info.asciidoc:199 [source, python] ---- diff --git a/docs/examples/b16700002af3aa70639f3e88c733bf35.asciidoc b/docs/examples/b16700002af3aa70639f3e88c733bf35.asciidoc index 7667385c2..09c525941 100644 --- a/docs/examples/b16700002af3aa70639f3e88c733bf35.asciidoc +++ b/docs/examples/b16700002af3aa70639f3e88c733bf35.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/point-in-time-api.asciidoc:95 +// search/point-in-time-api.asciidoc:101 [source, python] ---- diff --git a/docs/examples/b176e0d428726705298184ef39ad5cb2.asciidoc b/docs/examples/b176e0d428726705298184ef39ad5cb2.asciidoc index a3853583b..0f2a5076f 100644 --- a/docs/examples/b176e0d428726705298184ef39ad5cb2.asciidoc +++ b/docs/examples/b176e0d428726705298184ef39ad5cb2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/create-role-mappings.asciidoc:147 +// rest-api/security/create-role-mappings.asciidoc:153 [source, python] ---- diff --git a/docs/examples/b1ee1b0b5f7af596e5f81743cfd3755f.asciidoc b/docs/examples/b1ee1b0b5f7af596e5f81743cfd3755f.asciidoc index e49919089..f98f4b160 100644 --- a/docs/examples/b1ee1b0b5f7af596e5f81743cfd3755f.asciidoc +++ b/docs/examples/b1ee1b0b5f7af596e5f81743cfd3755f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT 
EDIT -// indices/rollover-index.asciidoc:369 +// indices/rollover-index.asciidoc:375 [source, python] ---- diff --git a/docs/examples/b1efa1c51a34dd5ab5511b71a399f5b1.asciidoc b/docs/examples/b1efa1c51a34dd5ab5511b71a399f5b1.asciidoc index 52dccbd9a..4b66f35f8 100644 --- a/docs/examples/b1efa1c51a34dd5ab5511b71a399f5b1.asciidoc +++ b/docs/examples/b1efa1c51a34dd5ab5511b71a399f5b1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/reindex.asciidoc:450 +// docs/reindex.asciidoc:456 [source, python] ---- diff --git a/docs/examples/b22559a7c319f90bc63a41cac1c39b4c.asciidoc b/docs/examples/b22559a7c319f90bc63a41cac1c39b4c.asciidoc index df4bced52..b84d482e7 100644 --- a/docs/examples/b22559a7c319f90bc63a41cac1c39b4c.asciidoc +++ b/docs/examples/b22559a7c319f90bc63a41cac1c39b4c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/invalidate-api-keys.asciidoc:156 +// rest-api/security/invalidate-api-keys.asciidoc:162 [source, python] ---- diff --git a/docs/examples/b23ed357dce8ec0014708b7b2850a8fb.asciidoc b/docs/examples/b23ed357dce8ec0014708b7b2850a8fb.asciidoc index 068770a3d..a30a7bf5e 100644 --- a/docs/examples/b23ed357dce8ec0014708b7b2850a8fb.asciidoc +++ b/docs/examples/b23ed357dce8ec0014708b7b2850a8fb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/tasks.asciidoc:78 +// cat/tasks.asciidoc:84 [source, python] ---- diff --git a/docs/examples/b2440b492149b705ef107137fdccb0c2.asciidoc b/docs/examples/b2440b492149b705ef107137fdccb0c2.asciidoc index de487a140..a53fd2170 100644 --- a/docs/examples/b2440b492149b705ef107137fdccb0c2.asciidoc +++ b/docs/examples/b2440b492149b705ef107137fdccb0c2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ccr/apis/follow/get-follow-info.asciidoc:28 +// ccr/apis/follow/get-follow-info.asciidoc:34 [source, python] ---- diff --git a/docs/examples/b25256ed615cd837461b0bfa590526b7.asciidoc b/docs/examples/b25256ed615cd837461b0bfa590526b7.asciidoc index 4c2b3480e..216d8f1da 100644 --- a/docs/examples/b25256ed615cd837461b0bfa590526b7.asciidoc +++ b/docs/examples/b25256ed615cd837461b0bfa590526b7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ccr/apis/auto-follow/pause-auto-follow-pattern.asciidoc:79 +// ccr/apis/auto-follow/pause-auto-follow-pattern.asciidoc:85 [source, python] ---- diff --git a/docs/examples/b3756e700d0f6c7e8919003bdf26bc8f.asciidoc b/docs/examples/b3756e700d0f6c7e8919003bdf26bc8f.asciidoc index 3f151a158..c146919a8 100644 --- a/docs/examples/b3756e700d0f6c7e8919003bdf26bc8f.asciidoc +++ b/docs/examples/b3756e700d0f6c7e8919003bdf26bc8f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/troubleshooting-unbalanced-cluster.asciidoc:72 +// troubleshooting/troubleshooting-unbalanced-cluster.asciidoc:76 [source, python] ---- diff --git a/docs/examples/b37919cc438b47477343833b4e522408.asciidoc b/docs/examples/b37919cc438b47477343833b4e522408.asciidoc index 527c4a589..234facd65 100644 --- a/docs/examples/b37919cc438b47477343833b4e522408.asciidoc +++ b/docs/examples/b37919cc438b47477343833b4e522408.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/termvectors.asciidoc:418 +// docs/termvectors.asciidoc:424 [source, python] ---- diff --git a/docs/examples/b3a711c3deddcdb8a3f6623184a8b794.asciidoc b/docs/examples/b3a711c3deddcdb8a3f6623184a8b794.asciidoc index 1bb60badb..8d664f1b6 100644 --- a/docs/examples/b3a711c3deddcdb8a3f6623184a8b794.asciidoc +++ 
b/docs/examples/b3a711c3deddcdb8a3f6623184a8b794.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update.asciidoc:118 +// docs/update.asciidoc:124 [source, python] ---- diff --git a/docs/examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc b/docs/examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc index 93191cd7e..fe563aefe 100644 --- a/docs/examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc +++ b/docs/examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/post-inference.asciidoc:101 +// inference/post-inference.asciidoc:107 [source, python] ---- diff --git a/docs/examples/b47945c7db8868dd36ba079b742f2a90.asciidoc b/docs/examples/b47945c7db8868dd36ba079b742f2a90.asciidoc index 063d7e389..63aa846a0 100644 --- a/docs/examples/b47945c7db8868dd36ba079b742f2a90.asciidoc +++ b/docs/examples/b47945c7db8868dd36ba079b742f2a90.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search-application/apis/put-search-application.asciidoc:197 +// search-application/apis/put-search-application.asciidoc:202 [source, python] ---- diff --git a/docs/examples/b4aec2a1d353852507c091bdb629b765.asciidoc b/docs/examples/b4aec2a1d353852507c091bdb629b765.asciidoc index 8e73e995a..c8e4248f3 100644 --- a/docs/examples/b4aec2a1d353852507c091bdb629b765.asciidoc +++ b/docs/examples/b4aec2a1d353852507c091bdb629b765.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/put-filter.asciidoc:51 +// ml/anomaly-detection/apis/put-filter.asciidoc:57 [source, python] ---- diff --git a/docs/examples/b515427f8685ca7d79176def672d19fa.asciidoc b/docs/examples/b515427f8685ca7d79176def672d19fa.asciidoc index 968ed6448..16018d578 100644 --- a/docs/examples/b515427f8685ca7d79176def672d19fa.asciidoc +++ b/docs/examples/b515427f8685ca7d79176def672d19fa.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update-by-query.asciidoc:612 +// docs/update-by-query.asciidoc:618 [source, python] ---- diff --git a/docs/examples/b583bf8d3a2f49d633aa2cfed5606418.asciidoc b/docs/examples/b583bf8d3a2f49d633aa2cfed5606418.asciidoc index b7d77e2f8..61fa4718a 100644 --- a/docs/examples/b583bf8d3a2f49d633aa2cfed5606418.asciidoc +++ b/docs/examples/b583bf8d3a2f49d633aa2cfed5606418.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-component-template.asciidoc:189 +// indices/put-component-template.asciidoc:195 [source, python] ---- diff --git a/docs/examples/b590241c4296299b836fbb5a95bdd2dc.asciidoc b/docs/examples/b590241c4296299b836fbb5a95bdd2dc.asciidoc new file mode 100644 index 000000000..9cec50aa5 --- /dev/null +++ b/docs/examples/b590241c4296299b836fbb5a95bdd2dc.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// quickstart/aggs-tutorial.asciidoc:299 + +[source, python] +---- +resp = client.search( + index="kibana_sample_data_ecommerce", + size=0, + aggs={ + "avg_order_value": { + "avg": { + "field": "taxful_total_price" + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/b5bc1bb7278f2f95bc54790c78c928e0.asciidoc b/docs/examples/b5bc1bb7278f2f95bc54790c78c928e0.asciidoc index 5f3f94bdc..30ad58e2e 100644 --- a/docs/examples/b5bc1bb7278f2f95bc54790c78c928e0.asciidoc +++ b/docs/examples/b5bc1bb7278f2f95bc54790c78c928e0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/get-job.asciidoc:163 +// rollup/apis/get-job.asciidoc:169 [source, python] ---- diff --git 
a/docs/examples/b61afb7ca29a11243232ffcc8b5a43cf.asciidoc b/docs/examples/b61afb7ca29a11243232ffcc8b5a43cf.asciidoc index 115fa8975..1741306a9 100644 --- a/docs/examples/b61afb7ca29a11243232ffcc8b5a43cf.asciidoc +++ b/docs/examples/b61afb7ca29a11243232ffcc8b5a43cf.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/get-field-mapping.asciidoc:165 +// indices/get-field-mapping.asciidoc:171 [source, python] ---- diff --git a/docs/examples/b620ef4400d2f660fe2c67835938442c.asciidoc b/docs/examples/b620ef4400d2f660fe2c67835938442c.asciidoc index 1960d81c3..ac87bba6e 100644 --- a/docs/examples/b620ef4400d2f660fe2c67835938442c.asciidoc +++ b/docs/examples/b620ef4400d2f660fe2c67835938442c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// autoscaling/apis/delete-autoscaling-policy.asciidoc:62 +// autoscaling/apis/delete-autoscaling-policy.asciidoc:68 [source, python] ---- diff --git a/docs/examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc b/docs/examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc index a785417c2..756defefe 100644 --- a/docs/examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc +++ b/docs/examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// text-structure/apis/find-field-structure.asciidoc:89 +// text-structure/apis/find-field-structure.asciidoc:95 [source, python] ---- @@ -186,10 +186,13 @@ resp = client.bulk( ) print(resp) -resp1 = client.text_structure.find_field_structure( - index="test-logs", - field="message", - body=None, +resp1 = client.perform_request( + "GET", + "/_text_structure/find_field_structure", + params={ + "index": "test-logs", + "field": "message" + }, ) print(resp1) ---- diff --git a/docs/examples/b66be1daf6c220eb66d94e708b2fae39.asciidoc b/docs/examples/b66be1daf6c220eb66d94e708b2fae39.asciidoc index 8902c863a..aef59ca57 100644 --- a/docs/examples/b66be1daf6c220eb66d94e708b2fae39.asciidoc +++ b/docs/examples/b66be1daf6c220eb66d94e708b2fae39.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/state.asciidoc:144 +// cluster/state.asciidoc:150 [source, python] ---- diff --git a/docs/examples/b6d278737d27973e498ac61cda9e5126.asciidoc b/docs/examples/b6d278737d27973e498ac61cda9e5126.asciidoc new file mode 100644 index 000000000..cb466a7e1 --- /dev/null +++ b/docs/examples/b6d278737d27973e498ac61cda9e5126.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// quickstart/aggs-tutorial.asciidoc:509 + +[source, python] +---- +resp = client.search( + index="kibana_sample_data_ecommerce", + size=0, + aggs={ + "daily_orders": { + "date_histogram": { + "field": "order_date", + "calendar_interval": "day", + "format": "yyyy-MM-dd", + "min_doc_count": 0 + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/b6e385760e036e36827f719b540d9c11.asciidoc b/docs/examples/b6e385760e036e36827f719b540d9c11.asciidoc index c8743faa1..57c5190f5 100644 --- a/docs/examples/b6e385760e036e36827f719b540d9c11.asciidoc +++ b/docs/examples/b6e385760e036e36827f719b540d9c11.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/profile.asciidoc:1181 +// search/profile.asciidoc:1186 [source, python] ---- diff --git a/docs/examples/b6f690896001f8f9ad5bf24e1304a552.asciidoc b/docs/examples/b6f690896001f8f9ad5bf24e1304a552.asciidoc index 5b63289c6..c228264e0 100644 --- a/docs/examples/b6f690896001f8f9ad5bf24e1304a552.asciidoc +++ b/docs/examples/b6f690896001f8f9ad5bf24e1304a552.asciidoc @@ -1,5 +1,5 @@ // This file is 
autogenerated, DO NOT EDIT -// mapping/types/dense-vector.asciidoc:163 +// mapping/types/dense-vector.asciidoc:162 [source, python] ---- diff --git a/docs/examples/b728d6ba226dba719aadcd8b8099cc74.asciidoc b/docs/examples/b728d6ba226dba719aadcd8b8099cc74.asciidoc index 73ab3b7f8..6f3abd146 100644 --- a/docs/examples/b728d6ba226dba719aadcd8b8099cc74.asciidoc +++ b/docs/examples/b728d6ba226dba719aadcd8b8099cc74.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:174 +// troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:177 [source, python] ---- diff --git a/docs/examples/b7df0848b2dc3093f931976db5b8cfff.asciidoc b/docs/examples/b7df0848b2dc3093f931976db5b8cfff.asciidoc index eb44d6b47..3e7d9c41b 100644 --- a/docs/examples/b7df0848b2dc3093f931976db5b8cfff.asciidoc +++ b/docs/examples/b7df0848b2dc3093f931976db5b8cfff.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:32 +// troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:38 [source, python] ---- diff --git a/docs/examples/b7f8bd33c22f3c93336ab57c2e091f73.asciidoc b/docs/examples/b7f8bd33c22f3c93336ab57c2e091f73.asciidoc index 45fac83a4..0e2e2908a 100644 --- a/docs/examples/b7f8bd33c22f3c93336ab57c2e091f73.asciidoc +++ b/docs/examples/b7f8bd33c22f3c93336ab57c2e091f73.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-rules/apis/delete-query-rule.asciidoc:72 +// query-rules/apis/delete-query-rule.asciidoc:78 [source, python] ---- diff --git a/docs/examples/b80e1f5b26bae4f3c2f8a604b7caaf17.asciidoc b/docs/examples/b80e1f5b26bae4f3c2f8a604b7caaf17.asciidoc index df7cab9e6..5eba3747c 100644 --- a/docs/examples/b80e1f5b26bae4f3c2f8a604b7caaf17.asciidoc +++ b/docs/examples/b80e1f5b26bae4f3c2f8a604b7caaf17.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/create-role-mappings.asciidoc:284 +// rest-api/security/create-role-mappings.asciidoc:290 [source, python] ---- diff --git a/docs/examples/b839f79a5d58506baed5714f1876ab55.asciidoc b/docs/examples/b839f79a5d58506baed5714f1876ab55.asciidoc index 59095bc1a..10e3d1228 100644 --- a/docs/examples/b839f79a5d58506baed5714f1876ab55.asciidoc +++ b/docs/examples/b839f79a5d58506baed5714f1876ab55.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// eql/eql-search-api.asciidoc:24 +// eql/eql-search-api.asciidoc:30 [source, python] ---- diff --git a/docs/examples/b85716ba42a57096452665c38995da7d.asciidoc b/docs/examples/b85716ba42a57096452665c38995da7d.asciidoc index d6e72e9ce..7110472c4 100644 --- a/docs/examples/b85716ba42a57096452665c38995da7d.asciidoc +++ b/docs/examples/b85716ba42a57096452665c38995da7d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/df-analytics/apis/preview-dfanalytics.asciidoc:69 +// ml/df-analytics/apis/preview-dfanalytics.asciidoc:75 [source, python] ---- diff --git a/docs/examples/b88a2d96da1401d548a4540cca223d27.asciidoc b/docs/examples/b88a2d96da1401d548a4540cca223d27.asciidoc index 87c9d8ad7..eb6b9ac44 100644 --- a/docs/examples/b88a2d96da1401d548a4540cca223d27.asciidoc +++ b/docs/examples/b88a2d96da1401d548a4540cca223d27.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-vector-tile-api.asciidoc:702 +// search/search-vector-tile-api.asciidoc:707 [source, python] ---- diff --git a/docs/examples/ba10b644a4e9a2e7d78744ca607355d0.asciidoc 
b/docs/examples/ba10b644a4e9a2e7d78744ca607355d0.asciidoc index 4458c3d08..6d90b2c6e 100644 --- a/docs/examples/ba10b644a4e9a2e7d78744ca607355d0.asciidoc +++ b/docs/examples/ba10b644a4e9a2e7d78744ca607355d0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ccr/apis/follow/put-follow.asciidoc:85 +// ccr/apis/follow/put-follow.asciidoc:91 [source, python] ---- diff --git a/docs/examples/ba21a7fbb74180ff138d97032f28ace7.asciidoc b/docs/examples/ba21a7fbb74180ff138d97032f28ace7.asciidoc index 7895c8e6c..a03ce6cb0 100644 --- a/docs/examples/ba21a7fbb74180ff138d97032f28ace7.asciidoc +++ b/docs/examples/ba21a7fbb74180ff138d97032f28ace7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/update-user-profile-data.asciidoc:100 +// rest-api/security/update-user-profile-data.asciidoc:106 [source, python] ---- diff --git a/docs/examples/ba6040de55afb2c8fb9e5b24bb038820.asciidoc b/docs/examples/ba6040de55afb2c8fb9e5b24bb038820.asciidoc index 1655222d3..400d32098 100644 --- a/docs/examples/ba6040de55afb2c8fb9e5b24bb038820.asciidoc +++ b/docs/examples/ba6040de55afb2c8fb9e5b24bb038820.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/get-index-template-v1.asciidoc:88 +// indices/get-index-template-v1.asciidoc:94 [source, python] ---- diff --git a/docs/examples/baadbfffcd0c16f51eb3537f516dc3ed.asciidoc b/docs/examples/baadbfffcd0c16f51eb3537f516dc3ed.asciidoc index 3cc6bcd3f..b85e9ca70 100644 --- a/docs/examples/baadbfffcd0c16f51eb3537f516dc3ed.asciidoc +++ b/docs/examples/baadbfffcd0c16f51eb3537f516dc3ed.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/disable-user-profile.asciidoc:59 +// rest-api/security/disable-user-profile.asciidoc:65 [source, python] ---- diff --git a/docs/examples/bb293e1bdf0c6f6d9069eeb7edc9d399.asciidoc b/docs/examples/bb293e1bdf0c6f6d9069eeb7edc9d399.asciidoc index 29b3df548..ec608f476 100644 --- a/docs/examples/bb293e1bdf0c6f6d9069eeb7edc9d399.asciidoc +++ b/docs/examples/bb293e1bdf0c6f6d9069eeb7edc9d399.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/disable-users.asciidoc:45 +// rest-api/security/disable-users.asciidoc:51 [source, python] ---- diff --git a/docs/examples/bb2ba5d1885f87506f90dbb002e518f4.asciidoc b/docs/examples/bb2ba5d1885f87506f90dbb002e518f4.asciidoc new file mode 100644 index 000000000..42efc866a --- /dev/null +++ b/docs/examples/bb2ba5d1885f87506f90dbb002e518f4.asciidoc @@ -0,0 +1,49 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/retrievers-examples.asciidoc:354 + +[source, python] +---- +resp = client.search( + index="retrievers_example", + retriever={ + "rrf": { + "retrievers": [ + { + "standard": { + "query": { + "query_string": { + "query": "(information retrieval) OR (artificial intelligence)", + "default_field": "text" + } + } + } + }, + { + "knn": { + "field": "vector", + "query_vector": [ + 0.23, + 0.67, + 0.89 + ], + "k": 3, + "num_candidates": 5 + } + } + ], + "rank_window_size": 10, + "rank_constant": 1 + } + }, + highlight={ + "fields": { + "text": { + "fragment_size": 150, + "number_of_fragments": 3 + } + } + }, + source=False, +) +print(resp) +---- diff --git a/docs/examples/bb9e268ec62d19ca2a6366cbb48fae68.asciidoc b/docs/examples/bb9e268ec62d19ca2a6366cbb48fae68.asciidoc index 3c0f50533..6545eae47 100644 --- a/docs/examples/bb9e268ec62d19ca2a6366cbb48fae68.asciidoc +++ b/docs/examples/bb9e268ec62d19ca2a6366cbb48fae68.asciidoc @@ -1,5 +1,5 @@ // 
This file is autogenerated, DO NOT EDIT -// cat/count.asciidoc:89 +// cat/count.asciidoc:95 [source, python] ---- diff --git a/docs/examples/bcae0f00ae1e6f08fa395ca741fe84f9.asciidoc b/docs/examples/bcae0f00ae1e6f08fa395ca741fe84f9.asciidoc index a58da8614..90a19379d 100644 --- a/docs/examples/bcae0f00ae1e6f08fa395ca741fe84f9.asciidoc +++ b/docs/examples/bcae0f00ae1e6f08fa395ca741fe84f9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/rank-eval.asciidoc:397 +// search/rank-eval.asciidoc:403 [source, python] ---- diff --git a/docs/examples/bcb572658986d69ae17c28ddd7e4bfd8.asciidoc b/docs/examples/bcb572658986d69ae17c28ddd7e4bfd8.asciidoc index 6725aea3d..ba7480151 100644 --- a/docs/examples/bcb572658986d69ae17c28ddd7e4bfd8.asciidoc +++ b/docs/examples/bcb572658986d69ae17c28ddd7e4bfd8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/field-usage-stats.asciidoc:166 +// indices/field-usage-stats.asciidoc:172 [source, python] ---- diff --git a/docs/examples/bccd4eb26b1a325d103b12e198a13c08.asciidoc b/docs/examples/bccd4eb26b1a325d103b12e198a13c08.asciidoc new file mode 100644 index 000000000..76f96d817 --- /dev/null +++ b/docs/examples/bccd4eb26b1a325d103b12e198a13c08.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// index-modules/slowlog.asciidoc:102 + +[source, python] +---- +resp = client.indices.get_settings( + index="_all", + expand_wildcards="all", + filter_path="*.settings.index.*.slowlog", +) +print(resp) +---- diff --git a/docs/examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc b/docs/examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc index 4b72e7551..f3b87e892 100644 --- a/docs/examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc +++ b/docs/examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc @@ -1,9 +1,12 @@ // This file is autogenerated, DO NOT EDIT -// ingest/apis/simulate-ingest.asciidoc:346 +// ingest/apis/simulate-ingest.asciidoc:352 [source, python] ---- -resp = client.simulate.ingest( +resp = client.perform_request( + "POST", + "/_ingest/_simulate", + headers={"Content-Type": "application/json"}, body={ "docs": [ { diff --git a/docs/examples/bd23c3a03907b1238dcb07ab9eecae7b.asciidoc b/docs/examples/bd23c3a03907b1238dcb07ab9eecae7b.asciidoc index c26a90e71..6e789e961 100644 --- a/docs/examples/bd23c3a03907b1238dcb07ab9eecae7b.asciidoc +++ b/docs/examples/bd23c3a03907b1238dcb07ab9eecae7b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update-by-query.asciidoc:361 +// docs/update-by-query.asciidoc:367 [source, python] ---- diff --git a/docs/examples/bd3d710ec50a151453e141691163af72.asciidoc b/docs/examples/bd3d710ec50a151453e141691163af72.asciidoc index 2c7c56029..d0315498f 100644 --- a/docs/examples/bd3d710ec50a151453e141691163af72.asciidoc +++ b/docs/examples/bd3d710ec50a151453e141691163af72.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/tasks.asciidoc:270 +// cluster/tasks.asciidoc:276 [source, python] ---- diff --git a/docs/examples/bd458073196a19ecdeb24a8016488c20.asciidoc b/docs/examples/bd458073196a19ecdeb24a8016488c20.asciidoc index de4ff9093..5e1f43e88 100644 --- a/docs/examples/bd458073196a19ecdeb24a8016488c20.asciidoc +++ b/docs/examples/bd458073196a19ecdeb24a8016488c20.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/delete-index-template.asciidoc:26 +// indices/delete-index-template.asciidoc:32 [source, python] ---- diff --git a/docs/examples/bd57976bc93ca64b2d3e001df9f06c82.asciidoc 
b/docs/examples/bd57976bc93ca64b2d3e001df9f06c82.asciidoc index 15a587aaa..29151f6fe 100644 --- a/docs/examples/bd57976bc93ca64b2d3e001df9f06c82.asciidoc +++ b/docs/examples/bd57976bc93ca64b2d3e001df9f06c82.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/resolve.asciidoc:101 +// indices/resolve.asciidoc:107 [source, python] ---- diff --git a/docs/examples/bd5bd5d8b3d81241335fe1e5747080ac.asciidoc b/docs/examples/bd5bd5d8b3d81241335fe1e5747080ac.asciidoc index 95a630fbe..24c08784c 100644 --- a/docs/examples/bd5bd5d8b3d81241335fe1e5747080ac.asciidoc +++ b/docs/examples/bd5bd5d8b3d81241335fe1e5747080ac.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ilm/error-handling.asciidoc:121 +// ilm/error-handling.asciidoc:122 [source, python] ---- diff --git a/docs/examples/bd68666ca2e0be12f7624016317a62bc.asciidoc b/docs/examples/bd68666ca2e0be12f7624016317a62bc.asciidoc index b9451d30c..c11eee665 100644 --- a/docs/examples/bd68666ca2e0be12f7624016317a62bc.asciidoc +++ b/docs/examples/bd68666ca2e0be12f7624016317a62bc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/nodes-stats.asciidoc:2567 +// cluster/nodes-stats.asciidoc:2573 [source, python] ---- diff --git a/docs/examples/bd6f30e3caa3632260da42d9ff82c98c.asciidoc b/docs/examples/bd6f30e3caa3632260da42d9ff82c98c.asciidoc index b9da78352..af92a432c 100644 --- a/docs/examples/bd6f30e3caa3632260da42d9ff82c98c.asciidoc +++ b/docs/examples/bd6f30e3caa3632260da42d9ff82c98c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/clear-api-key-cache.asciidoc:57 +// rest-api/security/clear-api-key-cache.asciidoc:63 [source, python] ---- diff --git a/docs/examples/bd7a1417fc27b5a801334ec44462b376.asciidoc b/docs/examples/bd7a1417fc27b5a801334ec44462b376.asciidoc index 6ba29e06c..09be0234b 100644 --- a/docs/examples/bd7a1417fc27b5a801334ec44462b376.asciidoc +++ b/docs/examples/bd7a1417fc27b5a801334ec44462b376.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/datafeeds.asciidoc:124 +// cat/datafeeds.asciidoc:130 [source, python] ---- diff --git a/docs/examples/bdb30dd52d32f50994008f4f9c0da5f0.asciidoc b/docs/examples/bdb30dd52d32f50994008f4f9c0da5f0.asciidoc index d7b3c2505..c0d5fa7e2 100644 --- a/docs/examples/bdb30dd52d32f50994008f4f9c0da5f0.asciidoc +++ b/docs/examples/bdb30dd52d32f50994008f4f9c0da5f0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update-by-query.asciidoc:565 +// docs/update-by-query.asciidoc:571 [source, python] ---- diff --git a/docs/examples/bdc1afd2181154bb78797360f9dbb1a0.asciidoc b/docs/examples/bdc1afd2181154bb78797360f9dbb1a0.asciidoc index 3754d13f6..5728f3c24 100644 --- a/docs/examples/bdc1afd2181154bb78797360f9dbb1a0.asciidoc +++ b/docs/examples/bdc1afd2181154bb78797360f9dbb1a0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/watcher/ack-watch.asciidoc:134 +// rest-api/watcher/ack-watch.asciidoc:140 [source, python] ---- diff --git a/docs/examples/bdc55256fa5f701680631a149dbb75a9.asciidoc b/docs/examples/bdc55256fa5f701680631a149dbb75a9.asciidoc new file mode 100644 index 000000000..7699e4d0e --- /dev/null +++ b/docs/examples/bdc55256fa5f701680631a149dbb75a9.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// quickstart/aggs-tutorial.asciidoc:420 + +[source, python] +---- +resp = client.search( + index="kibana_sample_data_ecommerce", + size=0, + aggs={ + "sales_by_category": { + "terms": { + "field": 
"category.keyword", + "size": 5, + "order": { + "_count": "desc" + } + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/bdd28276618235487ac96bd6679bc206.asciidoc b/docs/examples/bdd28276618235487ac96bd6679bc206.asciidoc new file mode 100644 index 000000000..a5bf8f7df --- /dev/null +++ b/docs/examples/bdd28276618235487ac96bd6679bc206.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// quickstart/aggs-tutorial.asciidoc:1770 + +[source, python] +---- +resp = client.search( + index="kibana_sample_data_ecommerce", + size=0, + aggs={ + "daily_sales": { + "date_histogram": { + "field": "order_date", + "calendar_interval": "day" + }, + "aggs": { + "revenue": { + "sum": { + "field": "taxful_total_price" + } + }, + "cumulative_revenue": { + "cumulative_sum": { + "buckets_path": "revenue" + } + } + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/bde74dbbcef8ebf8541cae2c1711255f.asciidoc b/docs/examples/bde74dbbcef8ebf8541cae2c1711255f.asciidoc index 1e4a1811f..df2a86237 100644 --- a/docs/examples/bde74dbbcef8ebf8541cae2c1711255f.asciidoc +++ b/docs/examples/bde74dbbcef8ebf8541cae2c1711255f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search-application/apis/get-search-application.asciidoc:88 +// search-application/apis/get-search-application.asciidoc:93 [source, python] ---- diff --git a/docs/examples/be3a6431d01846950dc1a39a7a6a1faa.asciidoc b/docs/examples/be3a6431d01846950dc1a39a7a6a1faa.asciidoc index aede29898..ab9ea6048 100644 --- a/docs/examples/be3a6431d01846950dc1a39a7a6a1faa.asciidoc +++ b/docs/examples/be3a6431d01846950dc1a39a7a6a1faa.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update-by-query.asciidoc:526 +// docs/update-by-query.asciidoc:532 [source, python] ---- diff --git a/docs/examples/be5b415d7f33d6f0397ac2f8b5c10521.asciidoc b/docs/examples/be5b415d7f33d6f0397ac2f8b5c10521.asciidoc index efc1ccbce..fce193164 100644 --- a/docs/examples/be5b415d7f33d6f0397ac2f8b5c10521.asciidoc +++ b/docs/examples/be5b415d7f33d6f0397ac2f8b5c10521.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update-by-query.asciidoc:641 +// docs/update-by-query.asciidoc:647 [source, python] ---- diff --git a/docs/examples/be5c5a9c25901737585e4fff9195da3c.asciidoc b/docs/examples/be5c5a9c25901737585e4fff9195da3c.asciidoc index ab6ef34d1..1590cce0b 100644 --- a/docs/examples/be5c5a9c25901737585e4fff9195da3c.asciidoc +++ b/docs/examples/be5c5a9c25901737585e4fff9195da3c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/dense-vector.asciidoc:437 +// mapping/types/dense-vector.asciidoc:435 [source, python] ---- diff --git a/docs/examples/be5fef0640c3a650ee96f84e3376a1be.asciidoc b/docs/examples/be5fef0640c3a650ee96f84e3376a1be.asciidoc index d57c9b600..416fc9bf1 100644 --- a/docs/examples/be5fef0640c3a650ee96f84e3376a1be.asciidoc +++ b/docs/examples/be5fef0640c3a650ee96f84e3376a1be.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update.asciidoc:333 +// docs/update.asciidoc:339 [source, python] ---- diff --git a/docs/examples/be6b0bfcdce1ef100af89f74da5d4748.asciidoc b/docs/examples/be6b0bfcdce1ef100af89f74da5d4748.asciidoc index e049ea57e..b0059ab57 100644 --- a/docs/examples/be6b0bfcdce1ef100af89f74da5d4748.asciidoc +++ b/docs/examples/be6b0bfcdce1ef100af89f74da5d4748.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/put-trained-model-definition-part.asciidoc:64 +// 
ml/trained-models/apis/put-trained-model-definition-part.asciidoc:70 [source, python] ---- diff --git a/docs/examples/beaf43b274b0f32cf3cf48f59e5cb1f2.asciidoc b/docs/examples/beaf43b274b0f32cf3cf48f59e5cb1f2.asciidoc index 3248655ee..81658ef75 100644 --- a/docs/examples/beaf43b274b0f32cf3cf48f59e5cb1f2.asciidoc +++ b/docs/examples/beaf43b274b0f32cf3cf48f59e5cb1f2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/apis/get-snapshot-api.asciidoc:745 +// snapshot-restore/apis/get-snapshot-api.asciidoc:751 [source, python] ---- diff --git a/docs/examples/beb0b9ff4f68672273fcff1b7bae706b.asciidoc b/docs/examples/beb0b9ff4f68672273fcff1b7bae706b.asciidoc index 916bf24f0..4f6931ca5 100644 --- a/docs/examples/beb0b9ff4f68672273fcff1b7bae706b.asciidoc +++ b/docs/examples/beb0b9ff4f68672273fcff1b7bae706b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:405 +// indices/put-mapping.asciidoc:411 [source, python] ---- diff --git a/docs/examples/befa73a8a419fcf3b7798548b54a20bf.asciidoc b/docs/examples/befa73a8a419fcf3b7798548b54a20bf.asciidoc index 4f5c79684..7bd330d62 100644 --- a/docs/examples/befa73a8a419fcf3b7798548b54a20bf.asciidoc +++ b/docs/examples/befa73a8a419fcf3b7798548b54a20bf.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/knn-search.asciidoc:1167 +// search/search-your-data/knn-search.asciidoc:1146 [source, python] ---- diff --git a/docs/examples/bf1de9fa1b825fa875d27fa08821a6d1.asciidoc b/docs/examples/bf1de9fa1b825fa875d27fa08821a6d1.asciidoc index 11de217e5..23c293ac8 100644 --- a/docs/examples/bf1de9fa1b825fa875d27fa08821a6d1.asciidoc +++ b/docs/examples/bf1de9fa1b825fa875d27fa08821a6d1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-across-clusters.asciidoc:122 +// esql/esql-across-clusters.asciidoc:117 [source, python] ---- diff --git a/docs/examples/bf3c3bc41c593a80faebef1df353e483.asciidoc b/docs/examples/bf3c3bc41c593a80faebef1df353e483.asciidoc new file mode 100644 index 000000000..7ea310dd1 --- /dev/null +++ b/docs/examples/bf3c3bc41c593a80faebef1df353e483.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// inference/service-jinaai.asciidoc:169 + +[source, python] +---- +resp = client.inference.put( + task_type="rerank", + inference_id="jinaai-rerank", + inference_config={ + "service": "jinaai", + "service_settings": { + "api_key": "", + "model_id": "jina-reranker-v2-base-multilingual" + }, + "task_settings": { + "top_n": 10, + "return_documents": True + } + }, +) +print(resp) +---- diff --git a/docs/examples/bfb8a15cd05b43094ffbce8078bad3e1.asciidoc b/docs/examples/bfb8a15cd05b43094ffbce8078bad3e1.asciidoc index 2b3be1a00..23d0c7749 100644 --- a/docs/examples/bfb8a15cd05b43094ffbce8078bad3e1.asciidoc +++ b/docs/examples/bfb8a15cd05b43094ffbce8078bad3e1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/apis/get-snapshot-api.asciidoc:351 +// snapshot-restore/apis/get-snapshot-api.asciidoc:357 [source, python] ---- diff --git a/docs/examples/bfdad8a928ea30d7cf60d0a0a6bc6e2e.asciidoc b/docs/examples/bfdad8a928ea30d7cf60d0a0a6bc6e2e.asciidoc index 1cca191c4..f252a8b5a 100644 --- a/docs/examples/bfdad8a928ea30d7cf60d0a0a6bc6e2e.asciidoc +++ b/docs/examples/bfdad8a928ea30d7cf60d0a0a6bc6e2e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/bulk.asciidoc:711 +// docs/bulk.asciidoc:721 [source, python] ---- diff --git 
a/docs/examples/c00c9412609832ebceb9e786dd9542df.asciidoc b/docs/examples/c00c9412609832ebceb9e786dd9542df.asciidoc index b6b337eac..e98a75057 100644 --- a/docs/examples/c00c9412609832ebceb9e786dd9542df.asciidoc +++ b/docs/examples/c00c9412609832ebceb9e786dd9542df.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-name-description-api.asciidoc:80 +// connector/apis/update-connector-name-description-api.asciidoc:85 [source, python] ---- diff --git a/docs/examples/c067182d385f59ce5952fb9a716fbf05.asciidoc b/docs/examples/c067182d385f59ce5952fb9a716fbf05.asciidoc index 317a4d06d..44ec44783 100644 --- a/docs/examples/c067182d385f59ce5952fb9a716fbf05.asciidoc +++ b/docs/examples/c067182d385f59ce5952fb9a716fbf05.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/post-calendar-event.asciidoc:79 +// ml/anomaly-detection/apis/post-calendar-event.asciidoc:85 [source, python] ---- diff --git a/docs/examples/c088ce5291ae28650b6091cdec489398.asciidoc b/docs/examples/c088ce5291ae28650b6091cdec489398.asciidoc index 283484b2e..275200553 100644 --- a/docs/examples/c088ce5291ae28650b6091cdec489398.asciidoc +++ b/docs/examples/c088ce5291ae28650b6091cdec489398.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/point-in-time-api.asciidoc:49 +// search/point-in-time-api.asciidoc:55 [source, python] ---- diff --git a/docs/examples/c0ddfb2e6315f5bcf0d3ef414b5bbed3.asciidoc b/docs/examples/c0ddfb2e6315f5bcf0d3ef414b5bbed3.asciidoc index e41d19d84..2c089393c 100644 --- a/docs/examples/c0ddfb2e6315f5bcf0d3ef414b5bbed3.asciidoc +++ b/docs/examples/c0ddfb2e6315f5bcf0d3ef414b5bbed3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-configuration-api.asciidoc:336 +// connector/apis/update-connector-configuration-api.asciidoc:342 [source, python] ---- diff --git a/docs/examples/c0ebaa33e750b87555dc352073f692e8.asciidoc b/docs/examples/c0ebaa33e750b87555dc352073f692e8.asciidoc index 8d65ebc46..a0352b7c5 100644 --- a/docs/examples/c0ebaa33e750b87555dc352073f692e8.asciidoc +++ b/docs/examples/c0ebaa33e750b87555dc352073f692e8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/update-settings.asciidoc:181 +// indices/update-settings.asciidoc:187 [source, python] ---- diff --git a/docs/examples/c0ff8b3db994c4736f7579dde18097d2.asciidoc b/docs/examples/c0ff8b3db994c4736f7579dde18097d2.asciidoc index aaccc1288..79ce5358c 100644 --- a/docs/examples/c0ff8b3db994c4736f7579dde18097d2.asciidoc +++ b/docs/examples/c0ff8b3db994c4736f7579dde18097d2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/get.asciidoc:297 +// docs/get.asciidoc:303 [source, python] ---- diff --git a/docs/examples/c12d6e962f083c728f9397932f05202e.asciidoc b/docs/examples/c12d6e962f083c728f9397932f05202e.asciidoc index 89fab2035..a8aaad14e 100644 --- a/docs/examples/c12d6e962f083c728f9397932f05202e.asciidoc +++ b/docs/examples/c12d6e962f083c728f9397932f05202e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/list-connector-sync-jobs-api.asciidoc:72 +// connector/apis/list-connector-sync-jobs-api.asciidoc:78 [source, python] ---- diff --git a/docs/examples/c1409f591a01589638d9b00436ce42c0.asciidoc b/docs/examples/c1409f591a01589638d9b00436ce42c0.asciidoc index 34d8ac063..9ee6afc5d 100644 --- a/docs/examples/c1409f591a01589638d9b00436ce42c0.asciidoc +++ 
b/docs/examples/c1409f591a01589638d9b00436ce42c0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/clear-cache.asciidoc:61 +// rest-api/security/clear-cache.asciidoc:67 [source, python] ---- diff --git a/docs/examples/c18100d62ed31bc9e05f62900156e6a8.asciidoc b/docs/examples/c18100d62ed31bc9e05f62900156e6a8.asciidoc index 0011dff0f..0df79b777 100644 --- a/docs/examples/c18100d62ed31bc9e05f62900156e6a8.asciidoc +++ b/docs/examples/c18100d62ed31bc9e05f62900156e6a8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/list-connectors-api.asciidoc:93 +// connector/apis/list-connectors-api.asciidoc:102 [source, python] ---- diff --git a/docs/examples/c187b52646cedeebe0716327add65642.asciidoc b/docs/examples/c187b52646cedeebe0716327add65642.asciidoc index 876d69159..cd37f933e 100644 --- a/docs/examples/c187b52646cedeebe0716327add65642.asciidoc +++ b/docs/examples/c187b52646cedeebe0716327add65642.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// sql/apis/get-async-sql-search-api.asciidoc:12 +// sql/apis/get-async-sql-search-api.asciidoc:18 [source, python] ---- diff --git a/docs/examples/c1ac9e53b04f7acee4b4933969d6b574.asciidoc b/docs/examples/c1ac9e53b04f7acee4b4933969d6b574.asciidoc index 2f923d81d..36082bf5e 100644 --- a/docs/examples/c1ac9e53b04f7acee4b4933969d6b574.asciidoc +++ b/docs/examples/c1ac9e53b04f7acee4b4933969d6b574.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// transform/apis/preview-transform.asciidoc:290 +// transform/apis/preview-transform.asciidoc:296 [source, python] ---- diff --git a/docs/examples/c1ad9ff64728a5bfeeb485e60ec694a1.asciidoc b/docs/examples/c1ad9ff64728a5bfeeb485e60ec694a1.asciidoc index 14f229c73..0217abf41 100644 --- a/docs/examples/c1ad9ff64728a5bfeeb485e60ec694a1.asciidoc +++ b/docs/examples/c1ad9ff64728a5bfeeb485e60ec694a1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/rank-eval.asciidoc:453 +// search/rank-eval.asciidoc:459 [source, python] ---- diff --git a/docs/examples/c208a06212dc0cf6ac413d4f2c154296.asciidoc b/docs/examples/c208a06212dc0cf6ac413d4f2c154296.asciidoc index 6c5826271..527a3cbe2 100644 --- a/docs/examples/c208a06212dc0cf6ac413d4f2c154296.asciidoc +++ b/docs/examples/c208a06212dc0cf6ac413d4f2c154296.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/flush.asciidoc:131 +// indices/flush.asciidoc:137 [source, python] ---- diff --git a/docs/examples/c21aaedb5752a83489476fa3b5e2e9ff.asciidoc b/docs/examples/c21aaedb5752a83489476fa3b5e2e9ff.asciidoc index a30469252..fc46fa4b8 100644 --- a/docs/examples/c21aaedb5752a83489476fa3b5e2e9ff.asciidoc +++ b/docs/examples/c21aaedb5752a83489476fa3b5e2e9ff.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-rules/apis/put-query-rule.asciidoc:114 +// query-rules/apis/put-query-rule.asciidoc:120 [source, python] ---- diff --git a/docs/examples/c21eb4bc30087188241cbba6b6b89999.asciidoc b/docs/examples/c21eb4bc30087188241cbba6b6b89999.asciidoc index 8f36dbfdd..56ad7bb67 100644 --- a/docs/examples/c21eb4bc30087188241cbba6b6b89999.asciidoc +++ b/docs/examples/c21eb4bc30087188241cbba6b6b89999.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-service-type-api.asciidoc:78 +// connector/apis/update-connector-service-type-api.asciidoc:84 [source, python] ---- diff --git a/docs/examples/c23e32775340d7bc6f46820313014d8a.asciidoc 
b/docs/examples/c23e32775340d7bc6f46820313014d8a.asciidoc index b7197b25f..55a754aa6 100644 --- a/docs/examples/c23e32775340d7bc6f46820313014d8a.asciidoc +++ b/docs/examples/c23e32775340d7bc6f46820313014d8a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// scripting/using.asciidoc:520 +// scripting/using.asciidoc:525 [source, python] ---- diff --git a/docs/examples/c27b7d9836aa4ea756f59e9c42911721.asciidoc b/docs/examples/c27b7d9836aa4ea756f59e9c42911721.asciidoc index 090491b43..baaf5ca47 100644 --- a/docs/examples/c27b7d9836aa4ea756f59e9c42911721.asciidoc +++ b/docs/examples/c27b7d9836aa4ea756f59e9c42911721.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/scroll-api.asciidoc:29 +// search/scroll-api.asciidoc:35 [source, python] ---- diff --git a/docs/examples/c38c882c642dd412e8fa4c3eed49d12f.asciidoc b/docs/examples/c38c882c642dd412e8fa4c3eed49d12f.asciidoc index c08062c1d..951ff74b1 100644 --- a/docs/examples/c38c882c642dd412e8fa4c3eed49d12f.asciidoc +++ b/docs/examples/c38c882c642dd412e8fa4c3eed49d12f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/search-as-you-type.asciidoc:147 +// mapping/types/search-as-you-type.asciidoc:162 [source, python] ---- diff --git a/docs/examples/c47f030216a3c89f92f31787fc4d5df5.asciidoc b/docs/examples/c47f030216a3c89f92f31787fc4d5df5.asciidoc index 9aa8729a4..5a205275c 100644 --- a/docs/examples/c47f030216a3c89f92f31787fc4d5df5.asciidoc +++ b/docs/examples/c47f030216a3c89f92f31787fc4d5df5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/plugins.asciidoc:51 +// cat/plugins.asciidoc:56 [source, python] ---- diff --git a/docs/examples/c4a1d03dcfb82913d0724a42b0a89f20.asciidoc b/docs/examples/c4a1d03dcfb82913d0724a42b0a89f20.asciidoc index 671dab80f..982758f0e 100644 --- a/docs/examples/c4a1d03dcfb82913d0724a42b0a89f20.asciidoc +++ b/docs/examples/c4a1d03dcfb82913d0724a42b0a89f20.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/clearcache.asciidoc:152 +// indices/clearcache.asciidoc:158 [source, python] ---- diff --git a/docs/examples/c4b727723b57052b6504bb74fe09abc6.asciidoc b/docs/examples/c4b727723b57052b6504bb74fe09abc6.asciidoc index 6f9e84063..0a0dda82a 100644 --- a/docs/examples/c4b727723b57052b6504bb74fe09abc6.asciidoc +++ b/docs/examples/c4b727723b57052b6504bb74fe09abc6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-index-template.asciidoc:12 +// indices/put-index-template.asciidoc:18 [source, python] ---- diff --git a/docs/examples/c526fca1609b4c3c1d12dfd218d69a50.asciidoc b/docs/examples/c526fca1609b4c3c1d12dfd218d69a50.asciidoc index 1134d03e1..8ff744913 100644 --- a/docs/examples/c526fca1609b4c3c1d12dfd218d69a50.asciidoc +++ b/docs/examples/c526fca1609b4c3c1d12dfd218d69a50.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:377 +// indices/put-mapping.asciidoc:383 [source, python] ---- diff --git a/docs/examples/c54597143ac86540726f6422fd98b22e.asciidoc b/docs/examples/c54597143ac86540726f6422fd98b22e.asciidoc index 62e88c770..88ab8810e 100644 --- a/docs/examples/c54597143ac86540726f6422fd98b22e.asciidoc +++ b/docs/examples/c54597143ac86540726f6422fd98b22e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/update-settings.asciidoc:50 +// rest-api/security/update-settings.asciidoc:56 [source, python] ---- diff --git a/docs/examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc 
b/docs/examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc index 3d3044f4a..20dca25b2 100644 --- a/docs/examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc +++ b/docs/examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc @@ -1,9 +1,12 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/bulk-update-api-keys.asciidoc:236 +// rest-api/security/bulk-update-api-keys.asciidoc:242 [source, python] ---- -resp = client.security.bulk_update_api_keys( +resp = client.perform_request( + "POST", + "/_security/api_key/_bulk_update", + headers={"Content-Type": "application/json"}, body={ "ids": [ "VuaCfGcBCdbkQm-e5aOx", diff --git a/docs/examples/c5ba7c4badb5ef5ca32740106e4aa6b6.asciidoc b/docs/examples/c5ba7c4badb5ef5ca32740106e4aa6b6.asciidoc index fa3ef8edf..641c97480 100644 --- a/docs/examples/c5ba7c4badb5ef5ca32740106e4aa6b6.asciidoc +++ b/docs/examples/c5ba7c4badb5ef5ca32740106e4aa6b6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/termvectors.asciidoc:36 +// docs/termvectors.asciidoc:42 [source, python] ---- diff --git a/docs/examples/c6339d09f85000a6432304b0ec63b8f6.asciidoc b/docs/examples/c6339d09f85000a6432304b0ec63b8f6.asciidoc index a25ac7386..448adcb42 100644 --- a/docs/examples/c6339d09f85000a6432304b0ec63b8f6.asciidoc +++ b/docs/examples/c6339d09f85000a6432304b0ec63b8f6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-component-template.asciidoc:229 +// indices/put-component-template.asciidoc:235 [source, python] ---- diff --git a/docs/examples/c64b61bedb21b9def8fce5092e677af9.asciidoc b/docs/examples/c64b61bedb21b9def8fce5092e677af9.asciidoc index d7ae6d555..645a460ed 100644 --- a/docs/examples/c64b61bedb21b9def8fce5092e677af9.asciidoc +++ b/docs/examples/c64b61bedb21b9def8fce5092e677af9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/suggesters.asciidoc:46 +// search/suggesters.asciidoc:52 [source, python] ---- diff --git a/docs/examples/c65b00a285f510dcd2865aa3539b4e03.asciidoc b/docs/examples/c65b00a285f510dcd2865aa3539b4e03.asciidoc index 23e572ea8..acb1e92e7 100644 --- a/docs/examples/c65b00a285f510dcd2865aa3539b4e03.asciidoc +++ b/docs/examples/c65b00a285f510dcd2865aa3539b4e03.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// transform/apis/get-transform.asciidoc:100 +// transform/apis/get-transform.asciidoc:106 [source, python] ---- diff --git a/docs/examples/c67b0f00c2e690303c0e5af2f51e0fea.asciidoc b/docs/examples/c67b0f00c2e690303c0e5af2f51e0fea.asciidoc index 15e29b183..19869bb83 100644 --- a/docs/examples/c67b0f00c2e690303c0e5af2f51e0fea.asciidoc +++ b/docs/examples/c67b0f00c2e690303c0e5af2f51e0fea.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/suggesters.asciidoc:7 +// search/suggesters.asciidoc:13 [source, python] ---- diff --git a/docs/examples/c6b5c695a9b757b5e7325345b206bde5.asciidoc b/docs/examples/c6b5c695a9b757b5e7325345b206bde5.asciidoc index 6d84b16b7..cf1c2d5e0 100644 --- a/docs/examples/c6b5c695a9b757b5e7325345b206bde5.asciidoc +++ b/docs/examples/c6b5c695a9b757b5e7325345b206bde5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/apis/delete-pipeline.asciidoc:82 +// ingest/apis/delete-pipeline.asciidoc:88 [source, python] ---- diff --git a/docs/examples/c6bdd5c7de79d6d9ac8e33a397b511e8.asciidoc b/docs/examples/c6bdd5c7de79d6d9ac8e33a397b511e8.asciidoc index 95bf11903..0fe500a97 100644 --- a/docs/examples/c6bdd5c7de79d6d9ac8e33a397b511e8.asciidoc +++ 
b/docs/examples/c6bdd5c7de79d6d9ac8e33a397b511e8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:321 +// indices/put-mapping.asciidoc:327 [source, python] ---- diff --git a/docs/examples/c765ce78f3605c0e70d213f22aac8a53.asciidoc b/docs/examples/c765ce78f3605c0e70d213f22aac8a53.asciidoc index 8164e0fb0..6385d38d3 100644 --- a/docs/examples/c765ce78f3605c0e70d213f22aac8a53.asciidoc +++ b/docs/examples/c765ce78f3605c0e70d213f22aac8a53.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// autoscaling/apis/put-autoscaling-policy.asciidoc:67 +// autoscaling/apis/put-autoscaling-policy.asciidoc:73 [source, python] ---- diff --git a/docs/examples/c87038b96ab06d9a741a130f94de4f02.asciidoc b/docs/examples/c87038b96ab06d9a741a130f94de4f02.asciidoc index aae4b0770..ea01b3863 100644 --- a/docs/examples/c87038b96ab06d9a741a130f94de4f02.asciidoc +++ b/docs/examples/c87038b96ab06d9a741a130f94de4f02.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/delete.asciidoc:138 +// docs/delete.asciidoc:144 [source, python] ---- diff --git a/docs/examples/c873f9cd093e26515148f052e28c7805.asciidoc b/docs/examples/c873f9cd093e26515148f052e28c7805.asciidoc index 3c8efb1f1..c1b267238 100644 --- a/docs/examples/c873f9cd093e26515148f052e28c7805.asciidoc +++ b/docs/examples/c873f9cd093e26515148f052e28c7805.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/get-snapshot.asciidoc:242 +// ml/anomaly-detection/apis/get-snapshot.asciidoc:248 [source, python] ---- diff --git a/docs/examples/c8e2109b19d50467ab83a40006462e9f.asciidoc b/docs/examples/c8e2109b19d50467ab83a40006462e9f.asciidoc index 3a0678550..1d983ce69 100644 --- a/docs/examples/c8e2109b19d50467ab83a40006462e9f.asciidoc +++ b/docs/examples/c8e2109b19d50467ab83a40006462e9f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/apis/enrich/execute-enrich-policy.asciidoc:39 +// ingest/apis/enrich/execute-enrich-policy.asciidoc:45 [source, python] ---- diff --git a/docs/examples/c956bf1f0829a5f0357c0494ed8b6ca3.asciidoc b/docs/examples/c956bf1f0829a5f0357c0494ed8b6ca3.asciidoc index 24b3619a6..76e31cb91 100644 --- a/docs/examples/c956bf1f0829a5f0357c0494ed8b6ca3.asciidoc +++ b/docs/examples/c956bf1f0829a5f0357c0494ed8b6ca3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-template-api.asciidoc:37 +// search/search-template-api.asciidoc:43 [source, python] ---- diff --git a/docs/examples/c97fd95ebdcf56cc973582e37f732ed2.asciidoc b/docs/examples/c97fd95ebdcf56cc973582e37f732ed2.asciidoc index 75fc57939..b131ee6dc 100644 --- a/docs/examples/c97fd95ebdcf56cc973582e37f732ed2.asciidoc +++ b/docs/examples/c97fd95ebdcf56cc973582e37f732ed2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/apis/enrich/get-enrich-policy.asciidoc:176 +// ingest/apis/enrich/get-enrich-policy.asciidoc:182 [source, python] ---- diff --git a/docs/examples/c9c396b94bb88098477e2b08b55a12ee.asciidoc b/docs/examples/c9c396b94bb88098477e2b08b55a12ee.asciidoc index 0bfaabd41..e300dd9bb 100644 --- a/docs/examples/c9c396b94bb88098477e2b08b55a12ee.asciidoc +++ b/docs/examples/c9c396b94bb88098477e2b08b55a12ee.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/bulk.asciidoc:764 +// docs/bulk.asciidoc:774 [source, python] ---- diff --git a/docs/examples/c9ce07a7d3d8a317f08535bdd3aa69a3.asciidoc b/docs/examples/c9ce07a7d3d8a317f08535bdd3aa69a3.asciidoc index 
3716ae2e5..477e99432 100644 --- a/docs/examples/c9ce07a7d3d8a317f08535bdd3aa69a3.asciidoc +++ b/docs/examples/c9ce07a7d3d8a317f08535bdd3aa69a3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update.asciidoc:218 +// docs/update.asciidoc:224 [source, python] ---- diff --git a/docs/examples/ca3bcd6278510ebced5f74484033cb36.asciidoc b/docs/examples/ca3bcd6278510ebced5f74484033cb36.asciidoc index b28f8a28a..09d949b21 100644 --- a/docs/examples/ca3bcd6278510ebced5f74484033cb36.asciidoc +++ b/docs/examples/ca3bcd6278510ebced5f74484033cb36.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// scripting/apis/get-script-languages-api.asciidoc:11 +// scripting/apis/get-script-languages-api.asciidoc:17 [source, python] ---- diff --git a/docs/examples/ca5ae0eb7709f3807bc6239cd4bd9141.asciidoc b/docs/examples/ca5ae0eb7709f3807bc6239cd4bd9141.asciidoc index d9c529956..87288c597 100644 --- a/docs/examples/ca5ae0eb7709f3807bc6239cd4bd9141.asciidoc +++ b/docs/examples/ca5ae0eb7709f3807bc6239cd4bd9141.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-api-keys.asciidoc:240 +// rest-api/security/get-api-keys.asciidoc:246 [source, python] ---- diff --git a/docs/examples/ca5dda98e977125d40a7fe1e178e213f.asciidoc b/docs/examples/ca5dda98e977125d40a7fe1e178e213f.asciidoc index 2d19bb671..1ae157bfb 100644 --- a/docs/examples/ca5dda98e977125d40a7fe1e178e213f.asciidoc +++ b/docs/examples/ca5dda98e977125d40a7fe1e178e213f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/sparse-vector-query.asciidoc:129 +// query-dsl/sparse-vector-query.asciidoc:131 [source, python] ---- diff --git a/docs/examples/ca98afbd6a90f63e02f62239d225313b.asciidoc b/docs/examples/ca98afbd6a90f63e02f62239d225313b.asciidoc index 44fc24e51..a41fd0d74 100644 --- a/docs/examples/ca98afbd6a90f63e02f62239d225313b.asciidoc +++ b/docs/examples/ca98afbd6a90f63e02f62239d225313b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/dangling-index-import.asciidoc:59 +// indices/dangling-index-import.asciidoc:65 [source, python] ---- diff --git a/docs/examples/caaafef1a76c2bec677704c2dc233218.asciidoc b/docs/examples/caaafef1a76c2bec677704c2dc233218.asciidoc index 79cb3d0fa..eda82a171 100644 --- a/docs/examples/caaafef1a76c2bec677704c2dc233218.asciidoc +++ b/docs/examples/caaafef1a76c2bec677704c2dc233218.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/simulate-index.asciidoc:33 +// indices/simulate-index.asciidoc:39 [source, python] ---- diff --git a/docs/examples/caab99520d3fe41f6154d74a7f696057.asciidoc b/docs/examples/caab99520d3fe41f6154d74a7f696057.asciidoc index 5ee574efc..bfb5bdd7b 100644 --- a/docs/examples/caab99520d3fe41f6154d74a7f696057.asciidoc +++ b/docs/examples/caab99520d3fe41f6154d74a7f696057.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/delete-index.asciidoc:10 +// indices/delete-index.asciidoc:16 [source, python] ---- diff --git a/docs/examples/cafed0e2c2b1d1574eb4a5ecd514a97a.asciidoc b/docs/examples/cafed0e2c2b1d1574eb4a5ecd514a97a.asciidoc index ca255c59b..783d37942 100644 --- a/docs/examples/cafed0e2c2b1d1574eb4a5ecd514a97a.asciidoc +++ b/docs/examples/cafed0e2c2b1d1574eb4a5ecd514a97a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/split-index.asciidoc:10 +// indices/split-index.asciidoc:16 [source, python] ---- diff --git a/docs/examples/cb0c3223fd45148497df73adfba2e9ce.asciidoc 
b/docs/examples/cb0c3223fd45148497df73adfba2e9ce.asciidoc index 96a7054cf..aabe1e15b 100644 --- a/docs/examples/cb0c3223fd45148497df73adfba2e9ce.asciidoc +++ b/docs/examples/cb0c3223fd45148497df73adfba2e9ce.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/reindex.asciidoc:668 +// docs/reindex.asciidoc:674 [source, python] ---- diff --git a/docs/examples/cb2f70601cb004b9ece9b0b43a9dc21a.asciidoc b/docs/examples/cb2f70601cb004b9ece9b0b43a9dc21a.asciidoc index 881808964..e79ec43a7 100644 --- a/docs/examples/cb2f70601cb004b9ece9b0b43a9dc21a.asciidoc +++ b/docs/examples/cb2f70601cb004b9ece9b0b43a9dc21a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// modules/indices/request_cache.asciidoc:47 +// shard-request-cache.asciidoc:49 [source, python] ---- diff --git a/docs/examples/cb71332115c92cfb89375abd30b8bbbb.asciidoc b/docs/examples/cb71332115c92cfb89375abd30b8bbbb.asciidoc index 1070f618e..1c2f2e4ab 100644 --- a/docs/examples/cb71332115c92cfb89375abd30b8bbbb.asciidoc +++ b/docs/examples/cb71332115c92cfb89375abd30b8bbbb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat.asciidoc:36 +// cat.asciidoc:42 [source, python] ---- diff --git a/docs/examples/cba3462a307e2483c14e3e198f6960e3.asciidoc b/docs/examples/cba3462a307e2483c14e3e198f6960e3.asciidoc index d135a1805..8901f62e7 100644 --- a/docs/examples/cba3462a307e2483c14e3e198f6960e3.asciidoc +++ b/docs/examples/cba3462a307e2483c14e3e198f6960e3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ilm/apis/put-lifecycle.asciidoc:60 +// ilm/apis/put-lifecycle.asciidoc:66 [source, python] ---- diff --git a/docs/examples/cbfd6f23f8283e64ec3157c65bb722c4.asciidoc b/docs/examples/cbfd6f23f8283e64ec3157c65bb722c4.asciidoc index b962ea061..b0c8c3ee0 100644 --- a/docs/examples/cbfd6f23f8283e64ec3157c65bb722c4.asciidoc +++ b/docs/examples/cbfd6f23f8283e64ec3157c65bb722c4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat.asciidoc:212 +// cat.asciidoc:218 [source, python] ---- diff --git a/docs/examples/cc56be758d5d75febbd975786187c861.asciidoc b/docs/examples/cc56be758d5d75febbd975786187c861.asciidoc index 3af0ff287..6e3db1557 100644 --- a/docs/examples/cc56be758d5d75febbd975786187c861.asciidoc +++ b/docs/examples/cc56be758d5d75febbd975786187c861.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/create-service-token.asciidoc:97 +// rest-api/security/create-service-token.asciidoc:103 [source, python] ---- diff --git a/docs/examples/cc90639f2e65bd89cb73296cac6135cf.asciidoc b/docs/examples/cc90639f2e65bd89cb73296cac6135cf.asciidoc index 58814beee..6f32a16b9 100644 --- a/docs/examples/cc90639f2e65bd89cb73296cac6135cf.asciidoc +++ b/docs/examples/cc90639f2e65bd89cb73296cac6135cf.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/delete-trained-models.asciidoc:54 +// ml/trained-models/apis/delete-trained-models.asciidoc:60 [source, python] ---- diff --git a/docs/examples/cc9dac8db7a1482e2fbe3235197c3de1.asciidoc b/docs/examples/cc9dac8db7a1482e2fbe3235197c3de1.asciidoc index 6ea9a977e..e9f3ea560 100644 --- a/docs/examples/cc9dac8db7a1482e2fbe3235197c3de1.asciidoc +++ b/docs/examples/cc9dac8db7a1482e2fbe3235197c3de1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/apis/restore-snapshot-api.asciidoc:242 +// snapshot-restore/apis/restore-snapshot-api.asciidoc:248 [source, python] ---- diff --git 
a/docs/examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc b/docs/examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc index f42b8b06d..38ea94f46 100644 --- a/docs/examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc +++ b/docs/examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc @@ -1,9 +1,12 @@ // This file is autogenerated, DO NOT EDIT -// ingest/apis/simulate-ingest.asciidoc:56 +// ingest/apis/simulate-ingest.asciidoc:62 [source, python] ---- -resp = client.simulate.ingest( +resp = client.perform_request( + "POST", + "/_ingest/_simulate", + headers={"Content-Type": "application/json"}, body={ "docs": [ { diff --git a/docs/examples/ccec66fb20d5ede6c691e0890cfe402a.asciidoc b/docs/examples/ccec66fb20d5ede6c691e0890cfe402a.asciidoc index 1dc1475d9..944545aec 100644 --- a/docs/examples/ccec66fb20d5ede6c691e0890cfe402a.asciidoc +++ b/docs/examples/ccec66fb20d5ede6c691e0890cfe402a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/delete-job.asciidoc:85 +// ml/anomaly-detection/apis/delete-job.asciidoc:91 [source, python] ---- diff --git a/docs/examples/cd373a6eb1ef4748616500b26fab3006.asciidoc b/docs/examples/cd373a6eb1ef4748616500b26fab3006.asciidoc index 4c58be4f3..50cdf22c1 100644 --- a/docs/examples/cd373a6eb1ef4748616500b26fab3006.asciidoc +++ b/docs/examples/cd373a6eb1ef4748616500b26fab3006.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/async-search.asciidoc:15 +// search/async-search.asciidoc:21 [source, python] ---- diff --git a/docs/examples/cd38c601ab293a6ec0e2df71d0c96b58.asciidoc b/docs/examples/cd38c601ab293a6ec0e2df71d0c96b58.asciidoc index e743b527a..c1c4768d6 100644 --- a/docs/examples/cd38c601ab293a6ec0e2df71d0c96b58.asciidoc +++ b/docs/examples/cd38c601ab293a6ec0e2df71d0c96b58.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-index-template.asciidoc:347 +// indices/put-index-template.asciidoc:353 [source, python] ---- diff --git a/docs/examples/cd67ad2c09fafef2d441c3502d0bb3d7.asciidoc b/docs/examples/cd67ad2c09fafef2d441c3502d0bb3d7.asciidoc index fc5ff0439..2a2f8d0a3 100644 --- a/docs/examples/cd67ad2c09fafef2d441c3502d0bb3d7.asciidoc +++ b/docs/examples/cd67ad2c09fafef2d441c3502d0bb3d7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/lifecycle/apis/put-lifecycle.asciidoc:78 +// data-streams/lifecycle/apis/put-lifecycle.asciidoc:84 [source, python] ---- diff --git a/docs/examples/cd8006165ac64f1ef99af48e5a35a25b.asciidoc b/docs/examples/cd8006165ac64f1ef99af48e5a35a25b.asciidoc index 75d163ad4..3decea8d2 100644 --- a/docs/examples/cd8006165ac64f1ef99af48e5a35a25b.asciidoc +++ b/docs/examples/cd8006165ac64f1ef99af48e5a35a25b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-app-privileges.asciidoc:58 +// rest-api/security/get-app-privileges.asciidoc:64 [source, python] ---- diff --git a/docs/examples/cd93919e13f656ad2e6629f45c579b93.asciidoc b/docs/examples/cd93919e13f656ad2e6629f45c579b93.asciidoc index c922da32f..b59f5e22a 100644 --- a/docs/examples/cd93919e13f656ad2e6629f45c579b93.asciidoc +++ b/docs/examples/cd93919e13f656ad2e6629f45c579b93.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/shard-stores.asciidoc:114 +// indices/shard-stores.asciidoc:120 [source, python] ---- diff --git a/docs/examples/cdb68b3f565df7c85e52a55864b37d40.asciidoc b/docs/examples/cdb68b3f565df7c85e52a55864b37d40.asciidoc index 3009eb74b..f3ee218e6 100644 --- 
a/docs/examples/cdb68b3f565df7c85e52a55864b37d40.asciidoc +++ b/docs/examples/cdb68b3f565df7c85e52a55864b37d40.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:358 +// indices/put-mapping.asciidoc:364 [source, python] ---- diff --git a/docs/examples/cdb7613b445e6ed6e8b473f9cae1af90.asciidoc b/docs/examples/cdb7613b445e6ed6e8b473f9cae1af90.asciidoc new file mode 100644 index 000000000..ec203bab7 --- /dev/null +++ b/docs/examples/cdb7613b445e6ed6e8b473f9cae1af90.asciidoc @@ -0,0 +1,35 @@ +// This file is autogenerated, DO NOT EDIT +// query-dsl/intervals-query.asciidoc:497 + +[source, python] +---- +resp = client.search( + query={ + "intervals": { + "my_text": { + "all_of": { + "ordered": True, + "max_gaps": 1, + "intervals": [ + { + "match": { + "query": "my favorite food", + "max_gaps": 0, + "ordered": True + } + }, + { + "match": { + "query": "cold porridge", + "max_gaps": 4, + "ordered": True + } + } + ] + } + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/cdce7bc083dfb36e6f1d465a5c9d5049.asciidoc b/docs/examples/cdce7bc083dfb36e6f1d465a5c9d5049.asciidoc index e6a36d4c5..ec6802cda 100644 --- a/docs/examples/cdce7bc083dfb36e6f1d465a5c9d5049.asciidoc +++ b/docs/examples/cdce7bc083dfb36e6f1d465a5c9d5049.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/list-connector-sync-jobs-api.asciidoc:50 +// connector/apis/list-connector-sync-jobs-api.asciidoc:56 [source, python] ---- diff --git a/docs/examples/cdd29b01e730b3996de68a2788050021.asciidoc b/docs/examples/cdd29b01e730b3996de68a2788050021.asciidoc index 0e2c7e21c..6646dfebc 100644 --- a/docs/examples/cdd29b01e730b3996de68a2788050021.asciidoc +++ b/docs/examples/cdd29b01e730b3996de68a2788050021.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/apis/enrich/delete-enrich-policy.asciidoc:36 +// ingest/apis/enrich/delete-enrich-policy.asciidoc:42 [source, python] ---- diff --git a/docs/examples/cdf400299acd1c7b1b7bb42e284e3d08.asciidoc b/docs/examples/cdf400299acd1c7b1b7bb42e284e3d08.asciidoc index a7e4c711d..e4f4228bc 100644 --- a/docs/examples/cdf400299acd1c7b1b7bb42e284e3d08.asciidoc +++ b/docs/examples/cdf400299acd1c7b1b7bb42e284e3d08.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update.asciidoc:135 +// docs/update.asciidoc:141 [source, python] ---- diff --git a/docs/examples/cdfd4fef983c1c0fe8d7417f67d01eae.asciidoc b/docs/examples/cdfd4fef983c1c0fe8d7417f67d01eae.asciidoc index d61b8b76f..f73420eb9 100644 --- a/docs/examples/cdfd4fef983c1c0fe8d7417f67d01eae.asciidoc +++ b/docs/examples/cdfd4fef983c1c0fe8d7417f67d01eae.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:153 +// troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:157 [source, python] ---- diff --git a/docs/examples/ce0c3d7330727f7673cf68fc9a1cfb86.asciidoc b/docs/examples/ce0c3d7330727f7673cf68fc9a1cfb86.asciidoc index c0642da1f..feef5c26f 100644 --- a/docs/examples/ce0c3d7330727f7673cf68fc9a1cfb86.asciidoc +++ b/docs/examples/ce0c3d7330727f7673cf68fc9a1cfb86.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/clearcache.asciidoc:11 +// indices/clearcache.asciidoc:17 [source, python] ---- diff --git a/docs/examples/ce2c2e8f5a2e4daf051b6e10122e5aae.asciidoc b/docs/examples/ce2c2e8f5a2e4daf051b6e10122e5aae.asciidoc index e6cd4cf1e..2e8eefb75 100644 --- 
a/docs/examples/ce2c2e8f5a2e4daf051b6e10122e5aae.asciidoc +++ b/docs/examples/ce2c2e8f5a2e4daf051b6e10122e5aae.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/dense-vector.asciidoc:521 +// mapping/types/dense-vector.asciidoc:519 [source, python] ---- diff --git a/docs/examples/ce3c391c2b1915cfc44a2917bca71d19.asciidoc b/docs/examples/ce3c391c2b1915cfc44a2917bca71d19.asciidoc index b1166bc60..e039c088e 100644 --- a/docs/examples/ce3c391c2b1915cfc44a2917bca71d19.asciidoc +++ b/docs/examples/ce3c391c2b1915cfc44a2917bca71d19.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/df-analytics/apis/put-dfanalytics.asciidoc:644 +// ml/df-analytics/apis/put-dfanalytics.asciidoc:650 [source, python] ---- diff --git a/docs/examples/ce8471d31e5d60309e142feb040fd2f8.asciidoc b/docs/examples/ce8471d31e5d60309e142feb040fd2f8.asciidoc index 99b1f9058..6f6452a25 100644 --- a/docs/examples/ce8471d31e5d60309e142feb040fd2f8.asciidoc +++ b/docs/examples/ce8471d31e5d60309e142feb040fd2f8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/watcher/query-watches.asciidoc:67 +// rest-api/watcher/query-watches.asciidoc:73 [source, python] ---- diff --git a/docs/examples/ce8eebfb810335803630abe83278bee7.asciidoc b/docs/examples/ce8eebfb810335803630abe83278bee7.asciidoc index 7ca152642..7ea39330f 100644 --- a/docs/examples/ce8eebfb810335803630abe83278bee7.asciidoc +++ b/docs/examples/ce8eebfb810335803630abe83278bee7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-api-keys.asciidoc:247 +// rest-api/security/get-api-keys.asciidoc:253 [source, python] ---- diff --git a/docs/examples/b26b5574438e4eaf146b2428bf537c51.asciidoc b/docs/examples/cecfaa659af6646b3b67d7b311586fa0.asciidoc similarity index 91% rename from docs/examples/b26b5574438e4eaf146b2428bf537c51.asciidoc rename to docs/examples/cecfaa659af6646b3b67d7b311586fa0.asciidoc index ad8b79e9a..dd81ff102 100644 --- a/docs/examples/b26b5574438e4eaf146b2428bf537c51.asciidoc +++ b/docs/examples/cecfaa659af6646b3b67d7b311586fa0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/processors/attachment.asciidoc:349 +// ingest/processors/attachment.asciidoc:396 [source, python] ---- @@ -14,7 +14,7 @@ resp = client.ingest.put_pipeline( "attachment": { "target_field": "_ingest._value.attachment", "field": "_ingest._value.data", - "remove_binary": False + "remove_binary": True } } } diff --git a/docs/examples/cedb56a71cc743d80263ce352bb21720.asciidoc b/docs/examples/cedb56a71cc743d80263ce352bb21720.asciidoc index c101699af..f9a7673cd 100644 --- a/docs/examples/cedb56a71cc743d80263ce352bb21720.asciidoc +++ b/docs/examples/cedb56a71cc743d80263ce352bb21720.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-elser.asciidoc:113 +// inference/service-elser.asciidoc:157 [source, python] ---- diff --git a/docs/examples/cee491dd0a8d10ed0cb11a2faa0c99f0.asciidoc b/docs/examples/cee491dd0a8d10ed0cb11a2faa0c99f0.asciidoc index 3dfe110b6..9f939fd21 100644 --- a/docs/examples/cee491dd0a8d10ed0cb11a2faa0c99f0.asciidoc +++ b/docs/examples/cee491dd0a8d10ed0cb11a2faa0c99f0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/infer-trained-model.asciidoc:1179 +// ml/trained-models/apis/infer-trained-model.asciidoc:1185 [source, python] ---- diff --git a/docs/examples/cee591c1fc70d4f180c623a3a6d07755.asciidoc 
b/docs/examples/cee591c1fc70d4f180c623a3a6d07755.asciidoc index 5b7dabd86..074c899a4 100644 --- a/docs/examples/cee591c1fc70d4f180c623a3a6d07755.asciidoc +++ b/docs/examples/cee591c1fc70d4f180c623a3a6d07755.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/invalidate-tokens.asciidoc:72 +// rest-api/security/invalidate-tokens.asciidoc:78 [source, python] ---- diff --git a/docs/examples/cf8ca470156698dbf47fdc822d0a714f.asciidoc b/docs/examples/cf8ca470156698dbf47fdc822d0a714f.asciidoc index 1210a1941..b3a8628a7 100644 --- a/docs/examples/cf8ca470156698dbf47fdc822d0a714f.asciidoc +++ b/docs/examples/cf8ca470156698dbf47fdc822d0a714f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/get-desired-nodes.asciidoc:64 +// cluster/get-desired-nodes.asciidoc:70 [source, python] ---- diff --git a/docs/examples/cfad3631be0634ee49c424f9ccec62d9.asciidoc b/docs/examples/cfad3631be0634ee49c424f9ccec62d9.asciidoc index dba931e60..3259ea16f 100644 --- a/docs/examples/cfad3631be0634ee49c424f9ccec62d9.asciidoc +++ b/docs/examples/cfad3631be0634ee49c424f9ccec62d9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/invalidate-api-keys.asciidoc:168 +// rest-api/security/invalidate-api-keys.asciidoc:174 [source, python] ---- diff --git a/docs/examples/cffce059425d3d21e7f9571500d63524.asciidoc b/docs/examples/cffce059425d3d21e7f9571500d63524.asciidoc index 8f33f2bdc..398be53fe 100644 --- a/docs/examples/cffce059425d3d21e7f9571500d63524.asciidoc +++ b/docs/examples/cffce059425d3d21e7f9571500d63524.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/delete-roles.asciidoc:40 +// rest-api/security/delete-roles.asciidoc:46 [source, python] ---- diff --git a/docs/examples/d01a590fa9ea8a0cb34ed8dda502296c.asciidoc b/docs/examples/d01a590fa9ea8a0cb34ed8dda502296c.asciidoc deleted file mode 100644 index a696e0d6d..000000000 --- a/docs/examples/d01a590fa9ea8a0cb34ed8dda502296c.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// migration/migrate_9_0/transient-settings-migration-guide.asciidoc:38 - -[source, python] ----- -resp = client.cluster.get_settings( - flat_settings=True, - filter_path="transient", -) -print(resp) ----- diff --git a/docs/examples/d01d309b0257d6fbca6d0941adeb3256.asciidoc b/docs/examples/d01d309b0257d6fbca6d0941adeb3256.asciidoc index 971c4b3e6..aaa4270d5 100644 --- a/docs/examples/d01d309b0257d6fbca6d0941adeb3256.asciidoc +++ b/docs/examples/d01d309b0257d6fbca6d0941adeb3256.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/simulate-index.asciidoc:145 +// indices/simulate-index.asciidoc:151 [source, python] ---- diff --git a/docs/examples/d03139a851888db53f8b7affd85eb495.asciidoc b/docs/examples/d03139a851888db53f8b7affd85eb495.asciidoc index fb06927ed..c003a5730 100644 --- a/docs/examples/d03139a851888db53f8b7affd85eb495.asciidoc +++ b/docs/examples/d03139a851888db53f8b7affd85eb495.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/check-in-connector-api.asciidoc:69 +// connector/apis/check-in-connector-api.asciidoc:75 [source, python] ---- diff --git a/docs/examples/d095b422d9803c02b62c01adffc85376.asciidoc b/docs/examples/d095b422d9803c02b62c01adffc85376.asciidoc index c56dbd0a0..8a4331655 100644 --- a/docs/examples/d095b422d9803c02b62c01adffc85376.asciidoc +++ b/docs/examples/d095b422d9803c02b62c01adffc85376.asciidoc @@ -1,5 +1,5 @@ // This file is 
autogenerated, DO NOT EDIT -// rollup/apis/get-job.asciidoc:88 +// rollup/apis/get-job.asciidoc:94 [source, python] ---- diff --git a/docs/examples/d14fe5838fc02224f4b5ade2626d6026.asciidoc b/docs/examples/d14fe5838fc02224f4b5ade2626d6026.asciidoc index cb72ce548..eefd5c71b 100644 --- a/docs/examples/d14fe5838fc02224f4b5ade2626d6026.asciidoc +++ b/docs/examples/d14fe5838fc02224f4b5ade2626d6026.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ilm/apis/explain.asciidoc:100 +// ilm/apis/explain.asciidoc:106 [source, python] ---- diff --git a/docs/examples/d1b53bc9794e8609bd6f2245624bf977.asciidoc b/docs/examples/d1b53bc9794e8609bd6f2245624bf977.asciidoc index f7b027a88..872762ff1 100644 --- a/docs/examples/d1b53bc9794e8609bd6f2245624bf977.asciidoc +++ b/docs/examples/d1b53bc9794e8609bd6f2245624bf977.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/estimate-model-memory.asciidoc:54 +// ml/anomaly-detection/apis/estimate-model-memory.asciidoc:60 [source, python] ---- diff --git a/docs/examples/d1d8b6e642db1a7c70dbbf0fe6d8e92d.asciidoc b/docs/examples/d1d8b6e642db1a7c70dbbf0fe6d8e92d.asciidoc index 22b7b5a16..fb9b3dc8c 100644 --- a/docs/examples/d1d8b6e642db1a7c70dbbf0fe6d8e92d.asciidoc +++ b/docs/examples/d1d8b6e642db1a7c70dbbf0fe6d8e92d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/sparse-vector-query.asciidoc:190 +// query-dsl/sparse-vector-query.asciidoc:192 [source, python] ---- diff --git a/docs/examples/d1e0fee64389e7c8d4c092030626b61f.asciidoc b/docs/examples/d1e0fee64389e7c8d4c092030626b61f.asciidoc index f7602e3f4..bec485a6c 100644 --- a/docs/examples/d1e0fee64389e7c8d4c092030626b61f.asciidoc +++ b/docs/examples/d1e0fee64389e7c8d4c092030626b61f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-api-keys.asciidoc:209 +// rest-api/security/get-api-keys.asciidoc:215 [source, python] ---- diff --git a/docs/examples/d1fde25de1980b7e84fa878289fd0bcb.asciidoc b/docs/examples/d1fde25de1980b7e84fa878289fd0bcb.asciidoc index 38483671b..712879b6b 100644 --- a/docs/examples/d1fde25de1980b7e84fa878289fd0bcb.asciidoc +++ b/docs/examples/d1fde25de1980b7e84fa878289fd0bcb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update-by-query.asciidoc:654 +// docs/update-by-query.asciidoc:660 [source, python] ---- diff --git a/docs/examples/d29031409016b2b798148ef173a196ae.asciidoc b/docs/examples/d29031409016b2b798148ef173a196ae.asciidoc new file mode 100644 index 000000000..9a2275025 --- /dev/null +++ b/docs/examples/d29031409016b2b798148ef173a196ae.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// mapping/types/semantic-text.asciidoc:132 + +[source, python] +---- +resp = client.search( + index="test-index", + query={ + "semantic": { + "field": "my_semantic_field" + } + }, + highlight={ + "fields": { + "my_semantic_field": { + "type": "semantic", + "number_of_fragments": 2, + "order": "score" + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/d3263afc69b6f969b9bbd8738cd07b97.asciidoc b/docs/examples/d3263afc69b6f969b9bbd8738cd07b97.asciidoc index d0717b7b6..3301055fa 100644 --- a/docs/examples/d3263afc69b6f969b9bbd8738cd07b97.asciidoc +++ b/docs/examples/d3263afc69b6f969b9bbd8738cd07b97.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ccr/apis/follow/post-pause-follow.asciidoc:67 +// ccr/apis/follow/post-pause-follow.asciidoc:73 [source, python] ---- diff --git 
a/docs/examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc b/docs/examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc index 4edb74f87..bda513c8a 100644 --- a/docs/examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc +++ b/docs/examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc @@ -1,9 +1,12 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/oidc-prepare-authentication-api.asciidoc:128 +// rest-api/security/oidc-prepare-authentication-api.asciidoc:134 [source, python] ---- -resp = client.security.oidc_prepare_authentication( +resp = client.perform_request( + "POST", + "/_security/oidc/prepare", + headers={"Content-Type": "application/json"}, body={ "iss": "http://127.0.0.1:8080", "login_hint": "this_is_an_opaque_string" diff --git a/docs/examples/d3672a87a857ddb87519788236e57497.asciidoc b/docs/examples/d3672a87a857ddb87519788236e57497.asciidoc new file mode 100644 index 000000000..756cdf5be --- /dev/null +++ b/docs/examples/d3672a87a857ddb87519788236e57497.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// inference/service-jinaai.asciidoc:232 + +[source, python] +---- +resp = client.search( + index="jinaai-index", + retriever={ + "text_similarity_reranker": { + "retriever": { + "standard": { + "query": { + "semantic": { + "field": "content", + "query": "who inspired taking care of the sea?" + } + } + } + }, + "field": "content", + "rank_window_size": 100, + "inference_id": "jinaai-rerank", + "inference_text": "who inspired taking care of the sea?" + } + }, +) +print(resp) +---- diff --git a/docs/examples/d4158d486e7fee2702a14068b69e3b33.asciidoc b/docs/examples/d4158d486e7fee2702a14068b69e3b33.asciidoc new file mode 100644 index 000000000..917e7bfd7 --- /dev/null +++ b/docs/examples/d4158d486e7fee2702a14068b69e3b33.asciidoc @@ -0,0 +1,156 @@ +// This file is autogenerated, DO NOT EDIT +// data-streams/downsampling-dsl.asciidoc:45 + +[source, python] +---- +resp = client.indices.put_index_template( + name="datastream_template", + index_patterns=[ + "datastream*" + ], + data_stream={}, + template={ + "lifecycle": { + "downsampling": [ + { + "after": "1m", + "fixed_interval": "1h" + } + ] + }, + "settings": { + "index": { + "mode": "time_series" + } + }, + "mappings": { + "properties": { + "@timestamp": { + "type": "date" + }, + "kubernetes": { + "properties": { + "container": { + "properties": { + "cpu": { + "properties": { + "usage": { + "properties": { + "core": { + "properties": { + "ns": { + "type": "long" + } + } + }, + "limit": { + "properties": { + "pct": { + "type": "float" + } + } + }, + "nanocores": { + "type": "long", + "time_series_metric": "gauge" + }, + "node": { + "properties": { + "pct": { + "type": "float" + } + } + } + } + } + } + }, + "memory": { + "properties": { + "available": { + "properties": { + "bytes": { + "type": "long", + "time_series_metric": "gauge" + } + } + }, + "majorpagefaults": { + "type": "long" + }, + "pagefaults": { + "type": "long", + "time_series_metric": "gauge" + }, + "rss": { + "properties": { + "bytes": { + "type": "long", + "time_series_metric": "gauge" + } + } + }, + "usage": { + "properties": { + "bytes": { + "type": "long", + "time_series_metric": "gauge" + }, + "limit": { + "properties": { + "pct": { + "type": "float" + } + } + }, + "node": { + "properties": { + "pct": { + "type": "float" + } + } + } + } + }, + "workingset": { + "properties": { + "bytes": { + "type": "long", + "time_series_metric": "gauge" + } + } + } + } + }, + "name": { + "type": "keyword" + }, + "start_time": { + "type": "date" + } + } + 
}, + "host": { + "type": "keyword", + "time_series_dimension": True + }, + "namespace": { + "type": "keyword", + "time_series_dimension": True + }, + "node": { + "type": "keyword", + "time_series_dimension": True + }, + "pod": { + "type": "keyword", + "time_series_dimension": True + } + } + } + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/d4323be84152fa91abd76e966d4751dc.asciidoc b/docs/examples/d4323be84152fa91abd76e966d4751dc.asciidoc index 4e212eaf9..66d97f60e 100644 --- a/docs/examples/d4323be84152fa91abd76e966d4751dc.asciidoc +++ b/docs/examples/d4323be84152fa91abd76e966d4751dc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/query-api-key.asciidoc:469 +// rest-api/security/query-api-key.asciidoc:474 [source, python] ---- diff --git a/docs/examples/d46e9739bbf25eb2f7225f58ab08b2a7.asciidoc b/docs/examples/d46e9739bbf25eb2f7225f58ab08b2a7.asciidoc index 920b8ddb3..f52d17618 100644 --- a/docs/examples/d46e9739bbf25eb2f7225f58ab08b2a7.asciidoc +++ b/docs/examples/d46e9739bbf25eb2f7225f58ab08b2a7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/saml-complete-logout-api.asciidoc:83 +// rest-api/security/saml-complete-logout-api.asciidoc:89 [source, python] ---- diff --git a/docs/examples/d48b274a4b6098ffef0c016c6c945fb9.asciidoc b/docs/examples/d48b274a4b6098ffef0c016c6c945fb9.asciidoc index 77bb46877..bf5f7a444 100644 --- a/docs/examples/d48b274a4b6098ffef0c016c6c945fb9.asciidoc +++ b/docs/examples/d48b274a4b6098ffef0c016c6c945fb9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-tokens.asciidoc:216 +// rest-api/security/get-tokens.asciidoc:222 [source, python] ---- diff --git a/docs/examples/d4b405ef0302227e050ac8f0e39068e1.asciidoc b/docs/examples/d4b405ef0302227e050ac8f0e39068e1.asciidoc index 0a0a05e6a..2ea22c903 100644 --- a/docs/examples/d4b405ef0302227e050ac8f0e39068e1.asciidoc +++ b/docs/examples/d4b405ef0302227e050ac8f0e39068e1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/df-analytics/apis/evaluate-dfanalytics.asciidoc:253 +// ml/df-analytics/apis/evaluate-dfanalytics.asciidoc:259 [source, python] ---- diff --git a/docs/examples/d4d450f536d747d5ef5050d2d8c66f09.asciidoc b/docs/examples/d4d450f536d747d5ef5050d2d8c66f09.asciidoc index b1a92fc25..4837789c1 100644 --- a/docs/examples/d4d450f536d747d5ef5050d2d8c66f09.asciidoc +++ b/docs/examples/d4d450f536d747d5ef5050d2d8c66f09.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/validate.asciidoc:87 +// search/validate.asciidoc:93 [source, python] ---- diff --git a/docs/examples/d4df39f72d3a3b80cd4042f6a21c3f19.asciidoc b/docs/examples/d4df39f72d3a3b80cd4042f6a21c3f19.asciidoc new file mode 100644 index 000000000..8726658f3 --- /dev/null +++ b/docs/examples/d4df39f72d3a3b80cd4042f6a21c3f19.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// ingest/apis/put-ip-location-database.asciidoc:40 + +[source, python] +---- +resp = client.perform_request( + "PUT", + "/_ingest/ip_location/database/my-database-2", + headers={"Content-Type": "application/json"}, + body={ + "name": "standard_location", + "ipinfo": {} + }, +) +print(resp) +---- diff --git a/docs/examples/d4ef6ac034c4d42cb75d830ec69146e6.asciidoc b/docs/examples/d4ef6ac034c4d42cb75d830ec69146e6.asciidoc index 76daae69a..cac078e13 100644 --- a/docs/examples/d4ef6ac034c4d42cb75d830ec69146e6.asciidoc +++ b/docs/examples/d4ef6ac034c4d42cb75d830ec69146e6.asciidoc @@ -1,5 
+1,5 @@ // This file is autogenerated, DO NOT EDIT -// ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc:69 +// ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc:75 [source, python] ---- diff --git a/docs/examples/d50b030edfe6d1128eb76aa5ba9d4e27.asciidoc b/docs/examples/d50b030edfe6d1128eb76aa5ba9d4e27.asciidoc index 0d4378362..acf3c899e 100644 --- a/docs/examples/d50b030edfe6d1128eb76aa5ba9d4e27.asciidoc +++ b/docs/examples/d50b030edfe6d1128eb76aa5ba9d4e27.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/put-trained-models-aliases.asciidoc:94 +// ml/trained-models/apis/put-trained-models-aliases.asciidoc:99 [source, python] ---- diff --git a/docs/examples/35fd9549350926f8d57dc1765e2f40d3.asciidoc b/docs/examples/d5242b1ab0213f25e5e0742032274ce6.asciidoc similarity index 94% rename from docs/examples/35fd9549350926f8d57dc1765e2f40d3.asciidoc rename to docs/examples/d5242b1ab0213f25e5e0742032274ce6.asciidoc index f70f2943f..1598f7708 100644 --- a/docs/examples/35fd9549350926f8d57dc1765e2f40d3.asciidoc +++ b/docs/examples/d5242b1ab0213f25e5e0742032274ce6.asciidoc @@ -10,7 +10,7 @@ resp = client.ingest.put_pipeline( { "attachment": { "field": "data", - "remove_binary": False + "remove_binary": True } } ], diff --git a/docs/examples/d524db57be9f16abac5396895b9a2a59.asciidoc b/docs/examples/d524db57be9f16abac5396895b9a2a59.asciidoc index b16084909..02b65b221 100644 --- a/docs/examples/d524db57be9f16abac5396895b9a2a59.asciidoc +++ b/docs/examples/d524db57be9f16abac5396895b9a2a59.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/resolve.asciidoc:47 +// indices/resolve.asciidoc:53 [source, python] ---- diff --git a/docs/examples/d5533f08f5cc0479f07a46c761f0786b.asciidoc b/docs/examples/d5533f08f5cc0479f07a46c761f0786b.asciidoc index bc24e9978..12f1b6a1a 100644 --- a/docs/examples/d5533f08f5cc0479f07a46c761f0786b.asciidoc +++ b/docs/examples/d5533f08f5cc0479f07a46c761f0786b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/get.asciidoc:321 +// docs/get.asciidoc:327 [source, python] ---- diff --git a/docs/examples/d56a9d89282df56adbbc34b91390ac17.asciidoc b/docs/examples/d56a9d89282df56adbbc34b91390ac17.asciidoc index fdc2e6aba..8f9d5911f 100644 --- a/docs/examples/d56a9d89282df56adbbc34b91390ac17.asciidoc +++ b/docs/examples/d56a9d89282df56adbbc34b91390ac17.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc:49 +// ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc:55 [source, python] ---- diff --git a/docs/examples/d5d0ecf75843ddb5f92cfebd089e53e9.asciidoc b/docs/examples/d5d0ecf75843ddb5f92cfebd089e53e9.asciidoc index 4b4e8163c..d871dbc9f 100644 --- a/docs/examples/d5d0ecf75843ddb5f92cfebd089e53e9.asciidoc +++ b/docs/examples/d5d0ecf75843ddb5f92cfebd089e53e9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/reindex.asciidoc:742 +// docs/reindex.asciidoc:748 [source, python] ---- diff --git a/docs/examples/d5ead6aacbfbedc8396f87bb34acc880.asciidoc b/docs/examples/d5ead6aacbfbedc8396f87bb34acc880.asciidoc index 50b1633ff..d728103a6 100644 --- a/docs/examples/d5ead6aacbfbedc8396f87bb34acc880.asciidoc +++ b/docs/examples/d5ead6aacbfbedc8396f87bb34acc880.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// eql/get-async-eql-search-api.asciidoc:14 +// eql/get-async-eql-search-api.asciidoc:20 [source, python] ---- diff --git 
a/docs/examples/d603e76ab70131f7ec6b08758f95a0e3.asciidoc b/docs/examples/d603e76ab70131f7ec6b08758f95a0e3.asciidoc index 2d05e9e67..b2a1f1635 100644 --- a/docs/examples/d603e76ab70131f7ec6b08758f95a0e3.asciidoc +++ b/docs/examples/d603e76ab70131f7ec6b08758f95a0e3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/recovery.asciidoc:142 +// cat/recovery.asciidoc:148 [source, python] ---- diff --git a/docs/examples/d681508a745b2bc777d47ba606d24224.asciidoc b/docs/examples/d681508a745b2bc777d47ba606d24224.asciidoc index ac543ef18..3e907d7a5 100644 --- a/docs/examples/d681508a745b2bc777d47ba606d24224.asciidoc +++ b/docs/examples/d681508a745b2bc777d47ba606d24224.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/fielddata.asciidoc:152 +// cat/fielddata.asciidoc:158 [source, python] ---- diff --git a/docs/examples/d6a21afa4a94b9baa734eac430940bcf.asciidoc b/docs/examples/d6a21afa4a94b9baa734eac430940bcf.asciidoc index 7857d03d4..44333100e 100644 --- a/docs/examples/d6a21afa4a94b9baa734eac430940bcf.asciidoc +++ b/docs/examples/d6a21afa4a94b9baa734eac430940bcf.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/list-connectors-api.asciidoc:86 +// connector/apis/list-connectors-api.asciidoc:95 [source, python] ---- diff --git a/docs/examples/d7141bd4d0db964f5cc4a872ad79dce9.asciidoc b/docs/examples/d7141bd4d0db964f5cc4a872ad79dce9.asciidoc index 6c9d7e7d0..2b3a76482 100644 --- a/docs/examples/d7141bd4d0db964f5cc4a872ad79dce9.asciidoc +++ b/docs/examples/d7141bd4d0db964f5cc4a872ad79dce9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// features/apis/reset-features-api.asciidoc:14 +// features/apis/reset-features-api.asciidoc:20 [source, python] ---- diff --git a/docs/examples/d775836a0d7abecc6637aa988f204c30.asciidoc b/docs/examples/d775836a0d7abecc6637aa988f204c30.asciidoc index 46b21f906..8a55e35ce 100644 --- a/docs/examples/d775836a0d7abecc6637aa988f204c30.asciidoc +++ b/docs/examples/d775836a0d7abecc6637aa988f204c30.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/termvectors.asciidoc:218 +// docs/termvectors.asciidoc:224 [source, python] ---- diff --git a/docs/examples/d7898526d239d2aea83727fb982f8f77.asciidoc b/docs/examples/d7898526d239d2aea83727fb982f8f77.asciidoc index d02e7b8c5..fbf8ababd 100644 --- a/docs/examples/d7898526d239d2aea83727fb982f8f77.asciidoc +++ b/docs/examples/d7898526d239d2aea83727fb982f8f77.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/refresh.asciidoc:113 +// indices/refresh.asciidoc:119 [source, python] ---- diff --git a/docs/examples/d7ae456f119246e95f2f4c37e7544b8c.asciidoc b/docs/examples/d7ae456f119246e95f2f4c37e7544b8c.asciidoc index ed5729b4f..8f3de184f 100644 --- a/docs/examples/d7ae456f119246e95f2f4c37e7544b8c.asciidoc +++ b/docs/examples/d7ae456f119246e95f2f4c37e7544b8c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/start-datafeed.asciidoc:109 +// ml/anomaly-detection/apis/start-datafeed.asciidoc:115 [source, python] ---- diff --git a/docs/examples/d7fe687201ac87b307cd06ed015dd317.asciidoc b/docs/examples/d7fe687201ac87b307cd06ed015dd317.asciidoc index f77b3758f..e713f4ada 100644 --- a/docs/examples/d7fe687201ac87b307cd06ed015dd317.asciidoc +++ b/docs/examples/d7fe687201ac87b307cd06ed015dd317.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:282 +// indices/put-mapping.asciidoc:288 [source, 
python] ---- diff --git a/docs/examples/d80ac403d8d936ca9dec185c7da13f2f.asciidoc b/docs/examples/d80ac403d8d936ca9dec185c7da13f2f.asciidoc index 8a73c0098..592d64248 100644 --- a/docs/examples/d80ac403d8d936ca9dec185c7da13f2f.asciidoc +++ b/docs/examples/d80ac403d8d936ca9dec185c7da13f2f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// scripting/apis/create-stored-script-api.asciidoc:11 +// scripting/apis/create-stored-script-api.asciidoc:17 [source, python] ---- diff --git a/docs/examples/d870d5bd1f97fc75872a298fcddec513.asciidoc b/docs/examples/d870d5bd1f97fc75872a298fcddec513.asciidoc index e128de0b6..adc5382ea 100644 --- a/docs/examples/d870d5bd1f97fc75872a298fcddec513.asciidoc +++ b/docs/examples/d870d5bd1f97fc75872a298fcddec513.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// text-structure/apis/find-structure.asciidoc:95 +// text-structure/apis/find-structure.asciidoc:101 [source, python] ---- diff --git a/docs/examples/d87175daed2327565d4325528c6d8b38.asciidoc b/docs/examples/d87175daed2327565d4325528c6d8b38.asciidoc index 2aca8df64..7f274a06b 100644 --- a/docs/examples/d87175daed2327565d4325528c6d8b38.asciidoc +++ b/docs/examples/d87175daed2327565d4325528c6d8b38.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/get.asciidoc:229 +// docs/get.asciidoc:235 [source, python] ---- diff --git a/docs/examples/d89d36741d906a71eca6c144e8d83889.asciidoc b/docs/examples/d89d36741d906a71eca6c144e8d83889.asciidoc index 66975ea95..de73d93f5 100644 --- a/docs/examples/d89d36741d906a71eca6c144e8d83889.asciidoc +++ b/docs/examples/d89d36741d906a71eca6c144e8d83889.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/tasks.asciidoc:237 +// cluster/tasks.asciidoc:243 [source, python] ---- diff --git a/docs/examples/d8a82511cb94f49b4fe4828fee3ba074.asciidoc b/docs/examples/d8a82511cb94f49b4fe4828fee3ba074.asciidoc index 7f8a3b734..393d427ca 100644 --- a/docs/examples/d8a82511cb94f49b4fe4828fee3ba074.asciidoc +++ b/docs/examples/d8a82511cb94f49b4fe4828fee3ba074.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/circuit-breaker-errors.asciidoc:56 +// troubleshooting/common-issues/circuit-breaker-errors.asciidoc:63 [source, python] ---- diff --git a/docs/examples/d8c053ee26c1533ce936ec81101d8e1b.asciidoc b/docs/examples/d8c053ee26c1533ce936ec81101d8e1b.asciidoc new file mode 100644 index 000000000..78590c949 --- /dev/null +++ b/docs/examples/d8c053ee26c1533ce936ec81101d8e1b.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// ingest/apis/get-ip-location-database.asciidoc:61 + +[source, python] +---- +resp = client.perform_request( + "GET", + "/_ingest/ip_location/database/my-database-id", +) +print(resp) +---- diff --git a/docs/examples/da0fe1316e5b8fd68e2a8525bcd8b0f6.asciidoc b/docs/examples/da0fe1316e5b8fd68e2a8525bcd8b0f6.asciidoc index 6670d51f8..b2d4e5719 100644 --- a/docs/examples/da0fe1316e5b8fd68e2a8525bcd8b0f6.asciidoc +++ b/docs/examples/da0fe1316e5b8fd68e2a8525bcd8b0f6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// how-to/recipes/scoring.asciidoc:168 +// how-to/recipes/scoring.asciidoc:169 [source, python] ---- diff --git a/docs/examples/da18bae37cda566c0254b30c15221b01.asciidoc b/docs/examples/da18bae37cda566c0254b30c15221b01.asciidoc index 9f1939a58..5d577108a 100644 --- a/docs/examples/da18bae37cda566c0254b30c15221b01.asciidoc +++ b/docs/examples/da18bae37cda566c0254b30c15221b01.asciidoc @@ -1,5 
+1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/clear-service-token-caches.asciidoc:55 +// rest-api/security/clear-service-token-caches.asciidoc:61 [source, python] ---- diff --git a/docs/examples/da3f280bc65b581fb3097be768061bee.asciidoc b/docs/examples/da3f280bc65b581fb3097be768061bee.asciidoc index 7dfabdaa3..0b88203ea 100644 --- a/docs/examples/da3f280bc65b581fb3097be768061bee.asciidoc +++ b/docs/examples/da3f280bc65b581fb3097be768061bee.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/saml-prepare-authentication-api.asciidoc:90 +// rest-api/security/saml-prepare-authentication-api.asciidoc:96 [source, python] ---- diff --git a/docs/examples/daae2e6acebc84e537764f4ba07f2e6e.asciidoc b/docs/examples/daae2e6acebc84e537764f4ba07f2e6e.asciidoc index 1205c8e57..213e662a8 100644 --- a/docs/examples/daae2e6acebc84e537764f4ba07f2e6e.asciidoc +++ b/docs/examples/daae2e6acebc84e537764f4ba07f2e6e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// setup/important-settings/path-settings.asciidoc:95 +// path-settings-overview.asciidoc:75 [source, python] ---- diff --git a/docs/examples/dac8ec8547bc446637fd97d9fa872f4f.asciidoc b/docs/examples/dac8ec8547bc446637fd97d9fa872f4f.asciidoc index d5957ecad..d0da2cfd8 100644 --- a/docs/examples/dac8ec8547bc446637fd97d9fa872f4f.asciidoc +++ b/docs/examples/dac8ec8547bc446637fd97d9fa872f4f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/df-analytics/apis/put-dfanalytics.asciidoc:816 +// ml/df-analytics/apis/put-dfanalytics.asciidoc:822 [source, python] ---- diff --git a/docs/examples/db19cc7a26ca80106d86d688f4be67a8.asciidoc b/docs/examples/db19cc7a26ca80106d86d688f4be67a8.asciidoc index 707ba862c..b6a2076b6 100644 --- a/docs/examples/db19cc7a26ca80106d86d688f4be67a8.asciidoc +++ b/docs/examples/db19cc7a26ca80106d86d688f4be67a8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/df-analytics/apis/stop-dfanalytics.asciidoc:69 +// ml/df-analytics/apis/stop-dfanalytics.asciidoc:75 [source, python] ---- diff --git a/docs/examples/db773f690edf659ac9b044dc854c77eb.asciidoc b/docs/examples/db773f690edf659ac9b044dc854c77eb.asciidoc index b579ccc7f..1082dab75 100644 --- a/docs/examples/db773f690edf659ac9b044dc854c77eb.asciidoc +++ b/docs/examples/db773f690edf659ac9b044dc854c77eb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-vector-tile-api.asciidoc:666 +// search/search-vector-tile-api.asciidoc:671 [source, python] ---- diff --git a/docs/examples/db8710a9793ae0817a45892d33468160.asciidoc b/docs/examples/db8710a9793ae0817a45892d33468160.asciidoc index ec6ab218d..48ecd1500 100644 --- a/docs/examples/db8710a9793ae0817a45892d33468160.asciidoc +++ b/docs/examples/db8710a9793ae0817a45892d33468160.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/diskusage.asciidoc:69 +// indices/diskusage.asciidoc:75 [source, python] ---- diff --git a/docs/examples/dbc50b8c934171e94604575a8b36f349.asciidoc b/docs/examples/dbc50b8c934171e94604575a8b36f349.asciidoc index 77af53991..a3f05c019 100644 --- a/docs/examples/dbc50b8c934171e94604575a8b36f349.asciidoc +++ b/docs/examples/dbc50b8c934171e94604575a8b36f349.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/update-settings.asciidoc:145 +// indices/update-settings.asciidoc:151 [source, python] ---- diff --git a/docs/examples/dbdd58cdeac9ef20b42ff73e4864e697.asciidoc 
b/docs/examples/dbdd58cdeac9ef20b42ff73e4864e697.asciidoc index 9371ae3f1..1fe7a327f 100644 --- a/docs/examples/dbdd58cdeac9ef20b42ff73e4864e697.asciidoc +++ b/docs/examples/dbdd58cdeac9ef20b42ff73e4864e697.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/get-field-mapping.asciidoc:245 +// indices/get-field-mapping.asciidoc:251 [source, python] ---- diff --git a/docs/examples/dbf93d02ab86a09929a21232b19709cc.asciidoc b/docs/examples/dbf93d02ab86a09929a21232b19709cc.asciidoc index bbdbc2287..b4f4b70ce 100644 --- a/docs/examples/dbf93d02ab86a09929a21232b19709cc.asciidoc +++ b/docs/examples/dbf93d02ab86a09929a21232b19709cc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/stop-trained-model-deployment.asciidoc:67 +// ml/trained-models/apis/stop-trained-model-deployment.asciidoc:73 [source, python] ---- diff --git a/docs/examples/dbf9abc37899352751dab0ede62af2fd.asciidoc b/docs/examples/dbf9abc37899352751dab0ede62af2fd.asciidoc index eeccebacc..029f025ca 100644 --- a/docs/examples/dbf9abc37899352751dab0ede62af2fd.asciidoc +++ b/docs/examples/dbf9abc37899352751dab0ede62af2fd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/invalidate-tokens.asciidoc:115 +// rest-api/security/invalidate-tokens.asciidoc:121 [source, python] ---- diff --git a/docs/examples/dc468865da947b4a9136a5b92878d918.asciidoc b/docs/examples/dc468865da947b4a9136a5b92878d918.asciidoc index 856c3b54b..959201872 100644 --- a/docs/examples/dc468865da947b4a9136a5b92878d918.asciidoc +++ b/docs/examples/dc468865da947b4a9136a5b92878d918.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/bulk-update-api-keys.asciidoc:125 +// rest-api/security/bulk-update-api-keys.asciidoc:131 [source, python] ---- diff --git a/docs/examples/dc8c94c9bef1f879282caea5c406f36e.asciidoc b/docs/examples/dc8c94c9bef1f879282caea5c406f36e.asciidoc index 9e0d71ce6..4c04a5ddc 100644 --- a/docs/examples/dc8c94c9bef1f879282caea5c406f36e.asciidoc +++ b/docs/examples/dc8c94c9bef1f879282caea5c406f36e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/analyze.asciidoc:183 +// indices/analyze.asciidoc:189 [source, python] ---- diff --git a/docs/examples/dcc02ad69da0a5aa10c4e53b34be8ec0.asciidoc b/docs/examples/dcc02ad69da0a5aa10c4e53b34be8ec0.asciidoc index d5a65b419..b769cd037 100644 --- a/docs/examples/dcc02ad69da0a5aa10c4e53b34be8ec0.asciidoc +++ b/docs/examples/dcc02ad69da0a5aa10c4e53b34be8ec0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/multi-get.asciidoc:10 +// docs/multi-get.asciidoc:16 [source, python] ---- diff --git a/docs/examples/dcf82f3aacae49c0bb4ccbc673f13e9f.asciidoc b/docs/examples/dcf82f3aacae49c0bb4ccbc673f13e9f.asciidoc index b26cee2dc..c98d06e98 100644 --- a/docs/examples/dcf82f3aacae49c0bb4ccbc673f13e9f.asciidoc +++ b/docs/examples/dcf82f3aacae49c0bb4ccbc673f13e9f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/knn-search.asciidoc:1215 +// search/search-your-data/knn-search.asciidoc:1202 [source, python] ---- diff --git a/docs/examples/dcfa7f479a33f459a2d222a92e651451.asciidoc b/docs/examples/dcfa7f479a33f459a2d222a92e651451.asciidoc index 031b39953..e49e3a06e 100644 --- a/docs/examples/dcfa7f479a33f459a2d222a92e651451.asciidoc +++ b/docs/examples/dcfa7f479a33f459a2d222a92e651451.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// 
rest-api/security/create-roles.asciidoc:120 +// rest-api/security/create-roles.asciidoc:126 [source, python] ---- diff --git a/docs/examples/dd4f051ab62f0507e3b6e3d6f333e85f.asciidoc b/docs/examples/dd4f051ab62f0507e3b6e3d6f333e85f.asciidoc index bbab73fed..62d99f7e9 100644 --- a/docs/examples/dd4f051ab62f0507e3b6e3d6f333e85f.asciidoc +++ b/docs/examples/dd4f051ab62f0507e3b6e3d6f333e85f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/get-component-template.asciidoc:95 +// indices/get-component-template.asciidoc:101 [source, python] ---- diff --git a/docs/examples/dd71b0c9f9197684ff29c61062c55660.asciidoc b/docs/examples/dd71b0c9f9197684ff29c61062c55660.asciidoc index 9c29e08a5..3f2fdfb21 100644 --- a/docs/examples/dd71b0c9f9197684ff29c61062c55660.asciidoc +++ b/docs/examples/dd71b0c9f9197684ff29c61062c55660.asciidoc @@ -1,8 +1,11 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-settings.asciidoc:32 +// rest-api/security/get-settings.asciidoc:38 [source, python] ---- -resp = client.security.get_settings() +resp = client.perform_request( + "GET", + "/_security/settings", +) print(resp) ---- diff --git a/docs/examples/dda949d20d07a9edbe64cefc623df945.asciidoc b/docs/examples/dda949d20d07a9edbe64cefc623df945.asciidoc index 744a4739f..bbb44713f 100644 --- a/docs/examples/dda949d20d07a9edbe64cefc623df945.asciidoc +++ b/docs/examples/dda949d20d07a9edbe64cefc623df945.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// scripting/using.asciidoc:467 +// scripting/using.asciidoc:472 [source, python] ---- diff --git a/docs/examples/ddaadd91b7743a1c7e946ce1b593cd1b.asciidoc b/docs/examples/ddaadd91b7743a1c7e946ce1b593cd1b.asciidoc deleted file mode 100644 index 6944bbb1c..000000000 --- a/docs/examples/ddaadd91b7743a1c7e946ce1b593cd1b.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// inference/update-inference.asciidoc:77 - -[source, python] ----- -resp = client.inference.inference( - task_type="my-inference-endpoint", - inference_id="_update", - service_settings={ - "api_key": "" - }, -) -print(resp) ----- diff --git a/docs/examples/dde283eab92608e7bfbfa09c6482a12e.asciidoc b/docs/examples/dde283eab92608e7bfbfa09c6482a12e.asciidoc index c8f32431a..17a42fe0b 100644 --- a/docs/examples/dde283eab92608e7bfbfa09c6482a12e.asciidoc +++ b/docs/examples/dde283eab92608e7bfbfa09c6482a12e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/invalidate-api-keys.asciidoc:134 +// rest-api/security/invalidate-api-keys.asciidoc:140 [source, python] ---- diff --git a/docs/examples/ddf375e4b6175d830fa4097ea0b41536.asciidoc b/docs/examples/ddf375e4b6175d830fa4097ea0b41536.asciidoc index ca50fe2d1..914be135b 100644 --- a/docs/examples/ddf375e4b6175d830fa4097ea0b41536.asciidoc +++ b/docs/examples/ddf375e4b6175d830fa4097ea0b41536.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/delete-desired-nodes.asciidoc:55 +// cluster/delete-desired-nodes.asciidoc:61 [source, python] ---- diff --git a/docs/examples/ddf56782ecc7eaeb3115e150c4830013.asciidoc b/docs/examples/ddf56782ecc7eaeb3115e150c4830013.asciidoc index c21a7216f..b77c1e337 100644 --- a/docs/examples/ddf56782ecc7eaeb3115e150c4830013.asciidoc +++ b/docs/examples/ddf56782ecc7eaeb3115e150c4830013.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update-by-query.asciidoc:585 +// docs/update-by-query.asciidoc:591 [source, python] ---- diff --git 
a/docs/examples/de2f59887737de3a27716177b60393a2.asciidoc b/docs/examples/de2f59887737de3a27716177b60393a2.asciidoc index 12fb371a6..d0f8746de 100644 --- a/docs/examples/de2f59887737de3a27716177b60393a2.asciidoc +++ b/docs/examples/de2f59887737de3a27716177b60393a2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/analyze.asciidoc:239 +// indices/analyze.asciidoc:245 [source, python] ---- diff --git a/docs/examples/de876505acc75d371d1f6f484c449197.asciidoc b/docs/examples/de876505acc75d371d1f6f484c449197.asciidoc index c3fb4ff3c..a82a998b3 100644 --- a/docs/examples/de876505acc75d371d1f6f484c449197.asciidoc +++ b/docs/examples/de876505acc75d371d1f6f484c449197.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/create-index.asciidoc:251 +// indices/create-index.asciidoc:257 [source, python] ---- diff --git a/docs/examples/de90249caeac6f1601a7e7e9f98f1bec.asciidoc b/docs/examples/de90249caeac6f1601a7e7e9f98f1bec.asciidoc index 70c2d979b..0b7c4e510 100644 --- a/docs/examples/de90249caeac6f1601a7e7e9f98f1bec.asciidoc +++ b/docs/examples/de90249caeac6f1601a7e7e9f98f1bec.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/query-api-key.asciidoc:395 +// rest-api/security/query-api-key.asciidoc:400 [source, python] ---- diff --git a/docs/examples/df0d27d3abd286b75aef7ddcf0e6c66c.asciidoc b/docs/examples/df0d27d3abd286b75aef7ddcf0e6c66c.asciidoc index 4d21c00df..bba0cb949 100644 --- a/docs/examples/df0d27d3abd286b75aef7ddcf0e6c66c.asciidoc +++ b/docs/examples/df0d27d3abd286b75aef7ddcf0e6c66c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/apis/reload-analyzers.asciidoc:110 +// indices/apis/reload-analyzers.asciidoc:116 [source, python] ---- diff --git a/docs/examples/df7dbac966b67404b8bfa9cdda5ef480.asciidoc b/docs/examples/df7dbac966b67404b8bfa9cdda5ef480.asciidoc index ffbfb83ee..e1a3e6284 100644 --- a/docs/examples/df7dbac966b67404b8bfa9cdda5ef480.asciidoc +++ b/docs/examples/df7dbac966b67404b8bfa9cdda5ef480.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/watcher/ack-watch.asciidoc:253 +// rest-api/watcher/ack-watch.asciidoc:259 [source, python] ---- diff --git a/docs/examples/df7ed126d8c92ddd3655c59ce4f305c9.asciidoc b/docs/examples/df7ed126d8c92ddd3655c59ce4f305c9.asciidoc index 3c77d1754..a6d31b62b 100644 --- a/docs/examples/df7ed126d8c92ddd3655c59ce4f305c9.asciidoc +++ b/docs/examples/df7ed126d8c92ddd3655c59ce4f305c9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/thread_pool.asciidoc:172 +// cat/thread_pool.asciidoc:178 [source, python] ---- diff --git a/docs/examples/a225fc8c134cb21a85bc6025dac9368b.asciidoc b/docs/examples/df81b88a2192dd6f9912e0c948a44487.asciidoc similarity index 91% rename from docs/examples/a225fc8c134cb21a85bc6025dac9368b.asciidoc rename to docs/examples/df81b88a2192dd6f9912e0c948a44487.asciidoc index 073a56a4e..7241e1518 100644 --- a/docs/examples/a225fc8c134cb21a85bc6025dac9368b.asciidoc +++ b/docs/examples/df81b88a2192dd6f9912e0c948a44487.asciidoc @@ -7,7 +7,7 @@ resp = client.inference.put( task_type="sparse_embedding", inference_id="elser_embeddings", inference_config={ - "service": "elser", + "service": "elasticsearch", "service_settings": { "num_allocations": 1, "num_threads": 1 diff --git a/docs/examples/dfcdcd3ea6753dcc391a4a52cf640527.asciidoc b/docs/examples/dfcdcd3ea6753dcc391a4a52cf640527.asciidoc index 5176519eb..bd66391fb 100644 --- 
a/docs/examples/dfcdcd3ea6753dcc391a4a52cf640527.asciidoc +++ b/docs/examples/dfcdcd3ea6753dcc391a4a52cf640527.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/update-desired-nodes.asciidoc:112 +// cluster/update-desired-nodes.asciidoc:118 [source, python] ---- diff --git a/docs/examples/dfce1be1d035aff0b8fdf4a8839f7795.asciidoc b/docs/examples/dfce1be1d035aff0b8fdf4a8839f7795.asciidoc index 4d49fe44f..d57f7495d 100644 --- a/docs/examples/dfce1be1d035aff0b8fdf4a8839f7795.asciidoc +++ b/docs/examples/dfce1be1d035aff0b8fdf4a8839f7795.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/update-trained-model-deployment.asciidoc:115 +// ml/trained-models/apis/update-trained-model-deployment.asciidoc:121 [source, python] ---- diff --git a/docs/examples/dff61a76d5ef9ca8cbe59a416269a84b.asciidoc b/docs/examples/dff61a76d5ef9ca8cbe59a416269a84b.asciidoc index 50fae83d8..85539c607 100644 --- a/docs/examples/dff61a76d5ef9ca8cbe59a416269a84b.asciidoc +++ b/docs/examples/dff61a76d5ef9ca8cbe59a416269a84b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/apis/delete-pipeline.asciidoc:28 +// ingest/apis/delete-pipeline.asciidoc:34 [source, python] ---- diff --git a/docs/examples/dffbbdc4025e5777c647d8818847b960.asciidoc b/docs/examples/dffbbdc4025e5777c647d8818847b960.asciidoc index b0758980c..05eb40de6 100644 --- a/docs/examples/dffbbdc4025e5777c647d8818847b960.asciidoc +++ b/docs/examples/dffbbdc4025e5777c647d8818847b960.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-api-keys.asciidoc:269 +// rest-api/security/get-api-keys.asciidoc:275 [source, python] ---- diff --git a/docs/examples/e017c2de6f93a8dd97f5c6e002dd5c4f.asciidoc b/docs/examples/e017c2de6f93a8dd97f5c6e002dd5c4f.asciidoc index bdc91a1ff..ad5ef4204 100644 --- a/docs/examples/e017c2de6f93a8dd97f5c6e002dd5c4f.asciidoc +++ b/docs/examples/e017c2de6f93a8dd97f5c6e002dd5c4f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/post-calendar-event.asciidoc:126 +// ml/anomaly-detection/apis/post-calendar-event.asciidoc:132 [source, python] ---- diff --git a/docs/examples/e095fc96504efecc588f97673912e3d3.asciidoc b/docs/examples/e095fc96504efecc588f97673912e3d3.asciidoc index d4ec08315..32766347f 100644 --- a/docs/examples/e095fc96504efecc588f97673912e3d3.asciidoc +++ b/docs/examples/e095fc96504efecc588f97673912e3d3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/put-job.asciidoc:414 +// ml/anomaly-detection/apis/put-job.asciidoc:420 [source, python] ---- diff --git a/docs/examples/e0a7c730ef0f22e3edffe9a254bc56e7.asciidoc b/docs/examples/e0a7c730ef0f22e3edffe9a254bc56e7.asciidoc index acefd9f2b..b6d91ff3a 100644 --- a/docs/examples/e0a7c730ef0f22e3edffe9a254bc56e7.asciidoc +++ b/docs/examples/e0a7c730ef0f22e3edffe9a254bc56e7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/reindex.asciidoc:234 +// docs/reindex.asciidoc:240 [source, python] ---- diff --git a/docs/examples/e0b2f56c34e33ff52f8f9658be2f7ca1.asciidoc b/docs/examples/e0b2f56c34e33ff52f8f9658be2f7ca1.asciidoc index 977bd87df..90b9f0ad8 100644 --- a/docs/examples/e0b2f56c34e33ff52f8f9658be2f7ca1.asciidoc +++ b/docs/examples/e0b2f56c34e33ff52f8f9658be2f7ca1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/stats.asciidoc:105 +// indices/stats.asciidoc:111 [source, python] ---- diff --git 
a/docs/examples/e0bbfb368eae307e9508ab8d6e9cf23c.asciidoc b/docs/examples/e0bbfb368eae307e9508ab8d6e9cf23c.asciidoc index cfae9905a..ace4a2891 100644 --- a/docs/examples/e0bbfb368eae307e9508ab8d6e9cf23c.asciidoc +++ b/docs/examples/e0bbfb368eae307e9508ab8d6e9cf23c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/fielddata.asciidoc:102 +// cat/fielddata.asciidoc:108 [source, python] ---- diff --git a/docs/examples/e0d4a800de2d8f4062e69433586c38db.asciidoc b/docs/examples/e0d4a800de2d8f4062e69433586c38db.asciidoc index 089cc5b46..ae7a86e03 100644 --- a/docs/examples/e0d4a800de2d8f4062e69433586c38db.asciidoc +++ b/docs/examples/e0d4a800de2d8f4062e69433586c38db.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/saml-complete-logout-api.asciidoc:69 +// rest-api/security/saml-complete-logout-api.asciidoc:75 [source, python] ---- diff --git a/docs/examples/e1337c6b76defd5a46d05220f9d9c9fc.asciidoc b/docs/examples/e1337c6b76defd5a46d05220f9d9c9fc.asciidoc index d6324f804..5d4c3eedc 100644 --- a/docs/examples/e1337c6b76defd5a46d05220f9d9c9fc.asciidoc +++ b/docs/examples/e1337c6b76defd5a46d05220f9d9c9fc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/invalidate-tokens.asciidoc:128 +// rest-api/security/invalidate-tokens.asciidoc:134 [source, python] ---- diff --git a/docs/examples/e14a5a5a1c880031486bfff43031fa3a.asciidoc b/docs/examples/e14a5a5a1c880031486bfff43031fa3a.asciidoc index 12335ef7e..655560a56 100644 --- a/docs/examples/e14a5a5a1c880031486bfff43031fa3a.asciidoc +++ b/docs/examples/e14a5a5a1c880031486bfff43031fa3a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/circuit-breaker-errors.asciidoc:64 +// troubleshooting/common-issues/circuit-breaker-errors.asciidoc:71 [source, python] ---- diff --git a/docs/examples/e20037f66bf54bcac7d10f536f031f34.asciidoc b/docs/examples/e20037f66bf54bcac7d10f536f031f34.asciidoc deleted file mode 100644 index 57dd81542..000000000 --- a/docs/examples/e20037f66bf54bcac7d10f536f031f34.asciidoc +++ /dev/null @@ -1,13 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// modules/cluster/disk_allocator.asciidoc:108 - -[source, python] ----- -resp = client.indices.put_settings( - index="my-index-000001", - settings={ - "index.blocks.read_only_allow_delete": None - }, -) -print(resp) ----- diff --git a/docs/examples/e22a1da3c622611be6855e534c0709ae.asciidoc b/docs/examples/e22a1da3c622611be6855e534c0709ae.asciidoc index d0c5e0340..54e333bee 100644 --- a/docs/examples/e22a1da3c622611be6855e534c0709ae.asciidoc +++ b/docs/examples/e22a1da3c622611be6855e534c0709ae.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-rules/apis/test-query-ruleset.asciidoc:111 +// query-rules/apis/test-query-ruleset.asciidoc:117 [source, python] ---- diff --git a/docs/examples/e26c96978096ccc592849cca9db67ffc.asciidoc b/docs/examples/e26c96978096ccc592849cca9db67ffc.asciidoc index 3e39921e1..aff66e7d7 100644 --- a/docs/examples/e26c96978096ccc592849cca9db67ffc.asciidoc +++ b/docs/examples/e26c96978096ccc592849cca9db67ffc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// modules/indices/request_cache.asciidoc:72 +// shard-request-cache.asciidoc:74 [source, python] ---- diff --git a/docs/examples/e2a22c6fd58cc0becf4c383134a08f8b.asciidoc b/docs/examples/e2a22c6fd58cc0becf4c383134a08f8b.asciidoc index 26a8a13b2..e4114ca17 100644 --- 
a/docs/examples/e2a22c6fd58cc0becf4c383134a08f8b.asciidoc +++ b/docs/examples/e2a22c6fd58cc0becf4c383134a08f8b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/intervals-query.asciidoc:445 +// query-dsl/intervals-query.asciidoc:455 [source, python] ---- diff --git a/docs/examples/e2a753029b450942a3228e3003a55a7d.asciidoc b/docs/examples/e2a753029b450942a3228e3003a55a7d.asciidoc index 1c1422a21..8c8a3bb03 100644 --- a/docs/examples/e2a753029b450942a3228e3003a55a7d.asciidoc +++ b/docs/examples/e2a753029b450942a3228e3003a55a7d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/lifecycle/apis/put-lifecycle.asciidoc:105 +// data-streams/lifecycle/apis/put-lifecycle.asciidoc:111 [source, python] ---- diff --git a/docs/examples/e2bcc8f4ed2b4de82729e7a5a7c8f634.asciidoc b/docs/examples/e2bcc8f4ed2b4de82729e7a5a7c8f634.asciidoc index d266cb8a5..ff2f3c34e 100644 --- a/docs/examples/e2bcc8f4ed2b4de82729e7a5a7c8f634.asciidoc +++ b/docs/examples/e2bcc8f4ed2b4de82729e7a5a7c8f634.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// synonyms/apis/list-synonyms-sets.asciidoc:80 +// synonyms/apis/list-synonyms-sets.asciidoc:86 [source, python] ---- diff --git a/docs/examples/e2ec9e867f7141b304b53ebc59098f2a.asciidoc b/docs/examples/e2ec9e867f7141b304b53ebc59098f2a.asciidoc index 05f401af0..3f77c725d 100644 --- a/docs/examples/e2ec9e867f7141b304b53ebc59098f2a.asciidoc +++ b/docs/examples/e2ec9e867f7141b304b53ebc59098f2a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/update-api-key.asciidoc:252 +// rest-api/security/update-api-key.asciidoc:258 [source, python] ---- diff --git a/docs/examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc b/docs/examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc index cc0793e90..0c74e3bf6 100644 --- a/docs/examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc +++ b/docs/examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc @@ -1,9 +1,12 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/oidc-prepare-authentication-api.asciidoc:72 +// rest-api/security/oidc-prepare-authentication-api.asciidoc:78 [source, python] ---- -resp = client.security.oidc_prepare_authentication( +resp = client.perform_request( + "POST", + "/_security/oidc/prepare", + headers={"Content-Type": "application/json"}, body={ "realm": "oidc1" }, diff --git a/docs/examples/e35abc9403e4aef7d538ab29ccc363b3.asciidoc b/docs/examples/e35abc9403e4aef7d538ab29ccc363b3.asciidoc index 5bd24e6e3..bc7a2001b 100644 --- a/docs/examples/e35abc9403e4aef7d538ab29ccc363b3.asciidoc +++ b/docs/examples/e35abc9403e4aef7d538ab29ccc363b3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/prevalidate-node-removal.asciidoc:105 +// cluster/prevalidate-node-removal.asciidoc:111 [source, python] ---- diff --git a/docs/examples/e375c7da666276c4df6664c6821cd5f4.asciidoc b/docs/examples/e375c7da666276c4df6664c6821cd5f4.asciidoc new file mode 100644 index 000000000..3d9beee42 --- /dev/null +++ b/docs/examples/e375c7da666276c4df6664c6821cd5f4.asciidoc @@ -0,0 +1,37 @@ +// This file is autogenerated, DO NOT EDIT +// mapping/types/rank-vectors.asciidoc:17 + +[source, python] +---- +resp = client.indices.create( + index="my-rank-vectors-float", + mappings={ + "properties": { + "my_vector": { + "type": "rank_vectors" + } + } + }, +) +print(resp) + +resp1 = client.index( + index="my-rank-vectors-float", + id="1", + document={ + "my_vector": [ + [ + 0.5, + 10, + 6 + ], + [ + -0.5, + 10, 
+ 10 + ] + ] + }, +) +print(resp1) +---- diff --git a/docs/examples/e3fe842951dc873d7d00c8f6a010c53f.asciidoc b/docs/examples/e3fe842951dc873d7d00c8f6a010c53f.asciidoc index 0109eed73..d9002cd4d 100644 --- a/docs/examples/e3fe842951dc873d7d00c8f6a010c53f.asciidoc +++ b/docs/examples/e3fe842951dc873d7d00c8f6a010c53f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/task-queue-backlog.asciidoc:67 +// troubleshooting/common-issues/task-queue-backlog.asciidoc:90 [source, python] ---- diff --git a/docs/examples/e48e7da65c2b32d724fd7e3bfa175c6f.asciidoc b/docs/examples/e48e7da65c2b32d724fd7e3bfa175c6f.asciidoc index 3d3a03d23..86bfe67dc 100644 --- a/docs/examples/e48e7da65c2b32d724fd7e3bfa175c6f.asciidoc +++ b/docs/examples/e48e7da65c2b32d724fd7e3bfa175c6f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/get-overall-buckets.asciidoc:130 +// ml/anomaly-detection/apis/get-overall-buckets.asciidoc:136 [source, python] ---- diff --git a/docs/examples/e4b38973c74037335378d8480f1ce894.asciidoc b/docs/examples/e4b38973c74037335378d8480f1ce894.asciidoc index 54d3fd679..a9514e8d7 100644 --- a/docs/examples/e4b38973c74037335378d8480f1ce894.asciidoc +++ b/docs/examples/e4b38973c74037335378d8480f1ce894.asciidoc @@ -1,9 +1,12 @@ // This file is autogenerated, DO NOT EDIT -// ingest/apis/simulate-ingest.asciidoc:429 +// ingest/apis/simulate-ingest.asciidoc:435 [source, python] ---- -resp = client.simulate.ingest( +resp = client.perform_request( + "POST", + "/_ingest/_simulate", + headers={"Content-Type": "application/json"}, body={ "docs": [ { diff --git a/docs/examples/e4b6a6a921c97b4c0bbe97bd89f4cf33.asciidoc b/docs/examples/e4b6a6a921c97b4c0bbe97bd89f4cf33.asciidoc index 4dfde8e77..fcc4bd7da 100644 --- a/docs/examples/e4b6a6a921c97b4c0bbe97bd89f4cf33.asciidoc +++ b/docs/examples/e4b6a6a921c97b4c0bbe97bd89f4cf33.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/promote-data-stream-api.asciidoc:26 +// data-streams/promote-data-stream-api.asciidoc:32 [source, python] ---- diff --git a/docs/examples/e4be53736bcc02b03068fd72fdbfe271.asciidoc b/docs/examples/e4be53736bcc02b03068fd72fdbfe271.asciidoc index 990f57ccb..ee96d3031 100644 --- a/docs/examples/e4be53736bcc02b03068fd72fdbfe271.asciidoc +++ b/docs/examples/e4be53736bcc02b03068fd72fdbfe271.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:108 +// indices/put-mapping.asciidoc:114 [source, python] ---- diff --git a/docs/examples/e4d1f01c025fb797a1d87f372760eabf.asciidoc b/docs/examples/e4d1f01c025fb797a1d87f372760eabf.asciidoc index 754f5c188..e07e9e82c 100644 --- a/docs/examples/e4d1f01c025fb797a1d87f372760eabf.asciidoc +++ b/docs/examples/e4d1f01c025fb797a1d87f372760eabf.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/hotspotting.asciidoc:267 +// troubleshooting/common-issues/hotspotting.asciidoc:271 [source, python] ---- diff --git a/docs/examples/e51a86b666f447cda5f634547a8e1a4a.asciidoc b/docs/examples/e51a86b666f447cda5f634547a8e1a4a.asciidoc index 78145f5de..a385a5114 100644 --- a/docs/examples/e51a86b666f447cda5f634547a8e1a4a.asciidoc +++ b/docs/examples/e51a86b666f447cda5f634547a8e1a4a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/create-data-stream.asciidoc:22 +// indices/create-data-stream.asciidoc:28 [source, python] ---- diff --git 
a/docs/examples/e58833449d01379df20ad06dc28144d8.asciidoc b/docs/examples/e58833449d01379df20ad06dc28144d8.asciidoc index 38181abf0..c0b720faf 100644 --- a/docs/examples/e58833449d01379df20ad06dc28144d8.asciidoc +++ b/docs/examples/e58833449d01379df20ad06dc28144d8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update-by-query.asciidoc:325 +// docs/update-by-query.asciidoc:331 [source, python] ---- diff --git a/docs/examples/e5901f48eb8a419b878fc2cb815d8691.asciidoc b/docs/examples/e5901f48eb8a419b878fc2cb815d8691.asciidoc index 76d26ada5..2debf6d13 100644 --- a/docs/examples/e5901f48eb8a419b878fc2cb815d8691.asciidoc +++ b/docs/examples/e5901f48eb8a419b878fc2cb815d8691.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/update-settings.asciidoc:45 +// cluster/update-settings.asciidoc:50 [source, python] ---- diff --git a/docs/examples/e5f50b31f165462d883ecbff45f74985.asciidoc b/docs/examples/e5f50b31f165462d883ecbff45f74985.asciidoc index 8fa533456..9d3eb5f2a 100644 --- a/docs/examples/e5f50b31f165462d883ecbff45f74985.asciidoc +++ b/docs/examples/e5f50b31f165462d883ecbff45f74985.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-index-template-v1.asciidoc:14 +// indices/put-index-template-v1.asciidoc:20 [source, python] ---- diff --git a/docs/examples/e5f89a04f50df707a0a53ec0f2eecbbd.asciidoc b/docs/examples/e5f89a04f50df707a0a53ec0f2eecbbd.asciidoc index a4fc3d5f2..f160683fc 100644 --- a/docs/examples/e5f89a04f50df707a0a53ec0f2eecbbd.asciidoc +++ b/docs/examples/e5f89a04f50df707a0a53ec0f2eecbbd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/get.asciidoc:71 +// docs/get.asciidoc:77 [source, python] ---- diff --git a/docs/examples/e60b7f75ca806f2c74927c3d9409a986.asciidoc b/docs/examples/e60b7f75ca806f2c74927c3d9409a986.asciidoc index 0935cf182..01e53f9b6 100644 --- a/docs/examples/e60b7f75ca806f2c74927c3d9409a986.asciidoc +++ b/docs/examples/e60b7f75ca806f2c74927c3d9409a986.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/create-role-mappings.asciidoc:160 +// rest-api/security/create-role-mappings.asciidoc:166 [source, python] ---- diff --git a/docs/examples/e61b5abe85000cc954a42e2cd74f3a26.asciidoc b/docs/examples/e61b5abe85000cc954a42e2cd74f3a26.asciidoc index afc63dda5..dfea2bee0 100644 --- a/docs/examples/e61b5abe85000cc954a42e2cd74f3a26.asciidoc +++ b/docs/examples/e61b5abe85000cc954a42e2cd74f3a26.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/put-calendar.asciidoc:44 +// ml/anomaly-detection/apis/put-calendar.asciidoc:50 [source, python] ---- diff --git a/docs/examples/e63775a2ff22b945ab9d5f630b80c506.asciidoc b/docs/examples/e63775a2ff22b945ab9d5f630b80c506.asciidoc index 648bd4ab1..2fd176ff4 100644 --- a/docs/examples/e63775a2ff22b945ab9d5f630b80c506.asciidoc +++ b/docs/examples/e63775a2ff22b945ab9d5f630b80c506.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/health.asciidoc:196 +// cluster/health.asciidoc:202 [source, python] ---- diff --git a/docs/examples/e650d73c57ab313e686fec01e3b0c90f.asciidoc b/docs/examples/e650d73c57ab313e686fec01e3b0c90f.asciidoc index 23631b956..a18141565 100644 --- a/docs/examples/e650d73c57ab313e686fec01e3b0c90f.asciidoc +++ b/docs/examples/e650d73c57ab313e686fec01e3b0c90f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/reindex.asciidoc:909 +// docs/reindex.asciidoc:915 [source, 
python] ---- diff --git a/docs/examples/e6dcc2911d2416a65eaec9846b956e15.asciidoc b/docs/examples/e6dcc2911d2416a65eaec9846b956e15.asciidoc index 559139160..452027669 100644 --- a/docs/examples/e6dcc2911d2416a65eaec9846b956e15.asciidoc +++ b/docs/examples/e6dcc2911d2416a65eaec9846b956e15.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/refresh.asciidoc:13 +// indices/refresh.asciidoc:19 [source, python] ---- diff --git a/docs/examples/e6f6d3aeea7ecea47cfd5c3d727f7004.asciidoc b/docs/examples/e6f6d3aeea7ecea47cfd5c3d727f7004.asciidoc new file mode 100644 index 000000000..15cb77d28 --- /dev/null +++ b/docs/examples/e6f6d3aeea7ecea47cfd5c3d727f7004.asciidoc @@ -0,0 +1,50 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/retrievers-examples.asciidoc:198 + +[source, python] +---- +resp = client.search( + index="retrievers_example", + retriever={ + "rrf": { + "retrievers": [ + { + "standard": { + "query": { + "query_string": { + "query": "(information retrieval) OR (artificial intelligence)", + "default_field": "text" + } + } + } + }, + { + "knn": { + "field": "vector", + "query_vector": [ + 0.23, + 0.67, + 0.89 + ], + "k": 3, + "num_candidates": 5 + } + } + ], + "rank_window_size": 10, + "rank_constant": 1 + } + }, + collapse={ + "field": "year", + "inner_hits": { + "name": "topic related documents", + "_source": [ + "year" + ] + } + }, + source=False, +) +print(resp) +---- diff --git a/docs/examples/e71d300cd87f09a9527cf45395dd7eb1.asciidoc b/docs/examples/e71d300cd87f09a9527cf45395dd7eb1.asciidoc index fdc0e6034..bf278d586 100644 --- a/docs/examples/e71d300cd87f09a9527cf45395dd7eb1.asciidoc +++ b/docs/examples/e71d300cd87f09a9527cf45395dd7eb1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// slm/apis/slm-execute-retention.asciidoc:34 +// slm/apis/slm-execute-retention.asciidoc:40 [source, python] ---- diff --git a/docs/examples/5ba32ebaa7ee28a339c7693696d305ca.asciidoc b/docs/examples/e77c2f41a7eca765b0c5f734a66d919f.asciidoc similarity index 83% rename from docs/examples/5ba32ebaa7ee28a339c7693696d305ca.asciidoc rename to docs/examples/e77c2f41a7eca765b0c5f734a66d919f.asciidoc index 95de4965d..2cd80bb0e 100644 --- a/docs/examples/5ba32ebaa7ee28a339c7693696d305ca.asciidoc +++ b/docs/examples/e77c2f41a7eca765b0c5f734a66d919f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/processors/attachment.asciidoc:137 +// ingest/processors/attachment.asciidoc:133 [source, python] ---- @@ -14,7 +14,7 @@ resp = client.ingest.put_pipeline( "content", "title" ], - "remove_binary": False + "remove_binary": True } } ], diff --git a/docs/examples/e784fc00894635470adfd78a0c46b427.asciidoc b/docs/examples/e784fc00894635470adfd78a0c46b427.asciidoc index ee0c04115..a5c0ed26a 100644 --- a/docs/examples/e784fc00894635470adfd78a0c46b427.asciidoc +++ b/docs/examples/e784fc00894635470adfd78a0c46b427.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-component-template.asciidoc:13 +// indices/put-component-template.asciidoc:19 [source, python] ---- diff --git a/docs/examples/e7d819634d765cde269e2669e2dc677f.asciidoc b/docs/examples/e7d819634d765cde269e2669e2dc677f.asciidoc index 6f60455fb..38ce37cf7 100644 --- a/docs/examples/e7d819634d765cde269e2669e2dc677f.asciidoc +++ b/docs/examples/e7d819634d765cde269e2669e2dc677f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/invalidate-api-keys.asciidoc:145 +// 
rest-api/security/invalidate-api-keys.asciidoc:151 [source, python] ---- diff --git a/docs/examples/e8211247c280a3fbbbdd32850b743b7b.asciidoc b/docs/examples/e8211247c280a3fbbbdd32850b743b7b.asciidoc index 47c64fe27..64b82529b 100644 --- a/docs/examples/e8211247c280a3fbbbdd32850b743b7b.asciidoc +++ b/docs/examples/e8211247c280a3fbbbdd32850b743b7b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/df-analytics/apis/put-dfanalytics.asciidoc:717 +// ml/df-analytics/apis/put-dfanalytics.asciidoc:723 [source, python] ---- diff --git a/docs/examples/e821d27a8b810821707ba860e31f8b78.asciidoc b/docs/examples/e821d27a8b810821707ba860e31f8b78.asciidoc index 2e60eeb2f..d5cfc9760 100644 --- a/docs/examples/e821d27a8b810821707ba860e31f8b78.asciidoc +++ b/docs/examples/e821d27a8b810821707ba860e31f8b78.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:232 +// indices/put-mapping.asciidoc:238 [source, python] ---- diff --git a/docs/examples/e827a9040e137410d62d10bb3b3cbb71.asciidoc b/docs/examples/e827a9040e137410d62d10bb3b3cbb71.asciidoc index 1b6b284b1..5a1aa8a01 100644 --- a/docs/examples/e827a9040e137410d62d10bb3b3cbb71.asciidoc +++ b/docs/examples/e827a9040e137410d62d10bb3b3cbb71.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/watcher/get-watch.asciidoc:49 +// rest-api/watcher/get-watch.asciidoc:55 [source, python] ---- diff --git a/docs/examples/e84e23232c7ecc8d6377ec2c16a60269.asciidoc b/docs/examples/e84e23232c7ecc8d6377ec2c16a60269.asciidoc index ec4c69a47..c75b01feb 100644 --- a/docs/examples/e84e23232c7ecc8d6377ec2c16a60269.asciidoc +++ b/docs/examples/e84e23232c7ecc8d6377ec2c16a60269.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/create-index.asciidoc:192 +// indices/create-index.asciidoc:198 [source, python] ---- diff --git a/docs/examples/e8c348cabe15dfe58ab4c3cc13a963fe.asciidoc b/docs/examples/e8c348cabe15dfe58ab4c3cc13a963fe.asciidoc index 4c3ecc511..93350f484 100644 --- a/docs/examples/e8c348cabe15dfe58ab4c3cc13a963fe.asciidoc +++ b/docs/examples/e8c348cabe15dfe58ab4c3cc13a963fe.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-shards.asciidoc:72 +// search/search-shards.asciidoc:78 [source, python] ---- diff --git a/docs/examples/e8f1c9ee003d115ec8f55e57990df6e4.asciidoc b/docs/examples/e8f1c9ee003d115ec8f55e57990df6e4.asciidoc index d59f83d38..d499ba26c 100644 --- a/docs/examples/e8f1c9ee003d115ec8f55e57990df6e4.asciidoc +++ b/docs/examples/e8f1c9ee003d115ec8f55e57990df6e4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/get-category.asciidoc:148 +// ml/anomaly-detection/apis/get-category.asciidoc:154 [source, python] ---- diff --git a/docs/examples/e9625da419bff6470ffd9927c59ca159.asciidoc b/docs/examples/e9625da419bff6470ffd9927c59ca159.asciidoc index d23a92313..1e71e2b91 100644 --- a/docs/examples/e9625da419bff6470ffd9927c59ca159.asciidoc +++ b/docs/examples/e9625da419bff6470ffd9927c59ca159.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/rejected-requests.asciidoc:25 +// troubleshooting/common-issues/rejected-requests.asciidoc:29 [source, python] ---- diff --git a/docs/examples/e9fc47015922d51c2b05e502ce9c622e.asciidoc b/docs/examples/e9fc47015922d51c2b05e502ce9c622e.asciidoc index 0e8ed80cd..55cc01563 100644 --- a/docs/examples/e9fc47015922d51c2b05e502ce9c622e.asciidoc +++ 
b/docs/examples/e9fc47015922d51c2b05e502ce9c622e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-google-ai-studio.asciidoc:97 +// inference/service-google-ai-studio.asciidoc:103 [source, python] ---- diff --git a/docs/examples/ea5391267ced860c00214c096e08c8d4.asciidoc b/docs/examples/ea5391267ced860c00214c096e08c8d4.asciidoc index b9988415e..d2b271c51 100644 --- a/docs/examples/ea5391267ced860c00214c096e08c8d4.asciidoc +++ b/docs/examples/ea5391267ced860c00214c096e08c8d4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/update-settings.asciidoc:13 +// indices/update-settings.asciidoc:19 [source, python] ---- diff --git a/docs/examples/ea68e3428cc2ca3455bf312d09451489.asciidoc b/docs/examples/ea68e3428cc2ca3455bf312d09451489.asciidoc index cfee2236c..a6ec582ef 100644 --- a/docs/examples/ea68e3428cc2ca3455bf312d09451489.asciidoc +++ b/docs/examples/ea68e3428cc2ca3455bf312d09451489.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/knn-search.asciidoc:1080 +// search/search-your-data/knn-search.asciidoc:1244 [source, python] ---- diff --git a/docs/examples/bb5a1319c496acc862c670cc7224e59a.asciidoc b/docs/examples/ea8c4229afa6dd4f1321355542be9912.asciidoc similarity index 89% rename from docs/examples/bb5a1319c496acc862c670cc7224e59a.asciidoc rename to docs/examples/ea8c4229afa6dd4f1321355542be9912.asciidoc index 672d06ae5..8936ca63a 100644 --- a/docs/examples/bb5a1319c496acc862c670cc7224e59a.asciidoc +++ b/docs/examples/ea8c4229afa6dd4f1321355542be9912.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/processors/attachment.asciidoc:219 +// ingest/processors/attachment.asciidoc:268 [source, python] ---- @@ -12,7 +12,7 @@ resp = client.ingest.put_pipeline( "field": "data", "indexed_chars": 11, "indexed_chars_field": "max_size", - "remove_binary": False + "remove_binary": True } } ], diff --git a/docs/examples/eafdabe80b21b90495555fa6d9089412.asciidoc b/docs/examples/eafdabe80b21b90495555fa6d9089412.asciidoc index fb29b3198..ac0090fc3 100644 --- a/docs/examples/eafdabe80b21b90495555fa6d9089412.asciidoc +++ b/docs/examples/eafdabe80b21b90495555fa6d9089412.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/clear-service-token-caches.asciidoc:62 +// rest-api/security/clear-service-token-caches.asciidoc:68 [source, python] ---- diff --git a/docs/examples/eb14cedd3bdda9ffef3c118f3d528dcd.asciidoc b/docs/examples/eb14cedd3bdda9ffef3c118f3d528dcd.asciidoc index 63e6844bc..73b38e5d7 100644 --- a/docs/examples/eb14cedd3bdda9ffef3c118f3d528dcd.asciidoc +++ b/docs/examples/eb14cedd3bdda9ffef3c118f3d528dcd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update.asciidoc:172 +// docs/update.asciidoc:178 [source, python] ---- diff --git a/docs/examples/eb4e43b47867b54214a8630172dd0e21.asciidoc b/docs/examples/eb4e43b47867b54214a8630172dd0e21.asciidoc index 71fc1718f..4789a526e 100644 --- a/docs/examples/eb4e43b47867b54214a8630172dd0e21.asciidoc +++ b/docs/examples/eb4e43b47867b54214a8630172dd0e21.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/delete-forecast.asciidoc:69 +// ml/anomaly-detection/apis/delete-forecast.asciidoc:75 [source, python] ---- diff --git a/docs/examples/eb54506fbc71a7d250e86b22d0600114.asciidoc b/docs/examples/eb54506fbc71a7d250e86b22d0600114.asciidoc index 89c81acd0..1b7faae1f 100644 --- 
a/docs/examples/eb54506fbc71a7d250e86b22d0600114.asciidoc +++ b/docs/examples/eb54506fbc71a7d250e86b22d0600114.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/list-connectors-api.asciidoc:108 +// connector/apis/list-connectors-api.asciidoc:117 [source, python] ---- diff --git a/docs/examples/eb964d8d7f27c057a4542448ba5b74e4.asciidoc b/docs/examples/eb964d8d7f27c057a4542448ba5b74e4.asciidoc index 8fde55ac3..c534fe21a 100644 --- a/docs/examples/eb964d8d7f27c057a4542448ba5b74e4.asciidoc +++ b/docs/examples/eb964d8d7f27c057a4542448ba5b74e4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/apis/get-snapshot-api.asciidoc:482 +// snapshot-restore/apis/get-snapshot-api.asciidoc:488 [source, python] ---- diff --git a/docs/examples/eb96d7dd5f3116a50f7a86b729f1a934.asciidoc b/docs/examples/eb96d7dd5f3116a50f7a86b729f1a934.asciidoc index fcb571114..1edc39f32 100644 --- a/docs/examples/eb96d7dd5f3116a50f7a86b729f1a934.asciidoc +++ b/docs/examples/eb96d7dd5f3116a50f7a86b729f1a934.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-scheduling-api.asciidoc:120 +// connector/apis/update-connector-scheduling-api.asciidoc:126 [source, python] ---- diff --git a/docs/examples/eb9a41f7fc8bdf5559bb9db822ae3a65.asciidoc b/docs/examples/eb9a41f7fc8bdf5559bb9db822ae3a65.asciidoc index 182d9c5ff..ae9694132 100644 --- a/docs/examples/eb9a41f7fc8bdf5559bb9db822ae3a65.asciidoc +++ b/docs/examples/eb9a41f7fc8bdf5559bb9db822ae3a65.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/bulk-create-roles.asciidoc:230 +// rest-api/security/bulk-create-roles.asciidoc:236 [source, python] ---- diff --git a/docs/examples/ebb1c7554e91adb4552599f3e5de1865.asciidoc b/docs/examples/ebb1c7554e91adb4552599f3e5de1865.asciidoc index eeaf1d1f8..f48d32f16 100644 --- a/docs/examples/ebb1c7554e91adb4552599f3e5de1865.asciidoc +++ b/docs/examples/ebb1c7554e91adb4552599f3e5de1865.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/split-index.asciidoc:84 +// indices/split-index.asciidoc:90 [source, python] ---- diff --git a/docs/examples/ebd76a45e153c4656c5871e23b7b5508.asciidoc b/docs/examples/ebd76a45e153c4656c5871e23b7b5508.asciidoc index 65636068e..e7b081d20 100644 --- a/docs/examples/ebd76a45e153c4656c5871e23b7b5508.asciidoc +++ b/docs/examples/ebd76a45e153c4656c5871e23b7b5508.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/delete-app-privileges.asciidoc:41 +// rest-api/security/delete-app-privileges.asciidoc:47 [source, python] ---- diff --git a/docs/examples/ec0e50f78390b8622cef4e0b0cd45967.asciidoc b/docs/examples/ec0e50f78390b8622cef4e0b0cd45967.asciidoc index 494db9504..810df64ac 100644 --- a/docs/examples/ec0e50f78390b8622cef4e0b0cd45967.asciidoc +++ b/docs/examples/ec0e50f78390b8622cef4e0b0cd45967.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// eql/eql-search-api.asciidoc:533 +// eql/eql-search-api.asciidoc:586 [source, python] ---- diff --git a/docs/examples/ec4b43c3ebd8816799fa004596b2f0cb.asciidoc b/docs/examples/ec4b43c3ebd8816799fa004596b2f0cb.asciidoc new file mode 100644 index 000000000..effaf77bd --- /dev/null +++ b/docs/examples/ec4b43c3ebd8816799fa004596b2f0cb.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// index-modules/slowlog.asciidoc:232 + +[source, python] +---- +resp = client.indices.put_settings( + index="*", + settings={ + 
"index.indexing.slowlog.include.user": True, + "index.indexing.slowlog.threshold.index.warn": "30s" + }, +) +print(resp) +---- diff --git a/docs/examples/ecfd0d94dd14ef05dfa861f22544b388.asciidoc b/docs/examples/ecfd0d94dd14ef05dfa861f22544b388.asciidoc index 051f91b70..961ca9008 100644 --- a/docs/examples/ecfd0d94dd14ef05dfa861f22544b388.asciidoc +++ b/docs/examples/ecfd0d94dd14ef05dfa861f22544b388.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-error-api.asciidoc:81 +// connector/apis/update-connector-error-api.asciidoc:87 [source, python] ---- diff --git a/docs/examples/ed09432c6069e41409f0a5e0d1d3842a.asciidoc b/docs/examples/ed09432c6069e41409f0a5e0d1d3842a.asciidoc index d056e9d33..d7e084598 100644 --- a/docs/examples/ed09432c6069e41409f0a5e0d1d3842a.asciidoc +++ b/docs/examples/ed09432c6069e41409f0a5e0d1d3842a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/apis/reload-analyzers.asciidoc:10 +// indices/apis/reload-analyzers.asciidoc:16 [source, python] ---- diff --git a/docs/examples/ed3bdf4d6799b43526851e92b6a60c55.asciidoc b/docs/examples/ed3bdf4d6799b43526851e92b6a60c55.asciidoc index fa96997d3..460e9b642 100644 --- a/docs/examples/ed3bdf4d6799b43526851e92b6a60c55.asciidoc +++ b/docs/examples/ed3bdf4d6799b43526851e92b6a60c55.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/get-field-mapping.asciidoc:129 +// indices/get-field-mapping.asciidoc:135 [source, python] ---- diff --git a/docs/examples/ed5bfa68d01e079aac94de78dc5caddf.asciidoc b/docs/examples/ed5bfa68d01e079aac94de78dc5caddf.asciidoc index 85db15e13..8da4f139b 100644 --- a/docs/examples/ed5bfa68d01e079aac94de78dc5caddf.asciidoc +++ b/docs/examples/ed5bfa68d01e079aac94de78dc5caddf.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/master.asciidoc:51 +// cat/master.asciidoc:57 [source, python] ---- diff --git a/docs/examples/edb25dc0162b039d477cb06aed2d6275.asciidoc b/docs/examples/edb25dc0162b039d477cb06aed2d6275.asciidoc index fa681946d..90f2ebf6c 100644 --- a/docs/examples/edb25dc0162b039d477cb06aed2d6275.asciidoc +++ b/docs/examples/edb25dc0162b039d477cb06aed2d6275.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/sparse-vector-query.asciidoc:147 +// query-dsl/sparse-vector-query.asciidoc:149 [source, python] ---- diff --git a/docs/examples/ee223e604bb695cad2517d28ae63ac34.asciidoc b/docs/examples/ee223e604bb695cad2517d28ae63ac34.asciidoc index cb15a14c3..3efa63670 100644 --- a/docs/examples/ee223e604bb695cad2517d28ae63ac34.asciidoc +++ b/docs/examples/ee223e604bb695cad2517d28ae63ac34.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/rrf.asciidoc:47 +// search/rrf.asciidoc:53 [source, python] ---- diff --git a/docs/examples/ee577c4c7cc723e99569ea2d1137adba.asciidoc b/docs/examples/ee577c4c7cc723e99569ea2d1137adba.asciidoc index 51d611002..642fb80df 100644 --- a/docs/examples/ee577c4c7cc723e99569ea2d1137adba.asciidoc +++ b/docs/examples/ee577c4c7cc723e99569ea2d1137adba.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/clear-roles-cache.asciidoc:42 +// rest-api/security/clear-roles-cache.asciidoc:48 [source, python] ---- diff --git a/docs/examples/ee90d1fb22b59d30da339d825303b912.asciidoc b/docs/examples/ee90d1fb22b59d30da339d825303b912.asciidoc index 52d677d20..0d1fd70aa 100644 --- a/docs/examples/ee90d1fb22b59d30da339d825303b912.asciidoc +++ 
b/docs/examples/ee90d1fb22b59d30da339d825303b912.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/put-app-privileges.asciidoc:130 +// rest-api/security/put-app-privileges.asciidoc:136 [source, python] ---- diff --git a/docs/examples/eec051555c8050d017d3fe38ea59e3a0.asciidoc b/docs/examples/eec051555c8050d017d3fe38ea59e3a0.asciidoc index cab93affd..cb381a406 100644 --- a/docs/examples/eec051555c8050d017d3fe38ea59e3a0.asciidoc +++ b/docs/examples/eec051555c8050d017d3fe38ea59e3a0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search.asciidoc:907 +// search/search.asciidoc:915 [source, python] ---- diff --git a/docs/examples/eee6110831c08b9c1b3f56b24656e95b.asciidoc b/docs/examples/eee6110831c08b9c1b3f56b24656e95b.asciidoc index 515c4e510..d15d395da 100644 --- a/docs/examples/eee6110831c08b9c1b3f56b24656e95b.asciidoc +++ b/docs/examples/eee6110831c08b9c1b3f56b24656e95b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-hugging-face.asciidoc:101 +// inference/service-hugging-face.asciidoc:107 [source, python] ---- diff --git a/docs/examples/ef22234b97cc06d7dd620b4ce7c97b31.asciidoc b/docs/examples/ef22234b97cc06d7dd620b4ce7c97b31.asciidoc index 7235ecc06..47af1f873 100644 --- a/docs/examples/ef22234b97cc06d7dd620b4ce7c97b31.asciidoc +++ b/docs/examples/ef22234b97cc06d7dd620b4ce7c97b31.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/reindex.asciidoc:694 +// docs/reindex.asciidoc:700 [source, python] ---- diff --git a/docs/examples/ef33b3b373f7040b874146599db5d557.asciidoc b/docs/examples/ef33b3b373f7040b874146599db5d557.asciidoc index 97a18cef8..c901f6a56 100644 --- a/docs/examples/ef33b3b373f7040b874146599db5d557.asciidoc +++ b/docs/examples/ef33b3b373f7040b874146599db5d557.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/analyze.asciidoc:173 +// indices/analyze.asciidoc:179 [source, python] ---- diff --git a/docs/examples/ef9c29759459904fef162acd223462c4.asciidoc b/docs/examples/ef9c29759459904fef162acd223462c4.asciidoc index 7d7cd3d0c..62ab676bd 100644 --- a/docs/examples/ef9c29759459904fef162acd223462c4.asciidoc +++ b/docs/examples/ef9c29759459904fef162acd223462c4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/nodes-stats.asciidoc:2589 +// cluster/nodes-stats.asciidoc:2595 [source, python] ---- diff --git a/docs/examples/eff8ecaed1ed084909c64450fc363a20.asciidoc b/docs/examples/eff8ecaed1ed084909c64450fc363a20.asciidoc index b87269ad4..09d50fdfd 100644 --- a/docs/examples/eff8ecaed1ed084909c64450fc363a20.asciidoc +++ b/docs/examples/eff8ecaed1ed084909c64450fc363a20.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/update-settings.asciidoc:96 +// cluster/update-settings.asciidoc:101 [source, python] ---- diff --git a/docs/examples/f04e1284d09ceb4443d67b2ef9c7f476.asciidoc b/docs/examples/f04e1284d09ceb4443d67b2ef9c7f476.asciidoc index 7b8b3092d..04374468a 100644 --- a/docs/examples/f04e1284d09ceb4443d67b2ef9c7f476.asciidoc +++ b/docs/examples/f04e1284d09ceb4443d67b2ef9c7f476.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/apis/delete-snapshot-api.asciidoc:30 +// snapshot-restore/apis/delete-snapshot-api.asciidoc:36 [source, python] ---- diff --git a/docs/examples/f0816beb8ac21cb0940858b72f6b1946.asciidoc b/docs/examples/f0816beb8ac21cb0940858b72f6b1946.asciidoc index 903a04c6c..f7e0379a5 100644 --- 
a/docs/examples/f0816beb8ac21cb0940858b72f6b1946.asciidoc +++ b/docs/examples/f0816beb8ac21cb0940858b72f6b1946.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/fielddata.asciidoc:126 +// cat/fielddata.asciidoc:132 [source, python] ---- diff --git a/docs/examples/f128a9dff5051b47efe2c53c4454a68f.asciidoc b/docs/examples/f128a9dff5051b47efe2c53c4454a68f.asciidoc index 9a2bc95e0..1569cc402 100644 --- a/docs/examples/f128a9dff5051b47efe2c53c4454a68f.asciidoc +++ b/docs/examples/f128a9dff5051b47efe2c53c4454a68f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/rollover-index.asciidoc:255 +// indices/rollover-index.asciidoc:261 [source, python] ---- diff --git a/docs/examples/f160561efab38e40c2feebf5a2542ab5.asciidoc b/docs/examples/f160561efab38e40c2feebf5a2542ab5.asciidoc index b60a3d3af..aa6a1e78c 100644 --- a/docs/examples/f160561efab38e40c2feebf5a2542ab5.asciidoc +++ b/docs/examples/f160561efab38e40c2feebf5a2542ab5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/nodes-stats.asciidoc:2597 +// cluster/nodes-stats.asciidoc:2603 [source, python] ---- diff --git a/docs/examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc b/docs/examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc index bde573a84..a23aeb237 100644 --- a/docs/examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc +++ b/docs/examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/post-inference.asciidoc:132 +// inference/post-inference.asciidoc:138 [source, python] ---- diff --git a/docs/examples/f1bf0c03581b79c3324cfa3246a60e4d.asciidoc b/docs/examples/f1bf0c03581b79c3324cfa3246a60e4d.asciidoc index 2b937027b..594d3bd47 100644 --- a/docs/examples/f1bf0c03581b79c3324cfa3246a60e4d.asciidoc +++ b/docs/examples/f1bf0c03581b79c3324cfa3246a60e4d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/dense-vector.asciidoc:184 +// mapping/types/dense-vector.asciidoc:183 [source, python] ---- diff --git a/docs/examples/f1e2af6dbb30fc5335e7d0b5507a2a93.asciidoc b/docs/examples/f1e2af6dbb30fc5335e7d0b5507a2a93.asciidoc index 075670f1c..3b9a26d8c 100644 --- a/docs/examples/f1e2af6dbb30fc5335e7d0b5507a2a93.asciidoc +++ b/docs/examples/f1e2af6dbb30fc5335e7d0b5507a2a93.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/reset-job.asciidoc:56 +// ml/anomaly-detection/apis/reset-job.asciidoc:62 [source, python] ---- diff --git a/docs/examples/f2175feadc2abe545899889e6d4ffcad.asciidoc b/docs/examples/f2175feadc2abe545899889e6d4ffcad.asciidoc index bcfa55c34..5e628b0ae 100644 --- a/docs/examples/f2175feadc2abe545899889e6d4ffcad.asciidoc +++ b/docs/examples/f2175feadc2abe545899889e6d4ffcad.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// slm/apis/slm-get.asciidoc:71 +// slm/apis/slm-get.asciidoc:77 [source, python] ---- diff --git a/docs/examples/f27c28ddbf4c266b5f42d14da837b8de.asciidoc b/docs/examples/f27c28ddbf4c266b5f42d14da837b8de.asciidoc index cf0fa265f..613c5fe07 100644 --- a/docs/examples/f27c28ddbf4c266b5f42d14da837b8de.asciidoc +++ b/docs/examples/f27c28ddbf4c266b5f42d14da837b8de.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/flush.asciidoc:141 +// indices/flush.asciidoc:147 [source, python] ---- diff --git a/docs/examples/f298c4eb50ea97b34c57f8756eb350d3.asciidoc b/docs/examples/f298c4eb50ea97b34c57f8756eb350d3.asciidoc index b62249d4e..e6b401225 100644 --- 
a/docs/examples/f298c4eb50ea97b34c57f8756eb350d3.asciidoc +++ b/docs/examples/f298c4eb50ea97b34c57f8756eb350d3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/pending_tasks.asciidoc:51 +// cat/pending_tasks.asciidoc:57 [source, python] ---- diff --git a/docs/examples/f29b2674299ddf51a25ed87619025ede.asciidoc b/docs/examples/f29b2674299ddf51a25ed87619025ede.asciidoc index 1c6f7e551..ce3e50526 100644 --- a/docs/examples/f29b2674299ddf51a25ed87619025ede.asciidoc +++ b/docs/examples/f29b2674299ddf51a25ed87619025ede.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/rollup-search.asciidoc:115 +// rollup/apis/rollup-search.asciidoc:121 [source, python] ---- diff --git a/docs/examples/f2a5f77f929cc7b893b80f4bd5b1a192.asciidoc b/docs/examples/f2a5f77f929cc7b893b80f4bd5b1a192.asciidoc index 61c8fc4f7..322a5d096 100644 --- a/docs/examples/f2a5f77f929cc7b893b80f4bd5b1a192.asciidoc +++ b/docs/examples/f2a5f77f929cc7b893b80f4bd5b1a192.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/get-connector-api.asciidoc:65 +// connector/apis/get-connector-api.asciidoc:74 [source, python] ---- diff --git a/docs/examples/f2e854b6c99659ccc1824e86c096e433.asciidoc b/docs/examples/f2e854b6c99659ccc1824e86c096e433.asciidoc index 580ae0c8f..c42f37698 100644 --- a/docs/examples/f2e854b6c99659ccc1824e86c096e433.asciidoc +++ b/docs/examples/f2e854b6c99659ccc1824e86c096e433.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ccr/apis/auto-follow/resume-auto-follow-pattern.asciidoc:80 +// ccr/apis/auto-follow/resume-auto-follow-pattern.asciidoc:86 [source, python] ---- diff --git a/docs/examples/f2ec53c0ef5025de8890d0ff8ec287a0.asciidoc b/docs/examples/f2ec53c0ef5025de8890d0ff8ec287a0.asciidoc index 9fb946dc7..16fb92c4c 100644 --- a/docs/examples/f2ec53c0ef5025de8890d0ff8ec287a0.asciidoc +++ b/docs/examples/f2ec53c0ef5025de8890d0ff8ec287a0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/rank-eval.asciidoc:353 +// search/rank-eval.asciidoc:359 [source, python] ---- diff --git a/docs/examples/f2f1cae094855a45fd8f73478bec8e70.asciidoc b/docs/examples/f2f1cae094855a45fd8f73478bec8e70.asciidoc index 2a5e63f3e..d3824a920 100644 --- a/docs/examples/f2f1cae094855a45fd8f73478bec8e70.asciidoc +++ b/docs/examples/f2f1cae094855a45fd8f73478bec8e70.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/split-index.asciidoc:203 +// indices/split-index.asciidoc:209 [source, python] ---- diff --git a/docs/examples/f3594de7ef39ab09b0bb12c1e76bfe6b.asciidoc b/docs/examples/f3594de7ef39ab09b0bb12c1e76bfe6b.asciidoc index 5f7cd0dd9..c0d76f3e8 100644 --- a/docs/examples/f3594de7ef39ab09b0bb12c1e76bfe6b.asciidoc +++ b/docs/examples/f3594de7ef39ab09b0bb12c1e76bfe6b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/shrink-index.asciidoc:119 +// indices/shrink-index.asciidoc:125 [source, python] ---- diff --git a/docs/examples/f3697682a886ab129530f3e5c1b30632.asciidoc b/docs/examples/f3697682a886ab129530f3e5c1b30632.asciidoc index bee2d347e..2d3e503da 100644 --- a/docs/examples/f3697682a886ab129530f3e5c1b30632.asciidoc +++ b/docs/examples/f3697682a886ab129530f3e5c1b30632.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/termvectors.asciidoc:10 +// docs/termvectors.asciidoc:16 [source, python] ---- diff --git a/docs/examples/f37173a75cd1b0d683c6f67819dd1de3.asciidoc b/docs/examples/f37173a75cd1b0d683c6f67819dd1de3.asciidoc 
index 3d3d9027a..3b030c27d 100644 --- a/docs/examples/f37173a75cd1b0d683c6f67819dd1de3.asciidoc +++ b/docs/examples/f37173a75cd1b0d683c6f67819dd1de3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/reindex.asciidoc:794 +// docs/reindex.asciidoc:800 [source, python] ---- diff --git a/docs/examples/f388e571224dd6850f8c9f9f08fca3da.asciidoc b/docs/examples/f388e571224dd6850f8c9f9f08fca3da.asciidoc index b062c34a7..4231f47ea 100644 --- a/docs/examples/f388e571224dd6850f8c9f9f08fca3da.asciidoc +++ b/docs/examples/f388e571224dd6850f8c9f9f08fca3da.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/invalidate-api-keys.asciidoc:123 +// rest-api/security/invalidate-api-keys.asciidoc:129 [source, python] ---- diff --git a/docs/examples/f3b185131f40687c25d2f85e1231d8bd.asciidoc b/docs/examples/f3b185131f40687c25d2f85e1231d8bd.asciidoc index 96ab82784..9fbd49437 100644 --- a/docs/examples/f3b185131f40687c25d2f85e1231d8bd.asciidoc +++ b/docs/examples/f3b185131f40687c25d2f85e1231d8bd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/validate.asciidoc:99 +// search/validate.asciidoc:105 [source, python] ---- diff --git a/docs/examples/f3fb3cba44988b6e9fee93316138b2cf.asciidoc b/docs/examples/f3fb3cba44988b6e9fee93316138b2cf.asciidoc index eaaa1b0ec..0dd4534b2 100644 --- a/docs/examples/f3fb3cba44988b6e9fee93316138b2cf.asciidoc +++ b/docs/examples/f3fb3cba44988b6e9fee93316138b2cf.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/clear-privileges-cache.asciidoc:50 +// rest-api/security/clear-privileges-cache.asciidoc:56 [source, python] ---- diff --git a/docs/examples/f43ec4041e3b72bbde063452990bfc4b.asciidoc b/docs/examples/f43ec4041e3b72bbde063452990bfc4b.asciidoc index b1c76f31a..d5e608559 100644 --- a/docs/examples/f43ec4041e3b72bbde063452990bfc4b.asciidoc +++ b/docs/examples/f43ec4041e3b72bbde063452990bfc4b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/clearcache.asciidoc:142 +// indices/clearcache.asciidoc:148 [source, python] ---- diff --git a/docs/examples/f44d287c6937785eb09b91353c1deb1e.asciidoc b/docs/examples/f44d287c6937785eb09b91353c1deb1e.asciidoc index 8d7c695cc..6a44a94e9 100644 --- a/docs/examples/f44d287c6937785eb09b91353c1deb1e.asciidoc +++ b/docs/examples/f44d287c6937785eb09b91353c1deb1e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/get-datafeed-stats.asciidoc:177 +// ml/anomaly-detection/apis/get-datafeed-stats.asciidoc:183 [source, python] ---- diff --git a/docs/examples/f454e3f8ad5f5bd82a4a25af7dee9ca1.asciidoc b/docs/examples/f454e3f8ad5f5bd82a4a25af7dee9ca1.asciidoc index 8f571685d..4bc61fdf6 100644 --- a/docs/examples/f454e3f8ad5f5bd82a4a25af7dee9ca1.asciidoc +++ b/docs/examples/f454e3f8ad5f5bd82a4a25af7dee9ca1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/array.asciidoc:42 +// mapping/types/array.asciidoc:39 [source, python] ---- diff --git a/docs/examples/f49ac80f0130cae8d0ea6f4472a149dd.asciidoc b/docs/examples/f49ac80f0130cae8d0ea6f4472a149dd.asciidoc index df68fc906..2cccf26e2 100644 --- a/docs/examples/f49ac80f0130cae8d0ea6f4472a149dd.asciidoc +++ b/docs/examples/f49ac80f0130cae8d0ea6f4472a149dd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/knn-query.asciidoc:17 +// query-dsl/knn-query.asciidoc:18 [source, python] ---- diff --git 
a/docs/examples/f4b9baed3c6a82be3672cbc8999c2368.asciidoc b/docs/examples/f4b9baed3c6a82be3672cbc8999c2368.asciidoc index 5ad334f01..9adf5ba34 100644 --- a/docs/examples/f4b9baed3c6a82be3672cbc8999c2368.asciidoc +++ b/docs/examples/f4b9baed3c6a82be3672cbc8999c2368.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/terms-enum.asciidoc:13 +// search/terms-enum.asciidoc:19 [source, python] ---- diff --git a/docs/examples/f4dc1286d0a2f8d1fde64fbf12fd9f8d.asciidoc b/docs/examples/f4dc1286d0a2f8d1fde64fbf12fd9f8d.asciidoc index b48e2704b..920da5537 100644 --- a/docs/examples/f4dc1286d0a2f8d1fde64fbf12fd9f8d.asciidoc +++ b/docs/examples/f4dc1286d0a2f8d1fde64fbf12fd9f8d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/disk-usage-exceeded.asciidoc:86 +// troubleshooting/common-issues/disk-usage-exceeded.asciidoc:90 [source, python] ---- diff --git a/docs/examples/f4f557716049b23f8840d58d71e748f0.asciidoc b/docs/examples/f4f557716049b23f8840d58d71e748f0.asciidoc index 087424acd..7ff14c5da 100644 --- a/docs/examples/f4f557716049b23f8840d58d71e748f0.asciidoc +++ b/docs/examples/f4f557716049b23f8840d58d71e748f0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/update-settings.asciidoc:115 +// indices/update-settings.asciidoc:121 [source, python] ---- diff --git a/docs/examples/f4fdfe52ecba65eec6beb30d8deb8bbf.asciidoc b/docs/examples/f4fdfe52ecba65eec6beb30d8deb8bbf.asciidoc index ceca7cc8a..6ed6fd1bf 100644 --- a/docs/examples/f4fdfe52ecba65eec6beb30d8deb8bbf.asciidoc +++ b/docs/examples/f4fdfe52ecba65eec6beb30d8deb8bbf.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ccr/apis/follow/post-forget-follower.asciidoc:35 +// ccr/apis/follow/post-forget-follower.asciidoc:41 [source, python] ---- diff --git a/docs/examples/f5140f08f56c64b5789357539f8b9ba8.asciidoc b/docs/examples/f5140f08f56c64b5789357539f8b9ba8.asciidoc index f0e6a2bcd..ac21cc555 100644 --- a/docs/examples/f5140f08f56c64b5789357539f8b9ba8.asciidoc +++ b/docs/examples/f5140f08f56c64b5789357539f8b9ba8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/delete-alias.asciidoc:10 +// indices/delete-alias.asciidoc:16 [source, python] ---- diff --git a/docs/examples/f57ce7de0946e9416ddb9150e95f4b74.asciidoc b/docs/examples/f57ce7de0946e9416ddb9150e95f4b74.asciidoc index d2d437022..c72602223 100644 --- a/docs/examples/f57ce7de0946e9416ddb9150e95f4b74.asciidoc +++ b/docs/examples/f57ce7de0946e9416ddb9150e95f4b74.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-azure-openai.asciidoc:159 +// inference/service-azure-openai.asciidoc:165 [source, python] ---- diff --git a/docs/examples/f58fd031597e2c3df78bf0efd07206e3.asciidoc b/docs/examples/f58fd031597e2c3df78bf0efd07206e3.asciidoc index a2222067f..a9ea7f209 100644 --- a/docs/examples/f58fd031597e2c3df78bf0efd07206e3.asciidoc +++ b/docs/examples/f58fd031597e2c3df78bf0efd07206e3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// licensing/start-basic.asciidoc:62 +// licensing/start-basic.asciidoc:68 [source, python] ---- diff --git a/docs/examples/f63f6343e74bd5c844854272e746de14.asciidoc b/docs/examples/f63f6343e74bd5c844854272e746de14.asciidoc index 00f67f60f..a317cea4e 100644 --- a/docs/examples/f63f6343e74bd5c844854272e746de14.asciidoc +++ b/docs/examples/f63f6343e74bd5c844854272e746de14.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// 
rest-api/watcher/deactivate-watch.asciidoc:82 +// rest-api/watcher/deactivate-watch.asciidoc:88 [source, python] ---- diff --git a/docs/examples/f656c1e64268293ecc8ebd8065628faa.asciidoc b/docs/examples/f656c1e64268293ecc8ebd8065628faa.asciidoc index 1b4e85d1d..084364977 100644 --- a/docs/examples/f656c1e64268293ecc8ebd8065628faa.asciidoc +++ b/docs/examples/f656c1e64268293ecc8ebd8065628faa.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/clear-service-token-caches.asciidoc:70 +// rest-api/security/clear-service-token-caches.asciidoc:76 [source, python] ---- diff --git a/docs/examples/f66643c54999426c5afa6d5a87435d4e.asciidoc b/docs/examples/f66643c54999426c5afa6d5a87435d4e.asciidoc index 767b7ced6..ce8785026 100644 --- a/docs/examples/f66643c54999426c5afa6d5a87435d4e.asciidoc +++ b/docs/examples/f66643c54999426c5afa6d5a87435d4e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/clear-api-key-cache.asciidoc:43 +// rest-api/security/clear-api-key-cache.asciidoc:49 [source, python] ---- diff --git a/docs/examples/f6982ff80b9a64cd5fcac5b20908c906.asciidoc b/docs/examples/f6982ff80b9a64cd5fcac5b20908c906.asciidoc index 08be9d126..74a46c58a 100644 --- a/docs/examples/f6982ff80b9a64cd5fcac5b20908c906.asciidoc +++ b/docs/examples/f6982ff80b9a64cd5fcac5b20908c906.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/delete-calendar-event.asciidoc:43 +// ml/anomaly-detection/apis/delete-calendar-event.asciidoc:49 [source, python] ---- diff --git a/docs/examples/f6d493650b4344f17297b568016fb445.asciidoc b/docs/examples/f6d493650b4344f17297b568016fb445.asciidoc index 8e1f1d499..674327bf6 100644 --- a/docs/examples/f6d493650b4344f17297b568016fb445.asciidoc +++ b/docs/examples/f6d493650b4344f17297b568016fb445.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ccr/apis/follow/post-unfollow.asciidoc:33 +// ccr/apis/follow/post-unfollow.asciidoc:39 [source, python] ---- diff --git a/docs/examples/f6df4acf3c7a4f85706ff314b21ebcb2.asciidoc b/docs/examples/f6df4acf3c7a4f85706ff314b21ebcb2.asciidoc index a030da158..b21d0e61f 100644 --- a/docs/examples/f6df4acf3c7a4f85706ff314b21ebcb2.asciidoc +++ b/docs/examples/f6df4acf3c7a4f85706ff314b21ebcb2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/clear-privileges-cache.asciidoc:43 +// rest-api/security/clear-privileges-cache.asciidoc:49 [source, python] ---- diff --git a/docs/examples/f6ead39c5505045543b9225deca7367d.asciidoc b/docs/examples/f6ead39c5505045543b9225deca7367d.asciidoc index 85a72c5e4..c07d7f415 100644 --- a/docs/examples/f6ead39c5505045543b9225deca7367d.asciidoc +++ b/docs/examples/f6ead39c5505045543b9225deca7367d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/voting-exclusions.asciidoc:109 +// cluster/voting-exclusions.asciidoc:115 [source, python] ---- diff --git a/docs/examples/f6eff830fb0fad200ebfb1e3e46f6f0e.asciidoc b/docs/examples/f6eff830fb0fad200ebfb1e3e46f6f0e.asciidoc index eb34a6f82..521fed98e 100644 --- a/docs/examples/f6eff830fb0fad200ebfb1e3e46f6f0e.asciidoc +++ b/docs/examples/f6eff830fb0fad200ebfb1e3e46f6f0e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/watcher/execute-watch.asciidoc:155 +// rest-api/watcher/execute-watch.asciidoc:161 [source, python] ---- diff --git a/docs/examples/f6f647eb644a2d236637ff05f833cb73.asciidoc b/docs/examples/f6f647eb644a2d236637ff05f833cb73.asciidoc 
index 3301985ae..1e70a342e 100644 --- a/docs/examples/f6f647eb644a2d236637ff05f833cb73.asciidoc +++ b/docs/examples/f6f647eb644a2d236637ff05f833cb73.asciidoc @@ -3,7 +3,10 @@ [source, python] ---- -resp = client.connector.secret_post( +resp = client.perform_request( + "POST", + "/_connector/_secret", + headers={"Content-Type": "application/json"}, body={ "value": "encoded_api_key" }, diff --git a/docs/examples/f70a54cd9a9f4811bf962e469f2ca2ea.asciidoc b/docs/examples/f70a54cd9a9f4811bf962e469f2ca2ea.asciidoc index 5d8f859d7..9506f1299 100644 --- a/docs/examples/f70a54cd9a9f4811bf962e469f2ca2ea.asciidoc +++ b/docs/examples/f70a54cd9a9f4811bf962e469f2ca2ea.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/bool-query.asciidoc:88 +// query-dsl/bool-query.asciidoc:91 [source, python] ---- diff --git a/docs/examples/f70ff57c80cdbce3f1e7c63ee307c92d.asciidoc b/docs/examples/f70ff57c80cdbce3f1e7c63ee307c92d.asciidoc index 2decbc5f1..efb71a223 100644 --- a/docs/examples/f70ff57c80cdbce3f1e7c63ee307c92d.asciidoc +++ b/docs/examples/f70ff57c80cdbce3f1e7c63ee307c92d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// scripting/using.asciidoc:503 +// scripting/using.asciidoc:508 [source, python] ---- diff --git a/docs/examples/f785b5d17eb59f8d2a353c2dee66eb5b.asciidoc b/docs/examples/f785b5d17eb59f8d2a353c2dee66eb5b.asciidoc index d93907c92..db64e8c7c 100644 --- a/docs/examples/f785b5d17eb59f8d2a353c2dee66eb5b.asciidoc +++ b/docs/examples/f785b5d17eb59f8d2a353c2dee66eb5b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/get-connector-sync-job-api.asciidoc:45 +// connector/apis/get-connector-sync-job-api.asciidoc:51 [source, python] ---- diff --git a/docs/examples/f823e4b87ed181b27f73ebc51351f0ee.asciidoc b/docs/examples/f823e4b87ed181b27f73ebc51351f0ee.asciidoc index 0de27db07..dcc03a130 100644 --- a/docs/examples/f823e4b87ed181b27f73ebc51351f0ee.asciidoc +++ b/docs/examples/f823e4b87ed181b27f73ebc51351f0ee.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/delete-data-stream.asciidoc:26 +// indices/delete-data-stream.asciidoc:32 [source, python] ---- diff --git a/docs/examples/f8a0010753b1ff563dc42d703902d2fa.asciidoc b/docs/examples/f8a0010753b1ff563dc42d703902d2fa.asciidoc index fb0ec2cb8..ef6a5206b 100644 --- a/docs/examples/f8a0010753b1ff563dc42d703902d2fa.asciidoc +++ b/docs/examples/f8a0010753b1ff563dc42d703902d2fa.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/bool-query.asciidoc:36 +// query-dsl/bool-query.asciidoc:39 [source, python] ---- diff --git a/docs/examples/f8cb1a04c2e487ff006b5ae0e1a7afbd.asciidoc b/docs/examples/f8cb1a04c2e487ff006b5ae0e1a7afbd.asciidoc index f6c6cbba9..d66ed0a8c 100644 --- a/docs/examples/f8cb1a04c2e487ff006b5ae0e1a7afbd.asciidoc +++ b/docs/examples/f8cb1a04c2e487ff006b5ae0e1a7afbd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/rollup-caps.asciidoc:172 +// rollup/apis/rollup-caps.asciidoc:178 [source, python] ---- diff --git a/docs/examples/f92d2f5018a8843ffbb56ade15f84406.asciidoc b/docs/examples/f92d2f5018a8843ffbb56ade15f84406.asciidoc index 148bc6c38..2ea838669 100644 --- a/docs/examples/f92d2f5018a8843ffbb56ade15f84406.asciidoc +++ b/docs/examples/f92d2f5018a8843ffbb56ade15f84406.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// licensing/get-basic-status.asciidoc:35 +// licensing/get-basic-status.asciidoc:41 [source, python] ---- diff --git 
a/docs/examples/f95a4d7ab02bf400246c8822f0245f02.asciidoc b/docs/examples/f95a4d7ab02bf400246c8822f0245f02.asciidoc index 9e2f8d55f..0d7391c3e 100644 --- a/docs/examples/f95a4d7ab02bf400246c8822f0245f02.asciidoc +++ b/docs/examples/f95a4d7ab02bf400246c8822f0245f02.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/trainedmodel.asciidoc:118 +// cat/trainedmodel.asciidoc:124 [source, python] ---- diff --git a/docs/examples/f96d4614f2fc294339fef325b794355f.asciidoc b/docs/examples/f96d4614f2fc294339fef325b794355f.asciidoc index a10bf05e0..0ab525d3e 100644 --- a/docs/examples/f96d4614f2fc294339fef325b794355f.asciidoc +++ b/docs/examples/f96d4614f2fc294339fef325b794355f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/get-bucket.asciidoc:202 +// ml/anomaly-detection/apis/get-bucket.asciidoc:208 [source, python] ---- diff --git a/docs/examples/f96d8131e8a592fbf6dfd686173940a9.asciidoc b/docs/examples/f96d8131e8a592fbf6dfd686173940a9.asciidoc index edb0c9a7b..051fb6d52 100644 --- a/docs/examples/f96d8131e8a592fbf6dfd686173940a9.asciidoc +++ b/docs/examples/f96d8131e8a592fbf6dfd686173940a9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/watcher/update-settings.asciidoc:16 +// rest-api/watcher/update-settings.asciidoc:22 [source, python] ---- diff --git a/docs/examples/f978088f5117d4addd55c11ee3777312.asciidoc b/docs/examples/f978088f5117d4addd55c11ee3777312.asciidoc index 7739d7a40..06db79737 100644 --- a/docs/examples/f978088f5117d4addd55c11ee3777312.asciidoc +++ b/docs/examples/f978088f5117d4addd55c11ee3777312.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-service-credentials.asciidoc:51 +// rest-api/security/get-service-credentials.asciidoc:56 [source, python] ---- diff --git a/docs/examples/f97aa2efabbf11a534073041eb2658c9.asciidoc b/docs/examples/f97aa2efabbf11a534073041eb2658c9.asciidoc index 915d3739b..c928e6e09 100644 --- a/docs/examples/f97aa2efabbf11a534073041eb2658c9.asciidoc +++ b/docs/examples/f97aa2efabbf11a534073041eb2658c9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// scripting/apis/delete-stored-script-api.asciidoc:24 +// scripting/apis/delete-stored-script-api.asciidoc:30 [source, python] ---- diff --git a/docs/examples/f9c8245cc13770dff052b6759a749efa.asciidoc b/docs/examples/f9c8245cc13770dff052b6759a749efa.asciidoc index c8e370454..6d189efc6 100644 --- a/docs/examples/f9c8245cc13770dff052b6759a749efa.asciidoc +++ b/docs/examples/f9c8245cc13770dff052b6759a749efa.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/get.asciidoc:288 +// docs/get.asciidoc:294 [source, python] ---- diff --git a/docs/examples/fa42ae3bf6a300420cd0f77ba006458a.asciidoc b/docs/examples/fa42ae3bf6a300420cd0f77ba006458a.asciidoc index a66e5fe3d..e6b2e9301 100644 --- a/docs/examples/fa42ae3bf6a300420cd0f77ba006458a.asciidoc +++ b/docs/examples/fa42ae3bf6a300420cd0f77ba006458a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/analyze.asciidoc:11 +// indices/analyze.asciidoc:17 [source, python] ---- diff --git a/docs/examples/fa82d86a046d67366cfe9ce65535e433.asciidoc b/docs/examples/fa82d86a046d67366cfe9ce65535e433.asciidoc index 3dd87952c..18011566d 100644 --- a/docs/examples/fa82d86a046d67366cfe9ce65535e433.asciidoc +++ b/docs/examples/fa82d86a046d67366cfe9ce65535e433.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// graph/explore.asciidoc:396 +// 
graph/explore.asciidoc:402 [source, python] ---- diff --git a/docs/examples/fa88f6f5a7d728ec4f1d05244228cb09.asciidoc b/docs/examples/fa88f6f5a7d728ec4f1d05244228cb09.asciidoc index 792d64f6f..f123ea93c 100644 --- a/docs/examples/fa88f6f5a7d728ec4f1d05244228cb09.asciidoc +++ b/docs/examples/fa88f6f5a7d728ec4f1d05244228cb09.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/bool-query.asciidoc:107 +// query-dsl/bool-query.asciidoc:110 [source, python] ---- diff --git a/docs/examples/fab702851e90e945c1b62dec0bb6a205.asciidoc b/docs/examples/fab702851e90e945c1b62dec0bb6a205.asciidoc index 59c475fc3..e6c08027b 100644 --- a/docs/examples/fab702851e90e945c1b62dec0bb6a205.asciidoc +++ b/docs/examples/fab702851e90e945c1b62dec0bb6a205.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// behavioral-analytics/apis/delete-analytics-collection.asciidoc:53 +// behavioral-analytics/apis/delete-analytics-collection.asciidoc:59 [source, python] ---- diff --git a/docs/examples/fabe14480624a99e8ee42c7338672058.asciidoc b/docs/examples/fabe14480624a99e8ee42c7338672058.asciidoc index 82b6a2045..798012303 100644 --- a/docs/examples/fabe14480624a99e8ee42c7338672058.asciidoc +++ b/docs/examples/fabe14480624a99e8ee42c7338672058.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/create-index.asciidoc:264 +// indices/create-index.asciidoc:270 [source, python] ---- diff --git a/docs/examples/fad26f4fb5a1bc9c38db33394e877d94.asciidoc b/docs/examples/fad26f4fb5a1bc9c38db33394e877d94.asciidoc index 3abe7941d..3fb4bdc81 100644 --- a/docs/examples/fad26f4fb5a1bc9c38db33394e877d94.asciidoc +++ b/docs/examples/fad26f4fb5a1bc9c38db33394e877d94.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/df-analytics/apis/get-dfanalytics-stats.asciidoc:533 +// ml/df-analytics/apis/get-dfanalytics-stats.asciidoc:539 [source, python] ---- diff --git a/docs/examples/faf7d8b9827cf5c0db5c177f01dc31c4.asciidoc b/docs/examples/faf7d8b9827cf5c0db5c177f01dc31c4.asciidoc index 1015d6895..85bee5676 100644 --- a/docs/examples/faf7d8b9827cf5c0db5c177f01dc31c4.asciidoc +++ b/docs/examples/faf7d8b9827cf5c0db5c177f01dc31c4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/rank-eval.asciidoc:257 +// search/rank-eval.asciidoc:263 [source, python] ---- diff --git a/docs/examples/fb4799d2fe4011bf6084f89d97d9a4a5.asciidoc b/docs/examples/fb4799d2fe4011bf6084f89d97d9a4a5.asciidoc index 4e9fedc99..e3a968c4d 100644 --- a/docs/examples/fb4799d2fe4011bf6084f89d97d9a4a5.asciidoc +++ b/docs/examples/fb4799d2fe4011bf6084f89d97d9a4a5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// autoscaling/apis/get-autoscaling-policy.asciidoc:40 +// autoscaling/apis/get-autoscaling-policy.asciidoc:47 [source, python] ---- diff --git a/docs/examples/fc190fbbf71949331266dcb3f46a1198.asciidoc b/docs/examples/fc190fbbf71949331266dcb3f46a1198.asciidoc index 0613d1120..75819854f 100644 --- a/docs/examples/fc190fbbf71949331266dcb3f46a1198.asciidoc +++ b/docs/examples/fc190fbbf71949331266dcb3f46a1198.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/data-stream-stats.asciidoc:51 +// indices/data-stream-stats.asciidoc:57 [source, python] ---- diff --git a/docs/examples/fc49437ce2e7916facf58128308c2aa3.asciidoc b/docs/examples/fc49437ce2e7916facf58128308c2aa3.asciidoc index a5b2395ab..dd8923d82 100644 --- a/docs/examples/fc49437ce2e7916facf58128308c2aa3.asciidoc +++ 
b/docs/examples/fc49437ce2e7916facf58128308c2aa3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// searchable-snapshots/apis/mount-snapshot.asciidoc:128 +// searchable-snapshots/apis/mount-snapshot.asciidoc:134 [source, python] ---- diff --git a/docs/examples/fccbddfba9f975de7e321732874dfb78.asciidoc b/docs/examples/fccbddfba9f975de7e321732874dfb78.asciidoc index e1106501c..475f1f2d4 100644 --- a/docs/examples/fccbddfba9f975de7e321732874dfb78.asciidoc +++ b/docs/examples/fccbddfba9f975de7e321732874dfb78.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/data-stream-stats.asciidoc:176 +// indices/data-stream-stats.asciidoc:182 [source, python] ---- diff --git a/docs/examples/fce5c03a388c893cb11a6696e068543f.asciidoc b/docs/examples/fce5c03a388c893cb11a6696e068543f.asciidoc index 7417cd3f6..eb279de96 100644 --- a/docs/examples/fce5c03a388c893cb11a6696e068543f.asciidoc +++ b/docs/examples/fce5c03a388c893cb11a6696e068543f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/has-privileges-user-profile.asciidoc:98 +// rest-api/security/has-privileges-user-profile.asciidoc:104 [source, python] ---- diff --git a/docs/examples/fd2d289e6b725fcc3cbe8fe7ffe02ea0.asciidoc b/docs/examples/fd2d289e6b725fcc3cbe8fe7ffe02ea0.asciidoc index 7094e18ab..0c0581876 100644 --- a/docs/examples/fd2d289e6b725fcc3cbe8fe7ffe02ea0.asciidoc +++ b/docs/examples/fd2d289e6b725fcc3cbe8fe7ffe02ea0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/get-index-template-v1.asciidoc:97 +// indices/get-index-template-v1.asciidoc:103 [source, python] ---- diff --git a/docs/examples/fd352b472d44d197022a46fce90b6ecb.asciidoc b/docs/examples/fd352b472d44d197022a46fce90b6ecb.asciidoc index d52c31f38..5272749ba 100644 --- a/docs/examples/fd352b472d44d197022a46fce90b6ecb.asciidoc +++ b/docs/examples/fd352b472d44d197022a46fce90b6ecb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/multi-get.asciidoc:178 +// docs/multi-get.asciidoc:184 [source, python] ---- diff --git a/docs/examples/fd60b4092c6552164862cec287359676.asciidoc b/docs/examples/fd60b4092c6552164862cec287359676.asciidoc index b996a6216..d9b530c7b 100644 --- a/docs/examples/fd60b4092c6552164862cec287359676.asciidoc +++ b/docs/examples/fd60b4092c6552164862cec287359676.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/stop-datafeed.asciidoc:74 +// ml/anomaly-detection/apis/stop-datafeed.asciidoc:80 [source, python] ---- diff --git a/docs/examples/fd620f09dbce62c6f0f603a366623607.asciidoc b/docs/examples/fd620f09dbce62c6f0f603a366623607.asciidoc index dcebe10f0..b03a89667 100644 --- a/docs/examples/fd620f09dbce62c6f0f603a366623607.asciidoc +++ b/docs/examples/fd620f09dbce62c6f0f603a366623607.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/update-connector-filtering-api.asciidoc:150 +// connector/apis/update-connector-filtering-api.asciidoc:156 [source, python] ---- diff --git a/docs/examples/fd9b668eeb1f117950bd4991c7c03fb1.asciidoc b/docs/examples/fd9b668eeb1f117950bd4991c7c03fb1.asciidoc index 8016651a9..9a96ab56c 100644 --- a/docs/examples/fd9b668eeb1f117950bd4991c7c03fb1.asciidoc +++ b/docs/examples/fd9b668eeb1f117950bd4991c7c03fb1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/analyze.asciidoc:157 +// indices/analyze.asciidoc:163 [source, python] ---- diff --git 
a/docs/examples/fdada036a875d7995d5d7aba9c06361e.asciidoc b/docs/examples/fdada036a875d7995d5d7aba9c06361e.asciidoc index b2e05b24b..8f9d3b6d5 100644 --- a/docs/examples/fdada036a875d7995d5d7aba9c06361e.asciidoc +++ b/docs/examples/fdada036a875d7995d5d7aba9c06361e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/dense-vector.asciidoc:95 +// mapping/types/dense-vector.asciidoc:94 [source, python] ---- diff --git a/docs/examples/fe6e35839f7d7381f8ec535c8f21959b.asciidoc b/docs/examples/fe6e35839f7d7381f8ec535c8f21959b.asciidoc index 6eb08b8f0..a3d664e01 100644 --- a/docs/examples/fe6e35839f7d7381f8ec535c8f21959b.asciidoc +++ b/docs/examples/fe6e35839f7d7381f8ec535c8f21959b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// how-to/recipes/scoring.asciidoc:123 +// how-to/recipes/scoring.asciidoc:124 [source, python] ---- diff --git a/docs/examples/febb71d774e0a1fc67454213d7448c53.asciidoc b/docs/examples/febb71d774e0a1fc67454213d7448c53.asciidoc index f7d66a12b..8682a6c83 100644 --- a/docs/examples/febb71d774e0a1fc67454213d7448c53.asciidoc +++ b/docs/examples/febb71d774e0a1fc67454213d7448c53.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// scripting/using.asciidoc:362 +// scripting/using.asciidoc:367 [source, python] ---- diff --git a/docs/examples/ff09e13391cecb2e8b9dd440b37e065f.asciidoc b/docs/examples/ff09e13391cecb2e8b9dd440b37e065f.asciidoc index e6498693e..155f9c05c 100644 --- a/docs/examples/ff09e13391cecb2e8b9dd440b37e065f.asciidoc +++ b/docs/examples/ff09e13391cecb2e8b9dd440b37e065f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/reindex.asciidoc:310 +// docs/reindex.asciidoc:316 [source, python] ---- diff --git a/docs/examples/ff56ded50c65998c70f3c5691ddc6f86.asciidoc b/docs/examples/ff56ded50c65998c70f3c5691ddc6f86.asciidoc index b08b694ec..4ea52bc32 100644 --- a/docs/examples/ff56ded50c65998c70f3c5691ddc6f86.asciidoc +++ b/docs/examples/ff56ded50c65998c70f3c5691ddc6f86.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/apis/delete-repo-api.asciidoc:27 +// snapshot-restore/apis/delete-repo-api.asciidoc:33 [source, python] ---- diff --git a/docs/examples/ff63ae39c34925dbfa54282ec9989124.asciidoc b/docs/examples/ff63ae39c34925dbfa54282ec9989124.asciidoc index 7381f8418..b7e2bc292 100644 --- a/docs/examples/ff63ae39c34925dbfa54282ec9989124.asciidoc +++ b/docs/examples/ff63ae39c34925dbfa54282ec9989124.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/reindex.asciidoc:1003 +// docs/reindex.asciidoc:1009 [source, python] ---- diff --git a/docs/examples/ff776c0fccf93e1c7050f7cb7efbae0b.asciidoc b/docs/examples/ff776c0fccf93e1c7050f7cb7efbae0b.asciidoc index be4d854b0..3968cc823 100644 --- a/docs/examples/ff776c0fccf93e1c7050f7cb7efbae0b.asciidoc +++ b/docs/examples/ff776c0fccf93e1c7050f7cb7efbae0b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/infer-trained-model.asciidoc:1006 +// ml/trained-models/apis/infer-trained-model.asciidoc:1012 [source, python] ---- diff --git a/docs/examples/ff7b81fa96c3b994efa3dee230512291.asciidoc b/docs/examples/ff7b81fa96c3b994efa3dee230512291.asciidoc index 2d64be799..0767e0c5b 100644 --- a/docs/examples/ff7b81fa96c3b994efa3dee230512291.asciidoc +++ b/docs/examples/ff7b81fa96c3b994efa3dee230512291.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// graph/explore.asciidoc:204 +// graph/explore.asciidoc:210 [source, 
python] ---- diff --git a/docs/examples/ffd63dd186ab81b893faec3b3358fa09.asciidoc b/docs/examples/ffd63dd186ab81b893faec3b3358fa09.asciidoc index 83140cb6e..33e1646ec 100644 --- a/docs/examples/ffd63dd186ab81b893faec3b3358fa09.asciidoc +++ b/docs/examples/ffd63dd186ab81b893faec3b3358fa09.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/delete-users.asciidoc:39 +// rest-api/security/delete-users.asciidoc:45 [source, python] ---- diff --git a/docs/examples/fff86117c47f974074284644e8a97a99.asciidoc b/docs/examples/fff86117c47f974074284644e8a97a99.asciidoc new file mode 100644 index 000000000..5624bd74b --- /dev/null +++ b/docs/examples/fff86117c47f974074284644e8a97a99.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// inference/service-jinaai.asciidoc:155 + +[source, python] +---- +resp = client.inference.put( + task_type="text_embedding", + inference_id="jinaai-embeddings", + inference_config={ + "service": "jinaai", + "service_settings": { + "model_id": "jina-embeddings-v3", + "api_key": "" + } + }, +) +print(resp) +---- diff --git a/docs/guide/release-notes.asciidoc b/docs/guide/release-notes.asciidoc index b09fd4569..1b8d3957f 100644 --- a/docs/guide/release-notes.asciidoc +++ b/docs/guide/release-notes.asciidoc @@ -1,6 +1,8 @@ [[release-notes]] == Release notes +* <> +* <> * <> * <> * <> @@ -42,6 +44,51 @@ * <> * <> +[discrete] +[[rn-8-17-1]] +=== 8.17.1 (2025-01-23) + +- Make pyarrow dependency optional for tests (https://github.com/elastic/elasticsearch-py/pull/2733[#2733], contributed by danigm) +- Update APIs: + * Add Simulate ingest API + * Add Get data stream lifecycle stats API + * Add Update inference API + * Add Create or update, Get and Delete IP geolocation database configuration APIs + * Add Bulk update API keys + * Add Get and Update Security index settings APIs + * Add OpenID Connect prepare authentication, OpenID Connect authenticate and OpenID Connect logout APIs + * Add Delegate PKI authentication API + * Add Repository analysis API + * Add Render Search Application Query API + * Add Find field structure and Find messages structure APIs + * Add Get Watcher index settings and Update Watcher index settings APIs + * Add experimental Check in and Claim connector sync job APIs + * Add experimental Set connector sync job errors and Set connector sync job stats APIs + * Add experimental Update connector features APIs + * Add experimental Post Event to an Analytics Collection API + * Add `timeout` and `master_timeout` to Snapshot lifecycle management (SLM) APIs + * Add `allow_partial_search_results` to SQL search API + * Add `throttle_period_in_millis` to Create or update watch API + * Fix query parameters for CAT APIs + +[discrete] +[[rn-8-17-0]] +=== 8.17.0 (2024-12-13) + +- Allow simsimd again on Python 3.13 (https://github.com/elastic/elasticsearch-py/pull/2722[#2722]) +- Update APIs: + * Mark all Inference APIs as stable. + * Add `allow_partial_search_results` to the Open Point in Time API + * Add `keep_alive` to the Get async search status API + * Remove the `keep_alive`, `pre_filter_shard_size` and `scroll` parameters from the Submit async search API. They were never supported. 
+ * Add `master_timeout` and `timeout` to all autoscaling policy APIs + * Add `master_timeout` to the Alias exists and Get alias APIs + * Add `list_executed_pipelines` and `require_data_stream` to Bulk API + * Add `include_model_definition` to Get trained models API + * Add `meta` to Create data frame analytics API + * Add `aggs` to Create datafeeds API + * Add `allow_no_indices`, `expand_wildcards`, `ignore_throttled` and `ignore_unavailable` to Create anomaly detection jobs API + [discrete] [[rn-8-16-0]] === 8.16.0 (2024-11-12) diff --git a/elasticsearch/_version.py b/elasticsearch/_version.py index f1a7a85f5..d1cc6c87a 100644 --- a/elasticsearch/_version.py +++ b/elasticsearch/_version.py @@ -15,4 +15,4 @@ # specific language governing permissions and limitations # under the License. -__versionstr__ = "8.16.0" +__versionstr__ = "8.17.1" From f04755d617dd03add7b11de87d86b391200c701e Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Wed, 29 Jan 2025 08:49:27 +0100 Subject: [PATCH 26/65] Auto-generated API code (#2767) --- elasticsearch/_async/client/__init__.py | 450 +++++++++++++++--------- elasticsearch/_async/client/indices.py | 211 +++++++++++ elasticsearch/_sync/client/__init__.py | 450 +++++++++++++++--------- elasticsearch/_sync/client/indices.py | 211 +++++++++++ 4 files changed, 982 insertions(+), 340 deletions(-) diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py index 1c966b828..25f832f5d 100644 --- a/elasticsearch/_async/client/__init__.py +++ b/elasticsearch/_async/client/__init__.py @@ -1455,74 +1455,146 @@ async def delete_by_query( """ .. raw:: html -

-          <p>Delete documents.
-          Deletes documents that match the specified query.</p>
+          <p>Delete documents.</p>
+          <p>Deletes documents that match the specified query.</p>
+          <p>If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:</p>
+          <ul>
+          <li>read</li>
+          <li>delete or write</li>
+          </ul>
+          <p>You can specify the query criteria in the request URI or the request body using the same syntax as the search API.
+          When you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning.
+          If a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails.</p>
+          <p>NOTE: Documents with a version equal to 0 cannot be deleted using delete by query because internal versioning does not support 0 as a valid version number.</p>
+          <p>While processing a delete by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents to delete.
+          A bulk delete request is performed for each batch of matching documents.
+          If a search or bulk request is rejected, the requests are retried up to 10 times, with exponential back off.
+          If the maximum retry limit is reached, processing halts and all failed requests are returned in the response.
+          Any delete requests that completed successfully still stick, they are not rolled back.</p>
+          <p>You can opt to count version conflicts instead of halting and returning by setting conflicts to proceed.
+          Note that if you opt to count version conflicts the operation could attempt to delete more documents from the source than max_docs until it has successfully deleted max_docs documents, or it has gone through every document in the source query.</p>
+          <p><strong>Throttling delete requests</strong></p>
+          <p>To control the rate at which delete by query issues batches of delete operations, you can set requests_per_second to any positive decimal number.
+          This pads each batch with a wait time to throttle the rate.
+          Set requests_per_second to -1 to disable throttling.</p>
+          <p>Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account.
+          The padding time is the difference between the batch size divided by the requests_per_second and the time spent writing.
+          By default the batch size is 1000, so if requests_per_second is set to 500:</p>
+          <pre><code>target_time = 1000 / 500 per second = 2 seconds
+          wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+          </code></pre>
+          <p>Since the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set.
+          This is "bursty" instead of "smooth".</p>
+          <p><strong>Slicing</strong></p>
+          <p>Delete by query supports sliced scroll to parallelize the delete process.
+          This can improve efficiency and provide a convenient way to break the request down into smaller parts.</p>
+          <p>Setting slices to auto lets Elasticsearch choose the number of slices to use.
+          This setting will use one slice per shard, up to a certain limit.
+          If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.
+          Adding slices to the delete by query operation creates sub-requests which means it has some quirks:</p>
+          <ul>
+          <li>You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices.</li>
+          <li>Fetching the status of the task for the request with slices only contains the status of completed slices.</li>
+          <li>These sub-requests are individually addressable for things like cancellation and rethrottling.</li>
+          <li>Rethrottling the request with slices will rethrottle the unfinished sub-request proportionally.</li>
+          <li>Canceling the request with slices will cancel each sub-request.</li>
+          <li>Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.</li>
+          <li>Parameters like requests_per_second and max_docs on a request with slices are distributed proportionally to each sub-request. Combine that with the earlier point about distribution being uneven and you should conclude that using max_docs with slices might not result in exactly max_docs documents being deleted.</li>
+          <li>Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.</li>
+          </ul>
+          <p>If you're slicing manually or otherwise tuning automatic slicing, keep in mind that:</p>
+          <ul>
+          <li>Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.</li>
+          <li>Delete performance scales linearly across available resources with the number of slices.</li>
+          </ul>
+          <p>Whether query or delete performance dominates the runtime depends on the documents being reindexed and cluster resources.</p>
+          <p><strong>Cancel a delete by query operation</strong></p>
+          <p>Any delete by query can be canceled using the task cancel API. For example:</p>
+          <pre><code>POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel
+          </code></pre>
+          <p>The task ID can be found by using the get tasks API.</p>
+          <p>Cancellation should happen quickly but might take a few seconds.
+          The get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself.</p>
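To make the options described above concrete, here is a minimal sketch of calling this API through the async Python client. The endpoint and the index name `my-index` are placeholders, not part of this patch.

[source, python]
----
import asyncio

from elasticsearch import AsyncElasticsearch


async def main() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # assumed endpoint
    # Count version conflicts instead of aborting, let Elasticsearch pick the
    # slice count, and throttle the sub-requests to 500 per second.
    resp = await client.delete_by_query(
        index="my-index",
        query={"range": {"@timestamp": {"lt": "now-30d"}}},
        conflicts="proceed",
        slices="auto",
        requests_per_second=500,
    )
    print(resp["deleted"], "documents deleted")
    await client.close()


asyncio.run(main())
----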

``_ - :param index: Comma-separated list of data streams, indices, and aliases to search. - Supports wildcards (`*`). To search all data streams or indices, omit this - parameter or use `*` or `_all`. + :param index: A comma-separated list of data streams, indices, and aliases to + search. It supports wildcards (`*`). To search all data streams or indices, + omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. :param analyze_wildcard: If `true`, wildcard and prefix queries are analyzed. - :param analyzer: Analyzer to use for the query string. + This parameter can be used only when the `q` query string parameter is specified. + :param analyzer: Analyzer to use for the query string. This parameter can be + used only when the `q` query string parameter is specified. :param conflicts: What to do if delete by query hits version conflicts: `abort` or `proceed`. :param default_operator: The default operator for query string query: `AND` or - `OR`. - :param df: Field to use as default where no field prefix is given in the query - string. - :param expand_wildcards: Type of index that wildcard patterns can match. If the - request can target data streams, this argument determines whether wildcard - expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + `OR`. This parameter can be used only when the `q` query string parameter + is specified. + :param df: The field to use as default where no field prefix is given in the + query string. This parameter can be used only when the `q` query string parameter + is specified. + :param expand_wildcards: The type of index that wildcard patterns can match. + If the request can target data streams, this argument determines whether + wildcard expressions match hidden data streams. It supports comma-separated + values, such as `open,hidden`. :param from_: Starting offset (default: 0) :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param lenient: If `true`, format-based query failures (such as providing text - to a numeric field) in the query string will be ignored. + to a numeric field) in the query string will be ignored. This parameter can + be used only when the `q` query string parameter is specified. :param max_docs: The maximum number of documents to delete. - :param preference: Specifies the node or shard the operation should be performed - on. Random by default. - :param q: Query in the Lucene query string syntax. - :param query: Specifies the documents to delete using the Query DSL. + :param preference: The node or shard the operation should be performed on. It + is random by default. + :param q: A query in the Lucene query string syntax. + :param query: The documents to delete specified with Query DSL. :param refresh: If `true`, Elasticsearch refreshes all shards involved in the - delete by query after the request completes. + delete by query after the request completes. This is different than the delete + API's `refresh` parameter, which causes just the shard that received the + delete request to be refreshed. 
Unlike the delete API, it does not support + `wait_for`. :param request_cache: If `true`, the request cache is used for this request. Defaults to the index-level setting. :param requests_per_second: The throttle for this request in sub-requests per second. - :param routing: Custom value used to route operations to a specific shard. - :param scroll: Period to retain the search context for scrolling. - :param scroll_size: Size of the scroll request that powers the operation. - :param search_timeout: Explicit timeout for each search request. Defaults to - no timeout. - :param search_type: The type of the search operation. Available options: `query_then_fetch`, - `dfs_query_then_fetch`. + :param routing: A custom value used to route operations to a specific shard. + :param scroll: The period to retain the search context for scrolling. + :param scroll_size: The size of the scroll request that powers the operation. + :param search_timeout: The explicit timeout for each search request. It defaults + to no timeout. + :param search_type: The type of the search operation. Available options include + `query_then_fetch` and `dfs_query_then_fetch`. :param slice: Slice the request manually using the provided slice ID and total number of slices. :param slices: The number of slices this task should be divided into. - :param sort: A comma-separated list of : pairs. - :param stats: Specific `tag` of the request for logging and statistical purposes. - :param terminate_after: Maximum number of documents to collect for each shard. + :param sort: A comma-separated list of `:` pairs. + :param stats: The specific `tag` of the request for logging and statistical purposes. + :param terminate_after: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. - :param timeout: Period each deletion request waits for active shards. + :param timeout: The period each deletion request waits for active shards. :param version: If `true`, returns the document version as part of a hit. :param wait_for_active_shards: The number of shard copies that must be active - before proceeding with the operation. Set to all or any positive integer - up to the total number of shards in the index (`number_of_replicas+1`). + before proceeding with the operation. Set to `all` or any positive integer + up to the total number of shards in the index (`number_of_replicas+1`). The + `timeout` value controls how long each write request waits for unavailable + shards to become available. :param wait_for_completion: If `true`, the request blocks until the operation - is complete. + is complete. If `false`, Elasticsearch performs some preflight checks, launches + the request, and returns a task you can use to cancel or get the status of + the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. + When you are done with a task, you should delete the task document so Elasticsearch + can reclaim the space. 
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -1642,11 +1714,11 @@ async def delete_by_query_rethrottle( Rethrottling that speeds up the query takes effect immediately but rethrotting that slows down the query takes effect after completing the current batch to prevent scroll timeouts.

- ``_ + ``_ :param task_id: The ID for the task. :param requests_per_second: The throttle for this request in sub-requests per - second. + second. To disable throttling, set it to `-1`. """ if task_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_id'") @@ -3597,30 +3669,50 @@ async def open_point_in_time( search requests using the same point in time. For example, if refreshes happen between search_after requests, then the results of those requests might not be consistent as changes happening between searches are only visible to the more recent point in time.

-

-          <p>A point in time must be opened explicitly before being used in search requests.
-          The keep_alive parameter tells Elasticsearch how long it should persist.</p>
+          <p>A point in time must be opened explicitly before being used in search requests.</p>
+          <p>A subsequent search request with the pit parameter must not specify index, routing, or preference values as these parameters are copied from the point in time.</p>
+          <p>Just like regular searches, you can use from and size to page through point in time search results, up to the first 10,000 hits.
+          If you want to retrieve more hits, use PIT with search_after.</p>
+          <p>IMPORTANT: The open point in time request and each subsequent search request can return different identifiers; always use the most recently received ID for the next search request.</p>
+          <p>When a PIT that contains shard failures is used in a search request, the missing are always reported in the search response as a NoShardAvailableActionException exception.
+          To get rid of these exceptions, a new PIT needs to be created so that shards missing from the previous PIT can be handled, assuming they become available in the meantime.</p>
+          <p><strong>Keeping point in time alive</strong></p>
+          <p>The keep_alive parameter, which is passed to a open point in time request and search request, extends the time to live of the corresponding point in time.
+          The value does not need to be long enough to process all data — it just needs to be long enough for the next request.</p>
+          <p>Normally, the background merge process optimizes the index by merging together smaller segments to create new, bigger segments.
+          Once the smaller segments are no longer needed they are deleted.
+          However, open point-in-times prevent the old segments from being deleted since they are still in use.</p>
+          <p>TIP: Keeping older segments alive means that more disk space and file handles are needed.
+          Ensure that you have configured your nodes to have ample free file handles.</p>
+          <p>Additionally, if a segment contains deleted or updated documents then the point in time must keep track of whether each document in the segment was live at the time of the initial search request.
+          Ensure that your nodes have sufficient heap space if you have many open point-in-times on an index that is subject to ongoing deletes or updates.
+          Note that a point-in-time doesn't prevent its associated indices from being deleted.
+          You can check how many point-in-times (that is, search contexts) are open with the nodes stats API.</p>
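As a rough illustration of the keep_alive guidance above, the following sketch opens a PIT, pages with search_after, and closes the PIT when done. It uses the synchronous client for brevity and assumes an index named `my-index`; the async client mirrors it with `await`.

[source, python]
----
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed endpoint

pit_id = client.open_point_in_time(index="my-index", keep_alive="1m")["id"]
search_after = None
while True:
    resp = client.search(
        # keep_alive only needs to be long enough for the next request
        pit={"id": pit_id, "keep_alive": "1m"},
        sort=[{"@timestamp": "asc"}],
        search_after=search_after,
        size=1000,
    )
    hits = resp["hits"]["hits"]
    if not hits:
        break
    pit_id = resp["pit_id"]  # always reuse the most recently returned ID
    search_after = hits[-1]["sort"]

client.close_point_in_time(id=pit_id)
----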

``_ :param index: A comma-separated list of index names to open point in time; use `_all` or empty string to perform the operation on all indices - :param keep_alive: Extends the time to live of the corresponding point in time. - :param allow_partial_search_results: If `false`, creating a point in time request - when a shard is missing or unavailable will throw an exception. If `true`, - the point in time will contain all the shards that are available at the time - of the request. - :param expand_wildcards: Type of index that wildcard patterns can match. If the - request can target data streams, this argument determines whether wildcard - expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + :param keep_alive: Extend the length of time that the point in time persists. + :param allow_partial_search_results: Indicates whether the point in time tolerates + unavailable shards or shard failures when initially creating the PIT. If + `false`, creating a point in time request when a shard is missing or unavailable + will throw an exception. If `true`, the point in time will contain all the + shards that are available at the time of the request. + :param expand_wildcards: The type of index that wildcard patterns can match. + If the request can target data streams, this argument determines whether + wildcard expressions match hidden data streams. It supports comma-separated + values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, + `hidden`, `none`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. - :param index_filter: Allows to filter indices if the provided query rewrites - to `match_none` on every shard. - :param preference: Specifies the node or shard the operation should be performed - on. Random by default. - :param routing: Custom value used to route operations to a specific shard. + :param index_filter: Filter indices if the provided query rewrites to `match_none` + on every shard. + :param preference: The node or shard the operation should be performed on. By + default, it is random. + :param routing: A custom value that is used to route operations to a specific + shard. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -4530,13 +4622,23 @@ async def search(

          <p>Get search hits that match the query defined in the request.
          You can provide search queries using the q query string parameter or the request body.
          If both are specified, only the query parameter is used.</p>
+          <p>If the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges.
+          To search a point in time (PIT) for an alias, you must have the read index privilege for the alias's data streams or indices.</p>
+          <p><strong>Search slicing</strong></p>
+          <p>When paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the slice and pit properties.
+          By default the splitting is done first on the shards, then locally on each shard.
+          The local splitting partitions the shard into contiguous ranges based on Lucene document IDs.</p>
+          <p>For instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard.</p>
+          <p>IMPORTANT: The same point-in-time ID should be used for all slices.
+          If different PIT IDs are used, slices can overlap and miss documents.
+          This situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.</p>
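A minimal sketch of the slicing described above, assuming a PIT opened on a hypothetical `my-index` and two consumers that share the same PIT ID:

[source, python]
----
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed endpoint
pit_id = client.open_point_in_time(index="my-index", keep_alive="1m")["id"]

# Both slices must reference the same PIT ID so they partition one snapshot.
for slice_id in range(2):
    resp = client.search(
        pit={"id": pit_id, "keep_alive": "1m"},
        slice={"id": slice_id, "max": 2},
        query={"match_all": {}},
    )
    print(f"slice {slice_id}: {len(resp['hits']['hits'])} hits in this page")

client.close_point_in_time(id=pit_id)
----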

``_ - :param index: Comma-separated list of data streams, indices, and aliases to search. - Supports wildcards (`*`). To search all data streams and indices, omit this - parameter or use `*` or `_all`. + :param index: A comma-separated list of data streams, indices, and aliases to + search. It supports wildcards (`*`). To search all data streams and indices, + omit this parameter or use `*` or `_all`. :param aggregations: Defines the aggregations that are run as part of the search request. :param aggs: Defines the aggregations that are run as part of the search request. @@ -4545,45 +4647,46 @@ async def search( This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - :param allow_partial_search_results: If true, returns partial results if there - are shard request timeouts or shard failures. If false, returns an error - with no partial results. - :param analyze_wildcard: If true, wildcard and prefix queries are analyzed. This - parameter can only be used when the q query string parameter is specified. - :param analyzer: Analyzer to use for the query string. This parameter can only - be used when the q query string parameter is specified. + :param allow_partial_search_results: If `true` and there are shard request timeouts + or shard failures, the request returns partial results. If `false`, it returns + an error with no partial results. To override the default behavior, you can + set the `search.default_allow_partial_results` cluster setting to `false`. + :param analyze_wildcard: If `true`, wildcard and prefix queries are analyzed. + This parameter can be used only when the `q` query string parameter is specified. + :param analyzer: The analyzer to use for the query string. This parameter can + be used only when the `q` query string parameter is specified. :param batched_reduce_size: The number of shard results that should be reduced - at once on the coordinating node. This value should be used as a protection - mechanism to reduce the memory overhead per search request if the potential - number of shards in the request can be large. - :param ccs_minimize_roundtrips: If true, network round-trips between the coordinating - node and the remote clusters are minimized when executing cross-cluster search + at once on the coordinating node. If the potential number of shards in the + request can be large, this value should be used as a protection mechanism + to reduce the memory overhead per search request. + :param ccs_minimize_roundtrips: If `true`, network round-trips between the coordinating + node and the remote clusters are minimized when running cross-cluster search (CCS) requests. :param collapse: Collapses search results the values of the specified field. - :param default_operator: The default operator for query string query: AND or - OR. This parameter can only be used when the `q` query string parameter is - specified. - :param df: Field to use as default where no field prefix is given in the query - string. This parameter can only be used when the q query string parameter + :param default_operator: The default operator for the query string query: `AND` + or `OR`. This parameter can be used only when the `q` query string parameter is specified. - :param docvalue_fields: Array of wildcard (`*`) patterns. The request returns - doc values for field names matching these patterns in the `hits.fields` property - of the response. 
- :param expand_wildcards: Type of index that wildcard patterns can match. If the - request can target data streams, this argument determines whether wildcard - expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. - :param explain: If true, returns detailed information about score computation - as part of a hit. + :param df: The field to use as a default when no field prefix is given in the + query string. This parameter can be used only when the `q` query string parameter + is specified. + :param docvalue_fields: An array of wildcard (`*`) field patterns. The request + returns doc values for field names matching these patterns in the `hits.fields` + property of the response. + :param expand_wildcards: The type of index that wildcard patterns can match. + If the request can target data streams, this argument determines whether + wildcard expressions match hidden data streams. It supports comma-separated + values such as `open,hidden`. + :param explain: If `true`, the request returns detailed information about score + computation as part of a hit. :param ext: Configuration of search extensions defined by Elasticsearch plugins. - :param fields: Array of wildcard (`*`) patterns. The request returns values for - field names matching these patterns in the `hits.fields` property of the - response. + :param fields: An array of wildcard (`*`) field patterns. The request returns + values for field names matching these patterns in the `hits.fields` property + of the response. :param force_synthetic_source: Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower the enabling synthetic source natively in the index. - :param from_: Starting document offset. Needs to be non-negative. By default, + :param from_: The starting document offset, which must be non-negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. :param highlight: Specifies the highlighter to use for retrieving highlighted @@ -4592,95 +4695,101 @@ async def search( be ignored when frozen. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. - :param include_named_queries_score: Indicates whether hit.matched_queries should - be rendered as a map that includes the name of the matched query associated - with its score (true) or as an array containing the name of the matched queries - (false) This functionality reruns each named query on every hit in a search - response. Typically, this adds a small overhead to a request. However, using - computationally expensive named queries on a large number of hits may add - significant overhead. - :param indices_boost: Boosts the _score of documents from specified indices. - :param knn: Defines the approximate kNN search to run. + :param include_named_queries_score: If `true`, the response includes the score + contribution from any named queries. This functionality reruns each named + query on every hit in a search response. Typically, this adds a small overhead + to a request. However, using computationally expensive named queries on a + large number of hits may add significant overhead. + :param indices_boost: Boost the `_score` of documents from specified indices. + The boost value is the factor by which scores are multiplied. 
A boost value + greater than `1.0` increases the score. A boost value between `0` and `1.0` + decreases the score. + :param knn: The approximate kNN search to run. :param lenient: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can - only be used when the `q` query string parameter is specified. - :param max_concurrent_shard_requests: Defines the number of concurrent shard - requests per node this search executes concurrently. This value should be - used to limit the impact of the search on the cluster in order to limit the - number of concurrent shard requests. + be used only when the `q` query string parameter is specified. + :param max_concurrent_shard_requests: The number of concurrent shard requests + per node that the search runs concurrently. This value should be used to + limit the impact of the search on the cluster in order to limit the number + of concurrent shard requests. :param min_compatible_shard_node: The minimum version of the node that can handle the request Any handling node with a lower version will fail the request. - :param min_score: Minimum `_score` for matching documents. Documents with a lower - `_score` are not included in the search results. - :param pit: Limits the search to a point in time (PIT). If you provide a PIT, + :param min_score: The minimum `_score` for matching documents. Documents with + a lower `_score` are not included in the search results. + :param pit: Limit the search to a point in time (PIT). If you provide a PIT, you cannot specify an `` in the request path. :param post_filter: Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated. A post filter has no impact on the aggregation results. - :param pre_filter_shard_size: Defines a threshold that enforces a pre-filter - roundtrip to prefilter search shards based on query rewriting if the number - of shards the search request expands to exceeds the threshold. This filter - roundtrip can limit the number of shards significantly if for instance a - shard can not match any documents based on its rewrite method (if date filters - are mandatory to match but the shard bounds and the query are disjoint). - When unspecified, the pre-filter phase is executed if any of these conditions - is met: the request targets more than 128 shards; the request targets one - or more read-only index; the primary sort of the query targets an indexed + :param pre_filter_shard_size: A threshold that enforces a pre-filter roundtrip + to prefilter search shards based on query rewriting if the number of shards + the search request expands to exceeds the threshold. This filter roundtrip + can limit the number of shards significantly if for instance a shard can + not match any documents based on its rewrite method (if date filters are + mandatory to match but the shard bounds and the query are disjoint). When + unspecified, the pre-filter phase is executed if any of these conditions + is met: * The request targets more than 128 shards. * The request targets + one or more read-only index. * The primary sort of the query targets an indexed field. - :param preference: Nodes and shards used for the search. By default, Elasticsearch + :param preference: The nodes and shards used for the search. By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, - accounting for allocation awareness. 
Valid values are: `_only_local` to run - the search only on shards on the local node; `_local` to, if possible, run - the search on shards on the local node, or if not, select shards using the - default method; `_only_nodes:,` to run the search on only - the specified nodes IDs, where, if suitable shards exist on more than one - selected node, use shards on those nodes using the default method, or if - none of the specified nodes are available, select shards from any available - node using the default method; `_prefer_nodes:,` to if + accounting for allocation awareness. Valid values are: * `_only_local` to + run the search only on shards on the local node; * `_local` to, if possible, + run the search on shards on the local node, or if not, select shards using + the default method; * `_only_nodes:,` to run the search + on only the specified nodes IDs, where, if suitable shards exist on more + than one selected node, use shards on those nodes using the default method, + or if none of the specified nodes are available, select shards from any available + node using the default method; * `_prefer_nodes:,` to if possible, run the search on the specified nodes IDs, or if not, select shards - using the default method; `_shards:,` to run the search only - on the specified shards; `` (any string that does not start + using the default method; * `_shards:,` to run the search only + on the specified shards; * `` (any string that does not start with `_`) to route searches with the same `` to the same shards in the same order. :param profile: Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution. - :param q: Query in the Lucene query string syntax using query parameter search. - Query parameter searches do not support the full Elasticsearch Query DSL - but are handy for testing. - :param query: Defines the search definition using the Query DSL. - :param rank: Defines the Reciprocal Rank Fusion (RRF) to use. + :param q: A query in the Lucene query string syntax. Query parameter searches + do not support the full Elasticsearch Query DSL but are handy for testing. + IMPORTANT: This parameter overrides the query parameter in the request body. + If both parameters are specified, documents matching the query request body + parameter are not returned. + :param query: The search definition using the Query DSL. + :param rank: The Reciprocal Rank Fusion (RRF) to use. :param request_cache: If `true`, the caching of search results is enabled for - requests where `size` is `0`. Defaults to index level settings. + requests where `size` is `0`. It defaults to index level settings. :param rescore: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. :param rest_total_hits_as_int: Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response. :param retriever: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that - also return top documents such as query and knn. - :param routing: Custom value used to route operations to a specific shard. - :param runtime_mappings: Defines one or more runtime fields in the search request. - These fields take precedence over mapped fields with the same name. + also return top documents such as `query` and `knn`. 
+ :param routing: A custom value that is used to route operations to a specific + shard. + :param runtime_mappings: One or more runtime fields in the search request. These + fields take precedence over mapped fields with the same name. :param script_fields: Retrieve a script evaluation (based on different fields) for each hit. - :param scroll: Period to retain the search context for scrolling. See Scroll - search results. By default, this value cannot exceed `1d` (24 hours). You - can change this limit using the `search.max_keep_alive` cluster-level setting. + :param scroll: The period to retain the search context for scrolling. By default, + this value cannot exceed `1d` (24 hours). You can change this limit by using + the `search.max_keep_alive` cluster-level setting. :param search_after: Used to retrieve the next page of hits using a set of sort values from the previous page. - :param search_type: How distributed term frequencies are calculated for relevance - scoring. - :param seq_no_primary_term: If `true`, returns sequence number and primary term - of the last modification of each hit. - :param size: The number of hits to return. By default, you cannot page through - more than 10,000 hits using the `from` and `size` parameters. To page through - more hits, use the `search_after` parameter. - :param slice: Can be used to split a scrolled search into multiple slices that - can be consumed independently. + :param search_type: Indicates how distributed term frequencies are calculated + for relevance scoring. + :param seq_no_primary_term: If `true`, the request returns sequence number and + primary term of the last modification of each hit. + :param size: The number of hits to return, which must not be negative. By default, + you cannot page through more than 10,000 hits using the `from` and `size` + parameters. To page through more hits, use the `search_after` property. + :param slice: Split a scrolled search into multiple slices that can be consumed + independently. :param sort: A comma-separated list of : pairs. - :param source: Indicates which source fields are returned for matching documents. - These fields are returned in the hits._source property of the search response. + :param source: The source fields that are returned for matching documents. These + fields are returned in the `hits._source` property of the search response. + If the `stored_fields` property is specified, the `_source` property defaults + to `false`. Otherwise, it defaults to `true`. :param source_excludes: A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` @@ -4690,45 +4799,46 @@ async def search( returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. - :param stats: Stats groups to associate with the search. Each group maintains + :param stats: The stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. - :param stored_fields: List of stored fields to return as part of a hit. If no - fields are specified, no stored fields are included in the response. If this - field is specified, the `_source` parameter defaults to `false`. You can - pass `_source: true` to return both source fields and stored fields in the - search response. 
+ :param stored_fields: A comma-separated list of stored fields to return as part + of a hit. If no fields are specified, no stored fields are included in the + response. If this field is specified, the `_source` property defaults to + `false`. You can pass `_source: true` to return both source fields and stored + fields in the search response. :param suggest: Defines a suggester that provides similar looking terms based on a provided text. - :param suggest_field: Specifies which field to use for suggestions. - :param suggest_mode: Specifies the suggest mode. This parameter can only be used - when the `suggest_field` and `suggest_text` query string parameters are specified. - :param suggest_size: Number of suggestions to return. This parameter can only - be used when the `suggest_field` and `suggest_text` query string parameters + :param suggest_field: The field to use for suggestions. + :param suggest_mode: The suggest mode. This parameter can be used only when the + `suggest_field` and `suggest_text` query string parameters are specified. + :param suggest_size: The number of suggestions to return. This parameter can + be used only when the `suggest_field` and `suggest_text` query string parameters are specified. :param suggest_text: The source text for which the suggestions should be returned. - This parameter can only be used when the `suggest_field` and `suggest_text` + This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. - :param terminate_after: Maximum number of documents to collect for each shard. + :param terminate_after: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. - Elasticsearch collects documents before sorting. Use with caution. Elasticsearch - applies this parameter to each shard handling the request. When possible, - let Elasticsearch perform early termination automatically. Avoid specifying - this parameter for requests that target data streams with backing indices - across multiple data tiers. If set to `0` (default), the query does not terminate - early. - :param timeout: Specifies the period of time to wait for a response from each - shard. If no response is received before the timeout expires, the request - fails and returns an error. Defaults to no timeout. - :param track_scores: If true, calculate and return document scores, even if the - scores are not used for sorting. + Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. + Elasticsearch applies this property to each shard handling the request. When + possible, let Elasticsearch perform early termination automatically. Avoid + specifying this property for requests that target data streams with backing + indices across multiple data tiers. If set to `0` (default), the query does + not terminate early. + :param timeout: The period of time to wait for a response from each shard. If + no response is received before the timeout expires, the request fails and + returns an error. Defaults to no timeout. + :param track_scores: If `true`, calculate and return document scores, even if + the scores are not used for sorting. :param track_total_hits: Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. 
:param typed_keys: If `true`, aggregation and suggester names are be prefixed by their respective types in the response. - :param version: If true, returns document version as part of a hit. + :param version: If `true`, the request returns the document version as part of + a hit. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py index 2edac7ded..02b315efb 100644 --- a/elasticsearch/_async/client/indices.py +++ b/elasticsearch/_async/client/indices.py @@ -230,6 +230,51 @@ async def analyze( path_parts=__path_parts, ) + @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) + async def cancel_migrate_reindex( + self, + *, + index: t.Union[str, t.Sequence[str]], + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

+          <p>Cancel a migration reindex operation.</p>
+          <p>Cancel a migration reindex attempt for a data stream or index.</p>

+ + + ``_ + + :param index: The index or data stream name + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'index'") + __path_parts: t.Dict[str, str] = {"index": _quote(index)} + __path = f'/_migration/reindex/{__path_parts["index"]}/_cancel' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + endpoint_id="indices.cancel_migrate_reindex", + path_parts=__path_parts, + ) + @_rewrite_parameters() async def clear_cache( self, @@ -710,6 +755,71 @@ async def create_data_stream( path_parts=__path_parts, ) + @_rewrite_parameters( + body_name="create_from", + ) + @_stability_warning(Stability.EXPERIMENTAL) + async def create_from( + self, + *, + source: str, + dest: str, + create_from: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

+          <p>Create an index from a source index.</p>
+          <p>Copy the mappings and settings from the source index to a destination index while allowing request settings and mappings to override the source values.</p>
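A sketch of calling the new method, assuming a source index `my-source` and destination `my-dest`; the `settings_override` field in the body is an assumption about the server-side request format, not something this patch defines.

[source, python]
----
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed endpoint

# Copy mappings and settings from my-source into my-dest, overriding one setting.
# "settings_override" is an assumed body field, not defined by this client patch.
resp = client.indices.create_from(
    source="my-source",
    dest="my-dest",
    create_from={"settings_override": {"index": {"number_of_replicas": 0}}},
)
print(resp)
----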

+ + + ``_ + + :param source: The source index or data stream name + :param dest: The destination index or data stream name + :param create_from: + """ + if source in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'source'") + if dest in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'dest'") + if create_from is None and body is None: + raise ValueError( + "Empty value passed for parameters 'create_from' and 'body', one of them should be set." + ) + elif create_from is not None and body is not None: + raise ValueError("Cannot set both 'create_from' and 'body'") + __path_parts: t.Dict[str, str] = { + "source": _quote(source), + "dest": _quote(dest), + } + __path = f'/_create_from/{__path_parts["source"]}/{__path_parts["dest"]}' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __body = create_from if create_from is not None else body + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="indices.create_from", + path_parts=__path_parts, + ) + @_rewrite_parameters() async def data_streams_stats( self, @@ -2585,6 +2695,51 @@ async def get_mapping( path_parts=__path_parts, ) + @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) + async def get_migrate_reindex_status( + self, + *, + index: t.Union[str, t.Sequence[str]], + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

+          <p>Get the migration reindexing status.</p>
+          <p>Get the status of a migration reindex attempt for a data stream or index.</p>

+ + + ``_ + + :param index: The index or data stream name. + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'index'") + __path_parts: t.Dict[str, str] = {"index": _quote(index)} + __path = f'/_migration/reindex/{__path_parts["index"]}/_status' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="indices.get_migrate_reindex_status", + path_parts=__path_parts, + ) + @_rewrite_parameters() async def get_settings( self, @@ -2754,6 +2909,62 @@ async def get_template( path_parts=__path_parts, ) + @_rewrite_parameters( + body_name="reindex", + ) + @_stability_warning(Stability.EXPERIMENTAL) + async def migrate_reindex( + self, + *, + reindex: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

+          <p>Reindex legacy backing indices.</p>
+          <p>Reindex all legacy backing indices for a data stream.
+          This operation occurs in a persistent task.
+          The persistent task ID is returned immediately and the reindexing work is completed in that task.</p>
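A sketch of starting the persistent task and polling it, assuming a data stream named `my-data-stream`; the body shape (`mode` and `source`) is an assumption based on the migration reindex feature rather than anything this patch specifies.

[source, python]
----
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed endpoint

# Start the persistent reindex task for the data stream's legacy backing indices.
# The body fields below are assumed, not defined by this client patch.
client.indices.migrate_reindex(
    reindex={"mode": "upgrade", "source": {"index": "my-data-stream"}}
)

# The work continues in the background; poll the status by the same name.
status = client.indices.get_migrate_reindex_status(index="my-data-stream")
print(status)
----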

+ + + ``_ + + :param reindex: + """ + if reindex is None and body is None: + raise ValueError( + "Empty value passed for parameters 'reindex' and 'body', one of them should be set." + ) + elif reindex is not None and body is not None: + raise ValueError("Cannot set both 'reindex' and 'body'") + __path_parts: t.Dict[str, str] = {} + __path = "/_migration/reindex" + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __body = reindex if reindex is not None else body + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="indices.migrate_reindex", + path_parts=__path_parts, + ) + @_rewrite_parameters() async def migrate_to_data_stream( self, diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py index 243849fb8..67187220a 100644 --- a/elasticsearch/_sync/client/__init__.py +++ b/elasticsearch/_sync/client/__init__.py @@ -1453,74 +1453,146 @@ def delete_by_query( """ .. raw:: html -

Delete documents. - Deletes documents that match the specified query.

+

Delete documents.

+

Deletes documents that match the specified query.

+

If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:

+
    +
  • read
  • +
  • delete or write
  • +
+

You can specify the query criteria in the request URI or the request body using the same syntax as the search API. + When you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning. + If a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails.

+

NOTE: Documents with a version equal to 0 cannot be deleted using delete by query because internal versioning does not support 0 as a valid version number.

+

While processing a delete by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents to delete. + A bulk delete request is performed for each batch of matching documents. + If a search or bulk request is rejected, the requests are retried up to 10 times, with exponential back off. + If the maximum retry limit is reached, processing halts and all failed requests are returned in the response. + Any delete requests that completed successfully still stick, they are not rolled back.

+

You can opt to count version conflicts instead of halting and returning by setting conflicts to proceed. + Note that if you opt to count version conflicts the operation could attempt to delete more documents from the source than max_docs until it has successfully deleted max_docs documents, or it has gone through every document in the source query.

+

Throttling delete requests

+

To control the rate at which delete by query issues batches of delete operations, you can set requests_per_second to any positive decimal number. + This pads each batch with a wait time to throttle the rate. + Set requests_per_second to -1 to disable throttling.

+

Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. + The padding time is the difference between the batch size divided by the requests_per_second and the time spent writing. + By default the batch size is 1000, so if requests_per_second is set to 500:

+
target_time = 1000 / 500 per second = 2 seconds
+          wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
+          
+

Since the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. + This is "bursty" instead of "smooth".

+

Slicing
Delete by query supports sliced scroll to parallelize the delete process.
This can improve efficiency and provide a convenient way to break the request down into smaller parts.
Setting slices to auto lets Elasticsearch choose the number of slices to use.
This setting will use one slice per shard, up to a certain limit.
If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.
Adding slices to the delete by query operation creates sub-requests which means it has some quirks:
• You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices.
• Fetching the status of the task for the request with slices only contains the status of completed slices.
• These sub-requests are individually addressable for things like cancellation and rethrottling.
• Rethrottling the request with slices will rethrottle the unfinished sub-request proportionally.
• Canceling the request with slices will cancel each sub-request.
• Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.
• Parameters like requests_per_second and max_docs on a request with slices are distributed proportionally to each sub-request. Combine that with the earlier point about distribution being uneven and you should conclude that using max_docs with slices might not result in exactly max_docs documents being deleted.
• Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.
If you're slicing manually or otherwise tuning automatic slicing, keep in mind that:
• Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.
• Delete performance scales linearly across available resources with the number of slices.
Whether query or delete performance dominates the runtime depends on the documents being reindexed and cluster resources.
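As a rough illustration of automatic slicing with the Python client (the index name and query are made up; `client` is an assumed `Elasticsearch` instance):

    # One slice per shard, count version conflicts instead of aborting,
    # and return a task ID instead of blocking until completion.
    resp = client.delete_by_query(
        index="my-index",
        query={"range": {"@timestamp": {"lt": "now-30d"}}},
        slices="auto",
        conflicts="proceed",
        wait_for_completion=False,
    )
    task_id = resp["task"]  # present because wait_for_completion=False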
Cancel a delete by query operation
Any delete by query can be canceled using the task cancel API. For example:
    POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel
The task ID can be found by using the get tasks API.

+

Cancellation should happen quickly but might take a few seconds. + The get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself.
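The same cancellation can be issued through the Python client's tasks API; the task ID below reuses the placeholder from the example above:

    client.tasks.cancel(task_id="r1A2WoRbTwKZ516z6NEs5A:36619")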

``_ - :param index: Comma-separated list of data streams, indices, and aliases to search. - Supports wildcards (`*`). To search all data streams or indices, omit this - parameter or use `*` or `_all`. + :param index: A comma-separated list of data streams, indices, and aliases to + search. It supports wildcards (`*`). To search all data streams or indices, + omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. :param analyze_wildcard: If `true`, wildcard and prefix queries are analyzed. - :param analyzer: Analyzer to use for the query string. + This parameter can be used only when the `q` query string parameter is specified. + :param analyzer: Analyzer to use for the query string. This parameter can be + used only when the `q` query string parameter is specified. :param conflicts: What to do if delete by query hits version conflicts: `abort` or `proceed`. :param default_operator: The default operator for query string query: `AND` or - `OR`. - :param df: Field to use as default where no field prefix is given in the query - string. - :param expand_wildcards: Type of index that wildcard patterns can match. If the - request can target data streams, this argument determines whether wildcard - expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + `OR`. This parameter can be used only when the `q` query string parameter + is specified. + :param df: The field to use as default where no field prefix is given in the + query string. This parameter can be used only when the `q` query string parameter + is specified. + :param expand_wildcards: The type of index that wildcard patterns can match. + If the request can target data streams, this argument determines whether + wildcard expressions match hidden data streams. It supports comma-separated + values, such as `open,hidden`. :param from_: Starting offset (default: 0) :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param lenient: If `true`, format-based query failures (such as providing text - to a numeric field) in the query string will be ignored. + to a numeric field) in the query string will be ignored. This parameter can + be used only when the `q` query string parameter is specified. :param max_docs: The maximum number of documents to delete. - :param preference: Specifies the node or shard the operation should be performed - on. Random by default. - :param q: Query in the Lucene query string syntax. - :param query: Specifies the documents to delete using the Query DSL. + :param preference: The node or shard the operation should be performed on. It + is random by default. + :param q: A query in the Lucene query string syntax. + :param query: The documents to delete specified with Query DSL. :param refresh: If `true`, Elasticsearch refreshes all shards involved in the - delete by query after the request completes. + delete by query after the request completes. This is different than the delete + API's `refresh` parameter, which causes just the shard that received the + delete request to be refreshed. 
Unlike the delete API, it does not support + `wait_for`. :param request_cache: If `true`, the request cache is used for this request. Defaults to the index-level setting. :param requests_per_second: The throttle for this request in sub-requests per second. - :param routing: Custom value used to route operations to a specific shard. - :param scroll: Period to retain the search context for scrolling. - :param scroll_size: Size of the scroll request that powers the operation. - :param search_timeout: Explicit timeout for each search request. Defaults to - no timeout. - :param search_type: The type of the search operation. Available options: `query_then_fetch`, - `dfs_query_then_fetch`. + :param routing: A custom value used to route operations to a specific shard. + :param scroll: The period to retain the search context for scrolling. + :param scroll_size: The size of the scroll request that powers the operation. + :param search_timeout: The explicit timeout for each search request. It defaults + to no timeout. + :param search_type: The type of the search operation. Available options include + `query_then_fetch` and `dfs_query_then_fetch`. :param slice: Slice the request manually using the provided slice ID and total number of slices. :param slices: The number of slices this task should be divided into. - :param sort: A comma-separated list of : pairs. - :param stats: Specific `tag` of the request for logging and statistical purposes. - :param terminate_after: Maximum number of documents to collect for each shard. + :param sort: A comma-separated list of `:` pairs. + :param stats: The specific `tag` of the request for logging and statistical purposes. + :param terminate_after: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. - :param timeout: Period each deletion request waits for active shards. + :param timeout: The period each deletion request waits for active shards. :param version: If `true`, returns the document version as part of a hit. :param wait_for_active_shards: The number of shard copies that must be active - before proceeding with the operation. Set to all or any positive integer - up to the total number of shards in the index (`number_of_replicas+1`). + before proceeding with the operation. Set to `all` or any positive integer + up to the total number of shards in the index (`number_of_replicas+1`). The + `timeout` value controls how long each write request waits for unavailable + shards to become available. :param wait_for_completion: If `true`, the request blocks until the operation - is complete. + is complete. If `false`, Elasticsearch performs some preflight checks, launches + the request, and returns a task you can use to cancel or get the status of + the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. + When you are done with a task, you should delete the task document so Elasticsearch + can reclaim the space. 
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -1640,11 +1712,11 @@ def delete_by_query_rethrottle( Rethrottling that speeds up the query takes effect immediately but rethrotting that slows down the query takes effect after completing the current batch to prevent scroll timeouts.

- ``_ + ``_ :param task_id: The ID for the task. :param requests_per_second: The throttle for this request in sub-requests per - second. + second. To disable throttling, set it to `-1`. """ if task_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_id'") @@ -3595,30 +3667,50 @@ def open_point_in_time( search requests using the same point in time. For example, if refreshes happen between search_after requests, then the results of those requests might not be consistent as changes happening between searches are only visible to the more recent point in time.

-

A point in time must be opened explicitly before being used in search requests. - The keep_alive parameter tells Elasticsearch how long it should persist.

+

A point in time must be opened explicitly before being used in search requests.

+

A subsequent search request with the pit parameter must not specify index, routing, or preference values as these parameters are copied from the point in time.

+

Just like regular searches, you can use from and size to page through point in time search results, up to the first 10,000 hits. + If you want to retrieve more hits, use PIT with search_after.

+

IMPORTANT: The open point in time request and each subsequent search request can return different identifiers; always use the most recently received ID for the next search request.

+

When a PIT that contains shard failures is used in a search request, the missing are always reported in the search response as a NoShardAvailableActionException exception. + To get rid of these exceptions, a new PIT needs to be created so that shards missing from the previous PIT can be handled, assuming they become available in the meantime.

+

Keeping point in time alive

+

The keep_alive parameter, which is passed to a open point in time request and search request, extends the time to live of the corresponding point in time. + The value does not need to be long enough to process all data — it just needs to be long enough for the next request.

+

Normally, the background merge process optimizes the index by merging together smaller segments to create new, bigger segments. + Once the smaller segments are no longer needed they are deleted. + However, open point-in-times prevent the old segments from being deleted since they are still in use.

+

TIP: Keeping older segments alive means that more disk space and file handles are needed. + Ensure that you have configured your nodes to have ample free file handles.

+

Additionally, if a segment contains deleted or updated documents then the point in time must keep track of whether each document in the segment was live at the time of the initial search request. + Ensure that your nodes have sufficient heap space if you have many open point-in-times on an index that is subject to ongoing deletes or updates. + Note that a point-in-time doesn't prevent its associated indices from being deleted. + You can check how many point-in-times (that is, search contexts) are open with the nodes stats API.
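A minimal sketch of the open/search/close cycle with the Python client, assuming `client` is an `Elasticsearch` instance and `my-index` is a placeholder:

    pit = client.open_point_in_time(index="my-index", keep_alive="1m")
    resp = client.search(
        pit={"id": pit["id"], "keep_alive": "1m"},
        query={"match_all": {}},
    )
    client.close_point_in_time(id=pit["id"])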

``_ :param index: A comma-separated list of index names to open point in time; use `_all` or empty string to perform the operation on all indices - :param keep_alive: Extends the time to live of the corresponding point in time. - :param allow_partial_search_results: If `false`, creating a point in time request - when a shard is missing or unavailable will throw an exception. If `true`, - the point in time will contain all the shards that are available at the time - of the request. - :param expand_wildcards: Type of index that wildcard patterns can match. If the - request can target data streams, this argument determines whether wildcard - expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + :param keep_alive: Extend the length of time that the point in time persists. + :param allow_partial_search_results: Indicates whether the point in time tolerates + unavailable shards or shard failures when initially creating the PIT. If + `false`, creating a point in time request when a shard is missing or unavailable + will throw an exception. If `true`, the point in time will contain all the + shards that are available at the time of the request. + :param expand_wildcards: The type of index that wildcard patterns can match. + If the request can target data streams, this argument determines whether + wildcard expressions match hidden data streams. It supports comma-separated + values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, + `hidden`, `none`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. - :param index_filter: Allows to filter indices if the provided query rewrites - to `match_none` on every shard. - :param preference: Specifies the node or shard the operation should be performed - on. Random by default. - :param routing: Custom value used to route operations to a specific shard. + :param index_filter: Filter indices if the provided query rewrites to `match_none` + on every shard. + :param preference: The node or shard the operation should be performed on. By + default, it is random. + :param routing: A custom value that is used to route operations to a specific + shard. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -4528,13 +4620,23 @@ def search(

Get search hits that match the query defined in the request.
You can provide search queries using the q query string parameter or the request body.
If both are specified, only the query parameter is used.
If the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges.
To search a point in time (PIT) for an alias, you must have the read index privilege for the alias's data streams or indices.
Search slicing
When paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the slice and pit properties.
By default the splitting is done first on the shards, then locally on each shard.
The local splitting partitions the shard into contiguous ranges based on Lucene document IDs.
For instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard.
IMPORTANT: The same point-in-time ID should be used for all slices. + If different PIT IDs are used, slices can overlap and miss documents. + This situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.
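A sketch of two slices consuming the same PIT with the Python client (names are placeholders; `client` is an assumed `Elasticsearch` instance):

    pit_id = client.open_point_in_time(index="my-index", keep_alive="1m")["id"]
    for i in range(2):
        # Both slices reuse the same PIT ID, as required.
        page = client.search(
            slice={"id": i, "max": 2},
            pit={"id": pit_id, "keep_alive": "1m"},
            query={"match_all": {}},
            size=1000,
        )
    client.close_point_in_time(id=pit_id)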

``_ - :param index: Comma-separated list of data streams, indices, and aliases to search. - Supports wildcards (`*`). To search all data streams and indices, omit this - parameter or use `*` or `_all`. + :param index: A comma-separated list of data streams, indices, and aliases to + search. It supports wildcards (`*`). To search all data streams and indices, + omit this parameter or use `*` or `_all`. :param aggregations: Defines the aggregations that are run as part of the search request. :param aggs: Defines the aggregations that are run as part of the search request. @@ -4543,45 +4645,46 @@ def search( This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. - :param allow_partial_search_results: If true, returns partial results if there - are shard request timeouts or shard failures. If false, returns an error - with no partial results. - :param analyze_wildcard: If true, wildcard and prefix queries are analyzed. This - parameter can only be used when the q query string parameter is specified. - :param analyzer: Analyzer to use for the query string. This parameter can only - be used when the q query string parameter is specified. + :param allow_partial_search_results: If `true` and there are shard request timeouts + or shard failures, the request returns partial results. If `false`, it returns + an error with no partial results. To override the default behavior, you can + set the `search.default_allow_partial_results` cluster setting to `false`. + :param analyze_wildcard: If `true`, wildcard and prefix queries are analyzed. + This parameter can be used only when the `q` query string parameter is specified. + :param analyzer: The analyzer to use for the query string. This parameter can + be used only when the `q` query string parameter is specified. :param batched_reduce_size: The number of shard results that should be reduced - at once on the coordinating node. This value should be used as a protection - mechanism to reduce the memory overhead per search request if the potential - number of shards in the request can be large. - :param ccs_minimize_roundtrips: If true, network round-trips between the coordinating - node and the remote clusters are minimized when executing cross-cluster search + at once on the coordinating node. If the potential number of shards in the + request can be large, this value should be used as a protection mechanism + to reduce the memory overhead per search request. + :param ccs_minimize_roundtrips: If `true`, network round-trips between the coordinating + node and the remote clusters are minimized when running cross-cluster search (CCS) requests. :param collapse: Collapses search results the values of the specified field. - :param default_operator: The default operator for query string query: AND or - OR. This parameter can only be used when the `q` query string parameter is - specified. - :param df: Field to use as default where no field prefix is given in the query - string. This parameter can only be used when the q query string parameter + :param default_operator: The default operator for the query string query: `AND` + or `OR`. This parameter can be used only when the `q` query string parameter is specified. - :param docvalue_fields: Array of wildcard (`*`) patterns. The request returns - doc values for field names matching these patterns in the `hits.fields` property - of the response. 
- :param expand_wildcards: Type of index that wildcard patterns can match. If the - request can target data streams, this argument determines whether wildcard - expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. - :param explain: If true, returns detailed information about score computation - as part of a hit. + :param df: The field to use as a default when no field prefix is given in the + query string. This parameter can be used only when the `q` query string parameter + is specified. + :param docvalue_fields: An array of wildcard (`*`) field patterns. The request + returns doc values for field names matching these patterns in the `hits.fields` + property of the response. + :param expand_wildcards: The type of index that wildcard patterns can match. + If the request can target data streams, this argument determines whether + wildcard expressions match hidden data streams. It supports comma-separated + values such as `open,hidden`. + :param explain: If `true`, the request returns detailed information about score + computation as part of a hit. :param ext: Configuration of search extensions defined by Elasticsearch plugins. - :param fields: Array of wildcard (`*`) patterns. The request returns values for - field names matching these patterns in the `hits.fields` property of the - response. + :param fields: An array of wildcard (`*`) field patterns. The request returns + values for field names matching these patterns in the `hits.fields` property + of the response. :param force_synthetic_source: Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower the enabling synthetic source natively in the index. - :param from_: Starting document offset. Needs to be non-negative. By default, + :param from_: The starting document offset, which must be non-negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. :param highlight: Specifies the highlighter to use for retrieving highlighted @@ -4590,95 +4693,101 @@ def search( be ignored when frozen. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. - :param include_named_queries_score: Indicates whether hit.matched_queries should - be rendered as a map that includes the name of the matched query associated - with its score (true) or as an array containing the name of the matched queries - (false) This functionality reruns each named query on every hit in a search - response. Typically, this adds a small overhead to a request. However, using - computationally expensive named queries on a large number of hits may add - significant overhead. - :param indices_boost: Boosts the _score of documents from specified indices. - :param knn: Defines the approximate kNN search to run. + :param include_named_queries_score: If `true`, the response includes the score + contribution from any named queries. This functionality reruns each named + query on every hit in a search response. Typically, this adds a small overhead + to a request. However, using computationally expensive named queries on a + large number of hits may add significant overhead. + :param indices_boost: Boost the `_score` of documents from specified indices. + The boost value is the factor by which scores are multiplied. 
A boost value + greater than `1.0` increases the score. A boost value between `0` and `1.0` + decreases the score. + :param knn: The approximate kNN search to run. :param lenient: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can - only be used when the `q` query string parameter is specified. - :param max_concurrent_shard_requests: Defines the number of concurrent shard - requests per node this search executes concurrently. This value should be - used to limit the impact of the search on the cluster in order to limit the - number of concurrent shard requests. + be used only when the `q` query string parameter is specified. + :param max_concurrent_shard_requests: The number of concurrent shard requests + per node that the search runs concurrently. This value should be used to + limit the impact of the search on the cluster in order to limit the number + of concurrent shard requests. :param min_compatible_shard_node: The minimum version of the node that can handle the request Any handling node with a lower version will fail the request. - :param min_score: Minimum `_score` for matching documents. Documents with a lower - `_score` are not included in the search results. - :param pit: Limits the search to a point in time (PIT). If you provide a PIT, + :param min_score: The minimum `_score` for matching documents. Documents with + a lower `_score` are not included in the search results. + :param pit: Limit the search to a point in time (PIT). If you provide a PIT, you cannot specify an `` in the request path. :param post_filter: Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated. A post filter has no impact on the aggregation results. - :param pre_filter_shard_size: Defines a threshold that enforces a pre-filter - roundtrip to prefilter search shards based on query rewriting if the number - of shards the search request expands to exceeds the threshold. This filter - roundtrip can limit the number of shards significantly if for instance a - shard can not match any documents based on its rewrite method (if date filters - are mandatory to match but the shard bounds and the query are disjoint). - When unspecified, the pre-filter phase is executed if any of these conditions - is met: the request targets more than 128 shards; the request targets one - or more read-only index; the primary sort of the query targets an indexed + :param pre_filter_shard_size: A threshold that enforces a pre-filter roundtrip + to prefilter search shards based on query rewriting if the number of shards + the search request expands to exceeds the threshold. This filter roundtrip + can limit the number of shards significantly if for instance a shard can + not match any documents based on its rewrite method (if date filters are + mandatory to match but the shard bounds and the query are disjoint). When + unspecified, the pre-filter phase is executed if any of these conditions + is met: * The request targets more than 128 shards. * The request targets + one or more read-only index. * The primary sort of the query targets an indexed field. - :param preference: Nodes and shards used for the search. By default, Elasticsearch + :param preference: The nodes and shards used for the search. By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, - accounting for allocation awareness. 
Valid values are: `_only_local` to run - the search only on shards on the local node; `_local` to, if possible, run - the search on shards on the local node, or if not, select shards using the - default method; `_only_nodes:,` to run the search on only - the specified nodes IDs, where, if suitable shards exist on more than one - selected node, use shards on those nodes using the default method, or if - none of the specified nodes are available, select shards from any available - node using the default method; `_prefer_nodes:,` to if + accounting for allocation awareness. Valid values are: * `_only_local` to + run the search only on shards on the local node; * `_local` to, if possible, + run the search on shards on the local node, or if not, select shards using + the default method; * `_only_nodes:,` to run the search + on only the specified nodes IDs, where, if suitable shards exist on more + than one selected node, use shards on those nodes using the default method, + or if none of the specified nodes are available, select shards from any available + node using the default method; * `_prefer_nodes:,` to if possible, run the search on the specified nodes IDs, or if not, select shards - using the default method; `_shards:,` to run the search only - on the specified shards; `` (any string that does not start + using the default method; * `_shards:,` to run the search only + on the specified shards; * `` (any string that does not start with `_`) to route searches with the same `` to the same shards in the same order. :param profile: Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution. - :param q: Query in the Lucene query string syntax using query parameter search. - Query parameter searches do not support the full Elasticsearch Query DSL - but are handy for testing. - :param query: Defines the search definition using the Query DSL. - :param rank: Defines the Reciprocal Rank Fusion (RRF) to use. + :param q: A query in the Lucene query string syntax. Query parameter searches + do not support the full Elasticsearch Query DSL but are handy for testing. + IMPORTANT: This parameter overrides the query parameter in the request body. + If both parameters are specified, documents matching the query request body + parameter are not returned. + :param query: The search definition using the Query DSL. + :param rank: The Reciprocal Rank Fusion (RRF) to use. :param request_cache: If `true`, the caching of search results is enabled for - requests where `size` is `0`. Defaults to index level settings. + requests where `size` is `0`. It defaults to index level settings. :param rescore: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. :param rest_total_hits_as_int: Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response. :param retriever: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that - also return top documents such as query and knn. - :param routing: Custom value used to route operations to a specific shard. - :param runtime_mappings: Defines one or more runtime fields in the search request. - These fields take precedence over mapped fields with the same name. + also return top documents such as `query` and `knn`. 
+ :param routing: A custom value that is used to route operations to a specific + shard. + :param runtime_mappings: One or more runtime fields in the search request. These + fields take precedence over mapped fields with the same name. :param script_fields: Retrieve a script evaluation (based on different fields) for each hit. - :param scroll: Period to retain the search context for scrolling. See Scroll - search results. By default, this value cannot exceed `1d` (24 hours). You - can change this limit using the `search.max_keep_alive` cluster-level setting. + :param scroll: The period to retain the search context for scrolling. By default, + this value cannot exceed `1d` (24 hours). You can change this limit by using + the `search.max_keep_alive` cluster-level setting. :param search_after: Used to retrieve the next page of hits using a set of sort values from the previous page. - :param search_type: How distributed term frequencies are calculated for relevance - scoring. - :param seq_no_primary_term: If `true`, returns sequence number and primary term - of the last modification of each hit. - :param size: The number of hits to return. By default, you cannot page through - more than 10,000 hits using the `from` and `size` parameters. To page through - more hits, use the `search_after` parameter. - :param slice: Can be used to split a scrolled search into multiple slices that - can be consumed independently. + :param search_type: Indicates how distributed term frequencies are calculated + for relevance scoring. + :param seq_no_primary_term: If `true`, the request returns sequence number and + primary term of the last modification of each hit. + :param size: The number of hits to return, which must not be negative. By default, + you cannot page through more than 10,000 hits using the `from` and `size` + parameters. To page through more hits, use the `search_after` property. + :param slice: Split a scrolled search into multiple slices that can be consumed + independently. :param sort: A comma-separated list of : pairs. - :param source: Indicates which source fields are returned for matching documents. - These fields are returned in the hits._source property of the search response. + :param source: The source fields that are returned for matching documents. These + fields are returned in the `hits._source` property of the search response. + If the `stored_fields` property is specified, the `_source` property defaults + to `false`. Otherwise, it defaults to `true`. :param source_excludes: A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` @@ -4688,45 +4797,46 @@ def search( returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. - :param stats: Stats groups to associate with the search. Each group maintains + :param stats: The stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. - :param stored_fields: List of stored fields to return as part of a hit. If no - fields are specified, no stored fields are included in the response. If this - field is specified, the `_source` parameter defaults to `false`. You can - pass `_source: true` to return both source fields and stored fields in the - search response. 
+ :param stored_fields: A comma-separated list of stored fields to return as part + of a hit. If no fields are specified, no stored fields are included in the + response. If this field is specified, the `_source` property defaults to + `false`. You can pass `_source: true` to return both source fields and stored + fields in the search response. :param suggest: Defines a suggester that provides similar looking terms based on a provided text. - :param suggest_field: Specifies which field to use for suggestions. - :param suggest_mode: Specifies the suggest mode. This parameter can only be used - when the `suggest_field` and `suggest_text` query string parameters are specified. - :param suggest_size: Number of suggestions to return. This parameter can only - be used when the `suggest_field` and `suggest_text` query string parameters + :param suggest_field: The field to use for suggestions. + :param suggest_mode: The suggest mode. This parameter can be used only when the + `suggest_field` and `suggest_text` query string parameters are specified. + :param suggest_size: The number of suggestions to return. This parameter can + be used only when the `suggest_field` and `suggest_text` query string parameters are specified. :param suggest_text: The source text for which the suggestions should be returned. - This parameter can only be used when the `suggest_field` and `suggest_text` + This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. - :param terminate_after: Maximum number of documents to collect for each shard. + :param terminate_after: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. - Elasticsearch collects documents before sorting. Use with caution. Elasticsearch - applies this parameter to each shard handling the request. When possible, - let Elasticsearch perform early termination automatically. Avoid specifying - this parameter for requests that target data streams with backing indices - across multiple data tiers. If set to `0` (default), the query does not terminate - early. - :param timeout: Specifies the period of time to wait for a response from each - shard. If no response is received before the timeout expires, the request - fails and returns an error. Defaults to no timeout. - :param track_scores: If true, calculate and return document scores, even if the - scores are not used for sorting. + Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. + Elasticsearch applies this property to each shard handling the request. When + possible, let Elasticsearch perform early termination automatically. Avoid + specifying this property for requests that target data streams with backing + indices across multiple data tiers. If set to `0` (default), the query does + not terminate early. + :param timeout: The period of time to wait for a response from each shard. If + no response is received before the timeout expires, the request fails and + returns an error. Defaults to no timeout. + :param track_scores: If `true`, calculate and return document scores, even if + the scores are not used for sorting. :param track_total_hits: Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. 
:param typed_keys: If `true`, aggregation and suggester names are be prefixed by their respective types in the response. - :param version: If true, returns document version as part of a hit. + :param version: If `true`, the request returns the document version as part of + a hit. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: diff --git a/elasticsearch/_sync/client/indices.py b/elasticsearch/_sync/client/indices.py index 39e513ea1..b4774cea3 100644 --- a/elasticsearch/_sync/client/indices.py +++ b/elasticsearch/_sync/client/indices.py @@ -230,6 +230,51 @@ def analyze( path_parts=__path_parts, ) + @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) + def cancel_migrate_reindex( + self, + *, + index: t.Union[str, t.Sequence[str]], + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Cancel a migration reindex operation.
Cancel a migration reindex attempt for a data stream or index.
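For example (experimental API; the data stream name is a placeholder and `client` is an assumed `Elasticsearch` instance):

    client.indices.cancel_migrate_reindex(index="my-data-stream")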

+ + + ``_ + + :param index: The index or data stream name + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'index'") + __path_parts: t.Dict[str, str] = {"index": _quote(index)} + __path = f'/_migration/reindex/{__path_parts["index"]}/_cancel' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + endpoint_id="indices.cancel_migrate_reindex", + path_parts=__path_parts, + ) + @_rewrite_parameters() def clear_cache( self, @@ -710,6 +755,71 @@ def create_data_stream( path_parts=__path_parts, ) + @_rewrite_parameters( + body_name="create_from", + ) + @_stability_warning(Stability.EXPERIMENTAL) + def create_from( + self, + *, + source: str, + dest: str, + create_from: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Create an index from a source index.
Copy the mappings and settings from the source index to a destination index while allowing request settings and mappings to override the source values.
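A hedged sketch of the call; the index names are placeholders, and the request body is left empty here although overrides can be supplied:

    client.indices.create_from(
        source="my-source-index",
        dest="my-dest-index",
        create_from={},  # settings/mappings overrides could go here
    )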

+ + + ``_ + + :param source: The source index or data stream name + :param dest: The destination index or data stream name + :param create_from: + """ + if source in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'source'") + if dest in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'dest'") + if create_from is None and body is None: + raise ValueError( + "Empty value passed for parameters 'create_from' and 'body', one of them should be set." + ) + elif create_from is not None and body is not None: + raise ValueError("Cannot set both 'create_from' and 'body'") + __path_parts: t.Dict[str, str] = { + "source": _quote(source), + "dest": _quote(dest), + } + __path = f'/_create_from/{__path_parts["source"]}/{__path_parts["dest"]}' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __body = create_from if create_from is not None else body + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="indices.create_from", + path_parts=__path_parts, + ) + @_rewrite_parameters() def data_streams_stats( self, @@ -2585,6 +2695,51 @@ def get_mapping( path_parts=__path_parts, ) + @_rewrite_parameters() + @_stability_warning(Stability.EXPERIMENTAL) + def get_migrate_reindex_status( + self, + *, + index: t.Union[str, t.Sequence[str]], + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Get the migration reindexing status.
Get the status of a migration reindex attempt for a data stream or index.
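For example (the data stream name is a placeholder):

    status = client.indices.get_migrate_reindex_status(index="my-data-stream")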

+ + + ``_ + + :param index: The index or data stream name. + """ + if index in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'index'") + __path_parts: t.Dict[str, str] = {"index": _quote(index)} + __path = f'/_migration/reindex/{__path_parts["index"]}/_status' + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "GET", + __path, + params=__query, + headers=__headers, + endpoint_id="indices.get_migrate_reindex_status", + path_parts=__path_parts, + ) + @_rewrite_parameters() def get_settings( self, @@ -2754,6 +2909,62 @@ def get_template( path_parts=__path_parts, ) + @_rewrite_parameters( + body_name="reindex", + ) + @_stability_warning(Stability.EXPERIMENTAL) + def migrate_reindex( + self, + *, + reindex: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

Reindex legacy backing indices.
Reindex all legacy backing indices for a data stream. + This operation occurs in a persistent task. + The persistent task ID is returned immediately and the reindexing work is completed in that task.
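A minimal sketch; the body shape follows the migrate reindex API and is an assumption here, and the data stream name is a placeholder:

    client.indices.migrate_reindex(
        reindex={"mode": "upgrade", "source": {"index": "my-data-stream"}}
    )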

+ + + ``_ + + :param reindex: + """ + if reindex is None and body is None: + raise ValueError( + "Empty value passed for parameters 'reindex' and 'body', one of them should be set." + ) + elif reindex is not None and body is not None: + raise ValueError("Cannot set both 'reindex' and 'body'") + __path_parts: t.Dict[str, str] = {} + __path = "/_migration/reindex" + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __body = reindex if reindex is not None else body + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="indices.migrate_reindex", + path_parts=__path_parts, + ) + @_rewrite_parameters() def migrate_to_data_stream( self, From 394b51ea4c098a5d5a2908e12d72d852430d0130 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 11 Feb 2025 14:44:49 +0000 Subject: [PATCH 27/65] Add Elasticsearch-DSL code (#2736) (#2781) * Add Elasticsearch-DSL code * reformat code * runasync dsl * dsl generator * fix pytest configuration * dsl testing fixes * wipe cluster after dsl tests * remove unused coverage option * review feedback * 2nd round of feedback addressed * fix coverage reports (cherry picked from commit 10ded221534c88c3a89e9dd4076c89b0b965ff0c) Co-authored-by: Miguel Grinberg --- elasticsearch/__init__.py | 4 +- elasticsearch/_async/helpers.py | 2 +- elasticsearch/_sync/client/utils.py | 2 +- elasticsearch/dsl/__init__.py | 203 + elasticsearch/dsl/_async/__init__.py | 16 + elasticsearch/dsl/_async/document.py | 522 ++ elasticsearch/dsl/_async/faceted_search.py | 50 + elasticsearch/dsl/_async/index.py | 639 ++ elasticsearch/dsl/_async/mapping.py | 49 + elasticsearch/dsl/_async/search.py | 233 + elasticsearch/dsl/_async/update_by_query.py | 47 + elasticsearch/dsl/_sync/__init__.py | 16 + elasticsearch/dsl/_sync/document.py | 514 ++ elasticsearch/dsl/_sync/faceted_search.py | 50 + elasticsearch/dsl/_sync/index.py | 597 ++ elasticsearch/dsl/_sync/mapping.py | 49 + elasticsearch/dsl/_sync/search.py | 218 + elasticsearch/dsl/_sync/update_by_query.py | 45 + elasticsearch/dsl/aggs.py | 3730 ++++++++++ elasticsearch/dsl/analysis.py | 341 + elasticsearch/dsl/async_connections.py | 37 + elasticsearch/dsl/connections.py | 142 + elasticsearch/dsl/document.py | 20 + elasticsearch/dsl/document_base.py | 444 ++ elasticsearch/dsl/exceptions.py | 32 + elasticsearch/dsl/faceted_search.py | 28 + elasticsearch/dsl/faceted_search_base.py | 489 ++ elasticsearch/dsl/field.py | 587 ++ elasticsearch/dsl/function.py | 180 + elasticsearch/dsl/index.py | 23 + elasticsearch/dsl/index_base.py | 178 + elasticsearch/dsl/mapping.py | 19 + elasticsearch/dsl/mapping_base.py | 219 + elasticsearch/dsl/query.py | 2794 ++++++++ elasticsearch/dsl/response/__init__.py | 354 + elasticsearch/dsl/response/aggs.py | 100 + elasticsearch/dsl/response/hit.py | 53 + elasticsearch/dsl/search.py | 20 + elasticsearch/dsl/search_base.py | 1040 +++ elasticsearch/dsl/serializer.py | 34 + elasticsearch/dsl/types.py | 6273 +++++++++++++++++ elasticsearch/dsl/update_by_query.py | 19 + elasticsearch/dsl/update_by_query_base.py | 149 + elasticsearch/dsl/utils.py | 687 ++ 
elasticsearch/dsl/wrappers.py | 119 + elasticsearch/helpers/actions.py | 2 +- examples/dsl/README.rst | 47 + examples/dsl/alias_migration.py | 161 + examples/dsl/async/alias_migration.py | 162 + examples/dsl/async/completion.py | 114 + examples/dsl/async/composite_agg.py | 93 + examples/dsl/async/parent_child.py | 276 + examples/dsl/async/percolate.py | 117 + examples/dsl/async/search_as_you_type.py | 99 + examples/dsl/async/semantic_text.py | 148 + examples/dsl/async/sparse_vectors.py | 198 + examples/dsl/async/vectors.py | 187 + examples/dsl/completion.py | 113 + examples/dsl/composite_agg.py | 90 + examples/dsl/parent_child.py | 275 + examples/dsl/percolate.py | 116 + examples/dsl/search_as_you_type.py | 93 + examples/dsl/semantic_text.py | 147 + examples/dsl/sparse_vectors.py | 197 + examples/dsl/vectors.py | 186 + noxfile.py | 46 +- pyproject.toml | 21 +- setup.cfg | 2 +- test_elasticsearch/test_dsl/__init__.py | 16 + .../test_dsl/_async/__init__.py | 16 + .../test_dsl/_async/test_document.py | 883 +++ .../test_dsl/_async/test_faceted_search.py | 201 + .../test_dsl/_async/test_index.py | 197 + .../test_dsl/_async/test_mapping.py | 222 + .../test_dsl/_async/test_search.py | 841 +++ .../test_dsl/_async/test_update_by_query.py | 180 + test_elasticsearch/test_dsl/_sync/__init__.py | 16 + .../test_dsl/_sync/test_document.py | 883 +++ .../test_dsl/_sync/test_faceted_search.py | 201 + .../test_dsl/_sync/test_index.py | 190 + .../test_dsl/_sync/test_mapping.py | 222 + .../test_dsl/_sync/test_search.py | 831 +++ .../test_dsl/_sync/test_update_by_query.py | 180 + test_elasticsearch/test_dsl/async_sleep.py | 24 + test_elasticsearch/test_dsl/conftest.py | 466 ++ test_elasticsearch/test_dsl/sleep.py | 24 + test_elasticsearch/test_dsl/test_aggs.py | 530 ++ test_elasticsearch/test_dsl/test_analysis.py | 216 + .../test_dsl/test_connections.py | 143 + test_elasticsearch/test_dsl/test_field.py | 234 + .../test_dsl/test_integration/__init__.py | 16 + .../test_integration/_async/__init__.py | 16 + .../test_integration/_async/test_analysis.py | 54 + .../test_integration/_async/test_document.py | 852 +++ .../_async/test_faceted_search.py | 305 + .../test_integration/_async/test_index.py | 162 + .../test_integration/_async/test_mapping.py | 171 + .../test_integration/_async/test_search.py | 304 + .../_async/test_update_by_query.py | 85 + .../test_integration/_sync/__init__.py | 16 + .../test_integration/_sync/test_analysis.py | 54 + .../test_integration/_sync/test_document.py | 844 +++ .../_sync/test_faceted_search.py | 305 + .../test_integration/_sync/test_index.py | 160 + .../test_integration/_sync/test_mapping.py | 169 + .../test_integration/_sync/test_search.py | 294 + .../_sync/test_update_by_query.py | 85 + .../test_dsl/test_integration/test_count.py | 45 + .../test_dsl/test_integration/test_data.py | 1093 +++ .../test_examples/__init__.py | 16 + .../test_examples/_async/__init__.py | 16 + .../_async/test_alias_migration.py | 74 + .../test_examples/_async/test_completion.py | 40 + .../_async/test_composite_aggs.py | 57 + .../test_examples/_async/test_parent_child.py | 116 + .../test_examples/_async/test_percolate.py | 38 + .../test_examples/_async/test_vectors.py | 56 + .../test_examples/_sync/__init__.py | 16 + .../_sync/test_alias_migration.py | 74 + .../test_examples/_sync/test_completion.py | 40 + .../_sync/test_composite_aggs.py | 57 + .../test_examples/_sync/test_parent_child.py | 111 + .../test_examples/_sync/test_percolate.py | 38 + .../test_examples/_sync/test_vectors.py | 56 + 
.../test_examples/async_examples | 1 + .../test_integration/test_examples/examples | 1 + test_elasticsearch/test_dsl/test_package.py | 22 + test_elasticsearch/test_dsl/test_query.py | 671 ++ test_elasticsearch/test_dsl/test_result.py | 215 + test_elasticsearch/test_dsl/test_utils.py | 136 + .../test_dsl/test_validation.py | 162 + test_elasticsearch/test_dsl/test_wrappers.py | 111 + .../test_vectorstore/test_vectorstore.py | 4 +- utils/dsl-generator.py | 855 +++ utils/run-unasync-dsl.py | 151 + utils/templates/aggs.py.tpl | 320 + utils/templates/query.py.tpl | 373 + utils/templates/response.__init__.py.tpl | 225 + utils/templates/types.py.tpl | 107 + 139 files changed, 40147 insertions(+), 15 deletions(-) create mode 100644 elasticsearch/dsl/__init__.py create mode 100644 elasticsearch/dsl/_async/__init__.py create mode 100644 elasticsearch/dsl/_async/document.py create mode 100644 elasticsearch/dsl/_async/faceted_search.py create mode 100644 elasticsearch/dsl/_async/index.py create mode 100644 elasticsearch/dsl/_async/mapping.py create mode 100644 elasticsearch/dsl/_async/search.py create mode 100644 elasticsearch/dsl/_async/update_by_query.py create mode 100644 elasticsearch/dsl/_sync/__init__.py create mode 100644 elasticsearch/dsl/_sync/document.py create mode 100644 elasticsearch/dsl/_sync/faceted_search.py create mode 100644 elasticsearch/dsl/_sync/index.py create mode 100644 elasticsearch/dsl/_sync/mapping.py create mode 100644 elasticsearch/dsl/_sync/search.py create mode 100644 elasticsearch/dsl/_sync/update_by_query.py create mode 100644 elasticsearch/dsl/aggs.py create mode 100644 elasticsearch/dsl/analysis.py create mode 100644 elasticsearch/dsl/async_connections.py create mode 100644 elasticsearch/dsl/connections.py create mode 100644 elasticsearch/dsl/document.py create mode 100644 elasticsearch/dsl/document_base.py create mode 100644 elasticsearch/dsl/exceptions.py create mode 100644 elasticsearch/dsl/faceted_search.py create mode 100644 elasticsearch/dsl/faceted_search_base.py create mode 100644 elasticsearch/dsl/field.py create mode 100644 elasticsearch/dsl/function.py create mode 100644 elasticsearch/dsl/index.py create mode 100644 elasticsearch/dsl/index_base.py create mode 100644 elasticsearch/dsl/mapping.py create mode 100644 elasticsearch/dsl/mapping_base.py create mode 100644 elasticsearch/dsl/query.py create mode 100644 elasticsearch/dsl/response/__init__.py create mode 100644 elasticsearch/dsl/response/aggs.py create mode 100644 elasticsearch/dsl/response/hit.py create mode 100644 elasticsearch/dsl/search.py create mode 100644 elasticsearch/dsl/search_base.py create mode 100644 elasticsearch/dsl/serializer.py create mode 100644 elasticsearch/dsl/types.py create mode 100644 elasticsearch/dsl/update_by_query.py create mode 100644 elasticsearch/dsl/update_by_query_base.py create mode 100644 elasticsearch/dsl/utils.py create mode 100644 elasticsearch/dsl/wrappers.py create mode 100644 examples/dsl/README.rst create mode 100644 examples/dsl/alias_migration.py create mode 100644 examples/dsl/async/alias_migration.py create mode 100644 examples/dsl/async/completion.py create mode 100644 examples/dsl/async/composite_agg.py create mode 100644 examples/dsl/async/parent_child.py create mode 100644 examples/dsl/async/percolate.py create mode 100644 examples/dsl/async/search_as_you_type.py create mode 100644 examples/dsl/async/semantic_text.py create mode 100644 examples/dsl/async/sparse_vectors.py create mode 100644 examples/dsl/async/vectors.py create mode 100644 
examples/dsl/completion.py create mode 100644 examples/dsl/composite_agg.py create mode 100644 examples/dsl/parent_child.py create mode 100644 examples/dsl/percolate.py create mode 100644 examples/dsl/search_as_you_type.py create mode 100644 examples/dsl/semantic_text.py create mode 100644 examples/dsl/sparse_vectors.py create mode 100644 examples/dsl/vectors.py create mode 100644 test_elasticsearch/test_dsl/__init__.py create mode 100644 test_elasticsearch/test_dsl/_async/__init__.py create mode 100644 test_elasticsearch/test_dsl/_async/test_document.py create mode 100644 test_elasticsearch/test_dsl/_async/test_faceted_search.py create mode 100644 test_elasticsearch/test_dsl/_async/test_index.py create mode 100644 test_elasticsearch/test_dsl/_async/test_mapping.py create mode 100644 test_elasticsearch/test_dsl/_async/test_search.py create mode 100644 test_elasticsearch/test_dsl/_async/test_update_by_query.py create mode 100644 test_elasticsearch/test_dsl/_sync/__init__.py create mode 100644 test_elasticsearch/test_dsl/_sync/test_document.py create mode 100644 test_elasticsearch/test_dsl/_sync/test_faceted_search.py create mode 100644 test_elasticsearch/test_dsl/_sync/test_index.py create mode 100644 test_elasticsearch/test_dsl/_sync/test_mapping.py create mode 100644 test_elasticsearch/test_dsl/_sync/test_search.py create mode 100644 test_elasticsearch/test_dsl/_sync/test_update_by_query.py create mode 100644 test_elasticsearch/test_dsl/async_sleep.py create mode 100644 test_elasticsearch/test_dsl/conftest.py create mode 100644 test_elasticsearch/test_dsl/sleep.py create mode 100644 test_elasticsearch/test_dsl/test_aggs.py create mode 100644 test_elasticsearch/test_dsl/test_analysis.py create mode 100644 test_elasticsearch/test_dsl/test_connections.py create mode 100644 test_elasticsearch/test_dsl/test_field.py create mode 100644 test_elasticsearch/test_dsl/test_integration/__init__.py create mode 100644 test_elasticsearch/test_dsl/test_integration/_async/__init__.py create mode 100644 test_elasticsearch/test_dsl/test_integration/_async/test_analysis.py create mode 100644 test_elasticsearch/test_dsl/test_integration/_async/test_document.py create mode 100644 test_elasticsearch/test_dsl/test_integration/_async/test_faceted_search.py create mode 100644 test_elasticsearch/test_dsl/test_integration/_async/test_index.py create mode 100644 test_elasticsearch/test_dsl/test_integration/_async/test_mapping.py create mode 100644 test_elasticsearch/test_dsl/test_integration/_async/test_search.py create mode 100644 test_elasticsearch/test_dsl/test_integration/_async/test_update_by_query.py create mode 100644 test_elasticsearch/test_dsl/test_integration/_sync/__init__.py create mode 100644 test_elasticsearch/test_dsl/test_integration/_sync/test_analysis.py create mode 100644 test_elasticsearch/test_dsl/test_integration/_sync/test_document.py create mode 100644 test_elasticsearch/test_dsl/test_integration/_sync/test_faceted_search.py create mode 100644 test_elasticsearch/test_dsl/test_integration/_sync/test_index.py create mode 100644 test_elasticsearch/test_dsl/test_integration/_sync/test_mapping.py create mode 100644 test_elasticsearch/test_dsl/test_integration/_sync/test_search.py create mode 100644 test_elasticsearch/test_dsl/test_integration/_sync/test_update_by_query.py create mode 100644 test_elasticsearch/test_dsl/test_integration/test_count.py create mode 100644 test_elasticsearch/test_dsl/test_integration/test_data.py create mode 100644 
test_elasticsearch/test_dsl/test_integration/test_examples/__init__.py create mode 100644 test_elasticsearch/test_dsl/test_integration/test_examples/_async/__init__.py create mode 100644 test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_alias_migration.py create mode 100644 test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_completion.py create mode 100644 test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_composite_aggs.py create mode 100644 test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_parent_child.py create mode 100644 test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_percolate.py create mode 100644 test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_vectors.py create mode 100644 test_elasticsearch/test_dsl/test_integration/test_examples/_sync/__init__.py create mode 100644 test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_alias_migration.py create mode 100644 test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_completion.py create mode 100644 test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_composite_aggs.py create mode 100644 test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_parent_child.py create mode 100644 test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_percolate.py create mode 100644 test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_vectors.py create mode 120000 test_elasticsearch/test_dsl/test_integration/test_examples/async_examples create mode 120000 test_elasticsearch/test_dsl/test_integration/test_examples/examples create mode 100644 test_elasticsearch/test_dsl/test_package.py create mode 100644 test_elasticsearch/test_dsl/test_query.py create mode 100644 test_elasticsearch/test_dsl/test_result.py create mode 100644 test_elasticsearch/test_dsl/test_utils.py create mode 100644 test_elasticsearch/test_dsl/test_validation.py create mode 100644 test_elasticsearch/test_dsl/test_wrappers.py create mode 100644 utils/dsl-generator.py create mode 100644 utils/run-unasync-dsl.py create mode 100644 utils/templates/aggs.py.tpl create mode 100644 utils/templates/query.py.tpl create mode 100644 utils/templates/response.__init__.py.tpl create mode 100644 utils/templates/types.py.tpl diff --git a/elasticsearch/__init__.py b/elasticsearch/__init__.py index 723b3a2b7..c2277228a 100644 --- a/elasticsearch/__init__.py +++ b/elasticsearch/__init__.py @@ -27,7 +27,7 @@ from ._version import __versionstr__ # Ensure that a compatible version of elastic-transport is installed. -_version_groups = tuple(int(x) for x in re.search(r"^(\d+)\.(\d+)\.(\d+)", _elastic_transport_version).groups()) # type: ignore +_version_groups = tuple(int(x) for x in re.search(r"^(\d+)\.(\d+)\.(\d+)", _elastic_transport_version).groups()) # type: ignore[union-attr] if _version_groups < (8, 0, 0) or _version_groups > (9, 0, 0): raise ImportError( "An incompatible version of elastic-transport is installed. 
Must be between " @@ -35,7 +35,7 @@ "$ python -m pip install 'elastic-transport>=8, <9'" ) -_version_groups = re.search(r"^(\d+)\.(\d+)\.(\d+)", __versionstr__).groups() # type: ignore +_version_groups = re.search(r"^(\d+)\.(\d+)\.(\d+)", __versionstr__).groups() # type: ignore[assignment, union-attr] _major, _minor, _patch = (int(x) for x in _version_groups) VERSION = __version__ = (_major, _minor, _patch) diff --git a/elasticsearch/_async/helpers.py b/elasticsearch/_async/helpers.py index 1bc339917..4c53f0bbe 100644 --- a/elasticsearch/_async/helpers.py +++ b/elasticsearch/_async/helpers.py @@ -257,7 +257,7 @@ async def map_actions() -> AsyncIterable[_TYPE_BULK_ACTION_HEADER_AND_BODY]: ] ok: bool info: Dict[str, Any] - async for data, (ok, info) in azip( # type: ignore + async for data, (ok, info) in azip( # type: ignore[assignment, misc] bulk_data, _process_bulk_chunk( client, diff --git a/elasticsearch/_sync/client/utils.py b/elasticsearch/_sync/client/utils.py index c5ec21dae..9f957987c 100644 --- a/elasticsearch/_sync/client/utils.py +++ b/elasticsearch/_sync/client/utils.py @@ -232,7 +232,7 @@ def host_mapping_to_node_config(host: Mapping[str, Union[str, int]]) -> NodeConf ) options["path_prefix"] = options.pop("url_prefix") - return NodeConfig(**options) # type: ignore + return NodeConfig(**options) # type: ignore[arg-type] def cloud_id_to_node_configs(cloud_id: str) -> List[NodeConfig]: diff --git a/elasticsearch/dsl/__init__.py b/elasticsearch/dsl/__init__.py new file mode 100644 index 000000000..860e2b761 --- /dev/null +++ b/elasticsearch/dsl/__init__.py @@ -0,0 +1,203 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from . 
import async_connections, connections +from .aggs import A, Agg +from .analysis import analyzer, char_filter, normalizer, token_filter, tokenizer +from .document import AsyncDocument, Document +from .document_base import InnerDoc, M, MetaField, mapped_field +from .exceptions import ( + ElasticsearchDslException, + IllegalOperation, + UnknownDslObject, + ValidationException, +) +from .faceted_search import ( + AsyncFacetedSearch, + DateHistogramFacet, + Facet, + FacetedResponse, + FacetedSearch, + HistogramFacet, + NestedFacet, + RangeFacet, + TermsFacet, +) +from .field import ( + Binary, + Boolean, + Byte, + Completion, + ConstantKeyword, + CustomField, + Date, + DateRange, + DenseVector, + Double, + DoubleRange, + Field, + Float, + FloatRange, + GeoPoint, + GeoShape, + HalfFloat, + Integer, + IntegerRange, + Ip, + IpRange, + Join, + Keyword, + Long, + LongRange, + Murmur3, + Nested, + Object, + Percolator, + Point, + RangeField, + RankFeature, + RankFeatures, + ScaledFloat, + SearchAsYouType, + Shape, + Short, + SparseVector, + Text, + TokenCount, + construct_field, +) +from .function import SF +from .index import ( + AsyncComposableIndexTemplate, + AsyncIndex, + AsyncIndexTemplate, + ComposableIndexTemplate, + Index, + IndexTemplate, +) +from .mapping import AsyncMapping, Mapping +from .query import Q, Query +from .response import AggResponse, Response, UpdateByQueryResponse +from .search import ( + AsyncEmptySearch, + AsyncMultiSearch, + AsyncSearch, + EmptySearch, + MultiSearch, + Search, +) +from .update_by_query import AsyncUpdateByQuery, UpdateByQuery +from .utils import AttrDict, AttrList, DslBase +from .wrappers import Range + +__all__ = [ + "A", + "Agg", + "AggResponse", + "AsyncComposableIndexTemplate", + "AsyncDocument", + "AsyncEmptySearch", + "AsyncFacetedSearch", + "AsyncIndex", + "AsyncIndexTemplate", + "AsyncMapping", + "AsyncMultiSearch", + "AsyncSearch", + "AsyncUpdateByQuery", + "AttrDict", + "AttrList", + "Binary", + "Boolean", + "Byte", + "Completion", + "ComposableIndexTemplate", + "ConstantKeyword", + "CustomField", + "Date", + "DateHistogramFacet", + "DateRange", + "DenseVector", + "Document", + "Double", + "DoubleRange", + "DslBase", + "ElasticsearchDslException", + "EmptySearch", + "Facet", + "FacetedResponse", + "FacetedSearch", + "Field", + "Float", + "FloatRange", + "GeoPoint", + "GeoShape", + "HalfFloat", + "HistogramFacet", + "IllegalOperation", + "Index", + "IndexTemplate", + "InnerDoc", + "Integer", + "IntegerRange", + "Ip", + "IpRange", + "Join", + "Keyword", + "Long", + "LongRange", + "M", + "Mapping", + "MetaField", + "MultiSearch", + "Murmur3", + "Nested", + "NestedFacet", + "Object", + "Percolator", + "Point", + "Q", + "Query", + "Range", + "RangeFacet", + "RangeField", + "RankFeature", + "RankFeatures", + "Response", + "SF", + "ScaledFloat", + "Search", + "SearchAsYouType", + "Shape", + "Short", + "SparseVector", + "TermsFacet", + "Text", + "TokenCount", + "UnknownDslObject", + "UpdateByQuery", + "UpdateByQueryResponse", + "ValidationException", + "analyzer", + "async_connections", + "char_filter", + "connections", + "construct_field", + "mapped_field", + "normalizer", + "token_filter", + "tokenizer", +] diff --git a/elasticsearch/dsl/_async/__init__.py b/elasticsearch/dsl/_async/__init__.py new file mode 100644 index 000000000..2a87d183f --- /dev/null +++ b/elasticsearch/dsl/_async/__init__.py @@ -0,0 +1,16 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. diff --git a/elasticsearch/dsl/_async/document.py b/elasticsearch/dsl/_async/document.py new file mode 100644 index 000000000..4b7654761 --- /dev/null +++ b/elasticsearch/dsl/_async/document.py @@ -0,0 +1,522 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import collections.abc +from typing import ( + TYPE_CHECKING, + Any, + AsyncIterable, + Dict, + List, + Optional, + Tuple, + Union, + cast, +) + +from typing_extensions import Self, dataclass_transform + +from elasticsearch.exceptions import NotFoundError, RequestError +from elasticsearch.helpers import async_bulk + +from .._async.index import AsyncIndex +from ..async_connections import get_connection +from ..document_base import DocumentBase, DocumentMeta, mapped_field +from ..exceptions import IllegalOperation +from ..utils import DOC_META_FIELDS, META_FIELDS, AsyncUsingType, merge +from .search import AsyncSearch + +if TYPE_CHECKING: + from elasticsearch import AsyncElasticsearch + + +class AsyncIndexMeta(DocumentMeta): + _index: AsyncIndex + + # global flag to guard us from associating an Index with the base Document + # class, only user defined subclasses should have an _index attr + _document_initialized = False + + def __new__( + cls, name: str, bases: Tuple[type, ...], attrs: Dict[str, Any] + ) -> "AsyncIndexMeta": + new_cls = super().__new__(cls, name, bases, attrs) + if cls._document_initialized: + index_opts = attrs.pop("Index", None) + index = cls.construct_index(index_opts, bases) + new_cls._index = index + index.document(new_cls) + cls._document_initialized = True + return cast(AsyncIndexMeta, new_cls) + + @classmethod + def construct_index( + cls, opts: Dict[str, Any], bases: Tuple[type, ...] 
+ ) -> AsyncIndex: + if opts is None: + for b in bases: + if hasattr(b, "_index"): + return b._index + + # Set None as Index name so it will set _all while making the query + return AsyncIndex(name=None) + + i = AsyncIndex( + getattr(opts, "name", "*"), using=getattr(opts, "using", "default") + ) + i.settings(**getattr(opts, "settings", {})) + i.aliases(**getattr(opts, "aliases", {})) + for a in getattr(opts, "analyzers", ()): + i.analyzer(a) + return i + + +@dataclass_transform(field_specifiers=(mapped_field,)) +class AsyncDocument(DocumentBase, metaclass=AsyncIndexMeta): + """ + Model-like class for persisting documents in elasticsearch. + """ + + if TYPE_CHECKING: + _index: AsyncIndex + + @classmethod + def _get_using(cls, using: Optional[AsyncUsingType] = None) -> AsyncUsingType: + return cast(AsyncUsingType, using or cls._index._using) + + @classmethod + def _get_connection( + cls, using: Optional[AsyncUsingType] = None + ) -> "AsyncElasticsearch": + return get_connection(cls._get_using(using)) + + @classmethod + async def init( + cls, index: Optional[str] = None, using: Optional[AsyncUsingType] = None + ) -> None: + """ + Create the index and populate the mappings in elasticsearch. + """ + i = cls._index + if index: + i = i.clone(name=index) + await i.save(using=using) + + @classmethod + def search( + cls, using: Optional[AsyncUsingType] = None, index: Optional[str] = None + ) -> AsyncSearch[Self]: + """ + Create an :class:`~elasticsearch.dsl.Search` instance that will search + over this ``Document``. + """ + return AsyncSearch( + using=cls._get_using(using), index=cls._default_index(index), doc_type=[cls] + ) + + @classmethod + async def get( + cls, + id: str, + using: Optional[AsyncUsingType] = None, + index: Optional[str] = None, + **kwargs: Any, + ) -> Optional[Self]: + """ + Retrieve a single document from elasticsearch using its ``id``. + + :arg id: ``id`` of the document to be retrieved + :arg index: elasticsearch index to use, if the ``Document`` is + associated with an index this can be omitted. + :arg using: connection alias to use, defaults to ``'default'`` + + Any additional keyword arguments will be passed to + ``Elasticsearch.get`` unchanged. + """ + es = cls._get_connection(using) + doc = await es.get(index=cls._default_index(index), id=id, **kwargs) + if not doc.get("found", False): + return None + return cls.from_es(doc) + + @classmethod + async def exists( + cls, + id: str, + using: Optional[AsyncUsingType] = None, + index: Optional[str] = None, + **kwargs: Any, + ) -> bool: + """ + Check whether a document with the given ``id`` exists in elasticsearch. + + :arg id: ``id`` of the document to check for + :arg index: elasticsearch index to use, if the ``Document`` is + associated with an index this can be omitted. + :arg using: connection alias to use, defaults to ``'default'`` + + Any additional keyword arguments will be passed to + ``Elasticsearch.exists`` unchanged. + """ + es = cls._get_connection(using) + return bool(await es.exists(index=cls._default_index(index), id=id, **kwargs)) + + @classmethod + async def mget( + cls, + docs: List[Dict[str, Any]], + using: Optional[AsyncUsingType] = None, + index: Optional[str] = None, + raise_on_error: bool = True, + missing: str = "none", + **kwargs: Any, + ) -> List[Optional[Self]]: + r""" + Retrieve multiple documents by their ``id``\s. Returns a list of instances + in the same order as requested.
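For orientation, retrieving documents through the class methods above would be used roughly as in the sketch below; the ``Article`` document class, its fields, the index name and the connection setup are hypothetical examples, not part of this patch::

    import asyncio

    from elasticsearch.dsl import AsyncDocument, Date, Keyword, Text, async_connections


    class Article(AsyncDocument):  # hypothetical example document
        title = Text()
        category = Keyword()
        published = Date()

        class Index:
            name = "articles"  # hypothetical index name


    async def main() -> None:
        # assumes a local cluster and a default async connection
        async_connections.create_connection(hosts=["http://localhost:9200"])
        await Article.init()  # create the index and mappings
        if await Article.exists(id="42"):
            article = await Article.get(id="42")
            print(article.title if article else None)
        # mget accepts plain ids or full doc specifications
        print(len(await Article.mget(["1", "2", "3"], missing="skip")))


    asyncio.run(main())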
+ + :arg docs: list of ``id``\s of the documents to be retrieved or a list + of document specifications as per + https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html + :arg index: elasticsearch index to use, if the ``Document`` is + associated with an index this can be omitted. + :arg using: connection alias to use, defaults to ``'default'`` + :arg missing: what to do when one of the documents requested is not + found. Valid options are ``'none'`` (use ``None``), ``'raise'`` (raise + ``NotFoundError``) or ``'skip'`` (ignore the missing document). + + Any additional keyword arguments will be passed to + ``Elasticsearch.mget`` unchanged. + """ + if missing not in ("raise", "skip", "none"): + raise ValueError("'missing' must be 'raise', 'skip', or 'none'.") + es = cls._get_connection(using) + body = { + "docs": [ + doc if isinstance(doc, collections.abc.Mapping) else {"_id": doc} + for doc in docs + ] + } + results = await es.mget(index=cls._default_index(index), body=body, **kwargs) + + objs: List[Optional[Self]] = [] + error_docs: List[Self] = [] + missing_docs: List[Self] = [] + for doc in results["docs"]: + if doc.get("found"): + if error_docs or missing_docs: + # We're going to raise an exception anyway, so avoid an + # expensive call to cls.from_es(). + continue + + objs.append(cls.from_es(doc)) + + elif doc.get("error"): + if raise_on_error: + error_docs.append(doc) + if missing == "none": + objs.append(None) + + # The doc didn't cause an error, but the doc also wasn't found. + elif missing == "raise": + missing_docs.append(doc) + elif missing == "none": + objs.append(None) + + if error_docs: + error_ids = [doc["_id"] for doc in error_docs] + message = "Required routing not provided for documents %s." + message %= ", ".join(error_ids) + raise RequestError(400, message, error_docs) # type: ignore[arg-type] + if missing_docs: + missing_ids = [doc["_id"] for doc in missing_docs] + message = f"Documents {', '.join(missing_ids)} not found." + raise NotFoundError(404, message, {"docs": missing_docs}) # type: ignore[arg-type] + return objs + + async def delete( + self, + using: Optional[AsyncUsingType] = None, + index: Optional[str] = None, + **kwargs: Any, + ) -> None: + """ + Delete the instance in elasticsearch. + + :arg index: elasticsearch index to use, if the ``Document`` is + associated with an index this can be omitted. + :arg using: connection alias to use, defaults to ``'default'`` + + Any additional keyword arguments will be passed to + ``Elasticsearch.delete`` unchanged. 
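Continuing the hypothetical ``Article`` sketch from above, the delete and partial-update flow added here would be driven roughly like this::

    async def rename_and_remove(doc_id: str) -> None:
        article = await Article.get(id=doc_id)
        if article is None:
            return
        # partial update: only the listed fields are sent to Elasticsearch
        await article.update(title="New Document Title!", retry_on_conflict=3)
        # delete() reuses seq_no/primary_term from article.meta for optimistic concurrency
        await article.delete()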
+ """ + es = self._get_connection(using) + # extract routing etc from meta + doc_meta = {k: self.meta[k] for k in DOC_META_FIELDS if k in self.meta} + + # Optimistic concurrency control + if "seq_no" in self.meta and "primary_term" in self.meta: + doc_meta["if_seq_no"] = self.meta["seq_no"] + doc_meta["if_primary_term"] = self.meta["primary_term"] + + doc_meta.update(kwargs) + i = self._get_index(index) + assert i is not None + + await es.delete(index=i, **doc_meta) + + async def update( + self, + using: Optional[AsyncUsingType] = None, + index: Optional[str] = None, + detect_noop: bool = True, + doc_as_upsert: bool = False, + refresh: bool = False, + retry_on_conflict: Optional[int] = None, + script: Optional[Union[str, Dict[str, Any]]] = None, + script_id: Optional[str] = None, + scripted_upsert: bool = False, + upsert: Optional[Dict[str, Any]] = None, + return_doc_meta: bool = False, + **fields: Any, + ) -> Any: + """ + Partial update of the document, specify fields you wish to update and + both the instance and the document in elasticsearch will be updated:: + + doc = MyDocument(title='Document Title!') + doc.save() + doc.update(title='New Document Title!') + + :arg index: elasticsearch index to use, if the ``Document`` is + associated with an index this can be omitted. + :arg using: connection alias to use, defaults to ``'default'`` + :arg detect_noop: Set to ``False`` to disable noop detection. + :arg refresh: Control when the changes made by this request are visible + to search. Set to ``True`` for immediate effect. + :arg retry_on_conflict: In between the get and indexing phases of the + update, it is possible that another process might have already + updated the same document. By default, the update will fail with a + version conflict exception. The retry_on_conflict parameter + controls how many times to retry the update before finally throwing + an exception. + :arg doc_as_upsert: Instead of sending a partial doc plus an upsert + doc, setting doc_as_upsert to true will use the contents of doc as + the upsert value + :arg script: the source code of the script as a string, or a dictionary + with script attributes to update. + :arg return_doc_meta: set to ``True`` to return all metadata from the + index API call instead of only the operation result + + :return: operation result noop/updated + """ + body: Dict[str, Any] = { + "doc_as_upsert": doc_as_upsert, + "detect_noop": detect_noop, + } + + # scripted update + if script or script_id: + if upsert is not None: + body["upsert"] = upsert + + if script: + if isinstance(script, str): + script = {"source": script} + else: + script = {"id": script_id} + + if "params" not in script: + script["params"] = fields + else: + script["params"].update(fields) + + body["script"] = script + body["scripted_upsert"] = scripted_upsert + + # partial document update + else: + if not fields: + raise IllegalOperation( + "You cannot call update() without updating individual fields or a script. " + "If you wish to update the entire object use save()." 
+ ) + + # update given fields locally + merge(self, fields) + + # prepare data for ES + values = self.to_dict(skip_empty=False) + + # if fields were given: partial update + body["doc"] = {k: values.get(k) for k in fields.keys()} + + # extract routing etc from meta + doc_meta = {k: self.meta[k] for k in DOC_META_FIELDS if k in self.meta} + + if retry_on_conflict is not None: + doc_meta["retry_on_conflict"] = retry_on_conflict + + # Optimistic concurrency control + if ( + retry_on_conflict in (None, 0) + and "seq_no" in self.meta + and "primary_term" in self.meta + ): + doc_meta["if_seq_no"] = self.meta["seq_no"] + doc_meta["if_primary_term"] = self.meta["primary_term"] + + i = self._get_index(index) + assert i is not None + + meta = await self._get_connection(using).update( + index=i, body=body, refresh=refresh, **doc_meta + ) + + # update meta information from ES + for k in META_FIELDS: + if "_" + k in meta: + setattr(self.meta, k, meta["_" + k]) + + return meta if return_doc_meta else meta["result"] + + async def save( + self, + using: Optional[AsyncUsingType] = None, + index: Optional[str] = None, + validate: bool = True, + skip_empty: bool = True, + return_doc_meta: bool = False, + **kwargs: Any, + ) -> Any: + """ + Save the document into elasticsearch. If the document doesn't exist it + is created, it is overwritten otherwise. Returns ``True`` if this + operations resulted in new document being created. + + :arg index: elasticsearch index to use, if the ``Document`` is + associated with an index this can be omitted. + :arg using: connection alias to use, defaults to ``'default'`` + :arg validate: set to ``False`` to skip validating the document + :arg skip_empty: if set to ``False`` will cause empty values (``None``, + ``[]``, ``{}``) to be left on the document. Those values will be + stripped out otherwise as they make no difference in elasticsearch. + :arg return_doc_meta: set to ``True`` to return all metadata from the + update API call instead of only the operation result + + Any additional keyword arguments will be passed to + ``Elasticsearch.index`` unchanged. + + :return: operation result created/updated + """ + if validate: + self.full_clean() + + es = self._get_connection(using) + # extract routing etc from meta + doc_meta = {k: self.meta[k] for k in DOC_META_FIELDS if k in self.meta} + + # Optimistic concurrency control + if "seq_no" in self.meta and "primary_term" in self.meta: + doc_meta["if_seq_no"] = self.meta["seq_no"] + doc_meta["if_primary_term"] = self.meta["primary_term"] + + doc_meta.update(kwargs) + i = self._get_index(index) + assert i is not None + + meta = await es.index( + index=i, + body=self.to_dict(skip_empty=skip_empty), + **doc_meta, + ) + # update meta information from ES + for k in META_FIELDS: + if "_" + k in meta: + setattr(self.meta, k, meta["_" + k]) + + return meta if return_doc_meta else meta["result"] + + @classmethod + async def bulk( + cls, + actions: AsyncIterable[Union[Self, Dict[str, Any]]], + using: Optional[AsyncUsingType] = None, + index: Optional[str] = None, + validate: bool = True, + skip_empty: bool = True, + **kwargs: Any, + ) -> Tuple[int, Union[int, List[Any]]]: + """ + Allows to perform multiple indexing operations in a single request. + + :arg actions: a generator that returns document instances to be indexed, + bulk operation dictionaries. + :arg using: connection alias to use, defaults to ``'default'`` + :arg index: Elasticsearch index to use, if the ``Document`` is + associated with an index this can be omitted. 
+ :arg validate: set to ``False`` to skip validating the documents + :arg skip_empty: if set to ``False`` will cause empty values (``None``, + ``[]``, ``{}``) to be left on the document. Those values will be + stripped out otherwise as they make no difference in Elasticsearch. + + Any additional keyword arguments will be passed to + ``Elasticsearch.bulk`` unchanged. + + :return: bulk operation results + """ + es = cls._get_connection(using) + + i = cls._default_index(index) + assert i is not None + + class Generate: + def __init__( + self, + doc_iterator: AsyncIterable[Union[AsyncDocument, Dict[str, Any]]], + ): + self.doc_iterator = doc_iterator.__aiter__() + + def __aiter__(self) -> Self: + return self + + async def __anext__(self) -> Dict[str, Any]: + doc: Optional[Union[AsyncDocument, Dict[str, Any]]] = ( + await self.doc_iterator.__anext__() + ) + + if isinstance(doc, dict): + action = doc + doc = None + if "_source" in action and isinstance( + action["_source"], AsyncDocument + ): + doc = action["_source"] + if validate: # pragma: no cover + doc.full_clean() + action["_source"] = doc.to_dict( + include_meta=False, skip_empty=skip_empty + ) + elif doc is not None: + if validate: # pragma: no cover + doc.full_clean() + action = doc.to_dict(include_meta=True, skip_empty=skip_empty) + if "_index" not in action: + action["_index"] = i + return action + + return await async_bulk(es, Generate(actions), **kwargs) diff --git a/elasticsearch/dsl/_async/faceted_search.py b/elasticsearch/dsl/_async/faceted_search.py new file mode 100644 index 000000000..545392254 --- /dev/null +++ b/elasticsearch/dsl/_async/faceted_search.py @@ -0,0 +1,50 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from typing import TYPE_CHECKING + +from ..faceted_search_base import FacetedResponse, FacetedSearchBase +from ..utils import _R +from .search import AsyncSearch + +if TYPE_CHECKING: + from ..response import Response + + +class AsyncFacetedSearch(FacetedSearchBase[_R]): + _s: AsyncSearch[_R] + + async def count(self) -> int: + return await self._s.count() + + def search(self) -> AsyncSearch[_R]: + """ + Returns the base Search object to which the facets are added. + + You can customize the query by overriding this method and returning a + modified search object. + """ + s = AsyncSearch[_R](doc_type=self.doc_types, index=self.index, using=self.using) + return s.response_class(FacetedResponse) + + async def execute(self) -> "Response[_R]": + """ + Execute the search and return the response. 
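A small faceted search built on this class might look as follows; the document class, field names and facet definitions are illustrative only::

    from elasticsearch.dsl import AsyncFacetedSearch, TermsFacet


    class ArticleSearch(AsyncFacetedSearch):
        doc_types = [Article]  # hypothetical document class from the earlier sketch
        fields = ["title", "category"]
        facets = {"category": TermsFacet(field="category")}


    async def run_faceted_search() -> None:
        response = await ArticleSearch("python").execute()
        for value, count, selected in response.facets.category:
            print(value, count, selected)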
+ """ + r = await self._s.execute() + r._faceted_search = self + return r diff --git a/elasticsearch/dsl/_async/index.py b/elasticsearch/dsl/_async/index.py new file mode 100644 index 000000000..58369579b --- /dev/null +++ b/elasticsearch/dsl/_async/index.py @@ -0,0 +1,639 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from typing import TYPE_CHECKING, Any, Dict, Optional + +from typing_extensions import Self + +from ..async_connections import get_connection +from ..exceptions import IllegalOperation +from ..index_base import IndexBase +from ..utils import AsyncUsingType +from .mapping import AsyncMapping +from .search import AsyncSearch +from .update_by_query import AsyncUpdateByQuery + +if TYPE_CHECKING: + from elastic_transport import ObjectApiResponse + + from elasticsearch import AsyncElasticsearch + + +class AsyncIndexTemplate: + def __init__( + self, + name: str, + template: str, + index: Optional["AsyncIndex"] = None, + order: Optional[int] = None, + **kwargs: Any, + ): + if index is None: + self._index = AsyncIndex(template, **kwargs) + else: + if kwargs: + raise ValueError( + "You cannot specify options for Index when" + " passing an Index instance." + ) + self._index = index.clone() + self._index._name = template + self._template_name = name + self.order = order + + def __getattr__(self, attr_name: str) -> Any: + return getattr(self._index, attr_name) + + def to_dict(self) -> Dict[str, Any]: + d = self._index.to_dict() + d["index_patterns"] = [self._index._name] + if self.order is not None: + d["order"] = self.order + return d + + async def save( + self, using: Optional[AsyncUsingType] = None + ) -> "ObjectApiResponse[Any]": + es = get_connection(using or self._index._using) + return await es.indices.put_template( + name=self._template_name, body=self.to_dict() + ) + + +class AsyncComposableIndexTemplate: + def __init__( + self, + name: str, + template: str, + index: Optional["AsyncIndex"] = None, + priority: Optional[int] = None, + **kwargs: Any, + ): + if index is None: + self._index = AsyncIndex(template, **kwargs) + else: + if kwargs: + raise ValueError( + "You cannot specify options for Index when" + " passing an Index instance." 
+ ) + self._index = index.clone() + self._index._name = template + self._template_name = name + self.priority = priority + + def __getattr__(self, attr_name: str) -> Any: + return getattr(self._index, attr_name) + + def to_dict(self) -> Dict[str, Any]: + d: Dict[str, Any] = {"template": self._index.to_dict()} + d["index_patterns"] = [self._index._name] + if self.priority is not None: + d["priority"] = self.priority + return d + + async def save( + self, using: Optional[AsyncUsingType] = None + ) -> "ObjectApiResponse[Any]": + es = get_connection(using or self._index._using) + return await es.indices.put_index_template( + name=self._template_name, **self.to_dict() + ) + + +class AsyncIndex(IndexBase): + _using: AsyncUsingType + + if TYPE_CHECKING: + + def get_or_create_mapping(self) -> AsyncMapping: ... + + def __init__(self, name: str, using: AsyncUsingType = "default"): + """ + :arg name: name of the index + :arg using: connection alias to use, defaults to ``'default'`` + """ + super().__init__(name, AsyncMapping, using=using) + + def _get_connection( + self, using: Optional[AsyncUsingType] = None + ) -> "AsyncElasticsearch": + if self._name is None: + raise ValueError("You cannot perform API calls on the default index.") + return get_connection(using or self._using) + + connection = property(_get_connection) + + def as_template( + self, + template_name: str, + pattern: Optional[str] = None, + order: Optional[int] = None, + ) -> AsyncIndexTemplate: + return AsyncIndexTemplate( + template_name, pattern or self._name, index=self, order=order + ) + + def as_composable_template( + self, + template_name: str, + pattern: Optional[str] = None, + priority: Optional[int] = None, + ) -> AsyncComposableIndexTemplate: + return AsyncComposableIndexTemplate( + template_name, pattern or self._name, index=self, priority=priority + ) + + async def load_mappings(self, using: Optional[AsyncUsingType] = None) -> None: + await self.get_or_create_mapping().update_from_es( + self._name, using=using or self._using + ) + + def clone( + self, name: Optional[str] = None, using: Optional[AsyncUsingType] = None + ) -> Self: + """ + Create a copy of the instance with another name or connection alias. + Useful for creating multiple indices with shared configuration:: + + i = Index('base-index') + i.settings(number_of_shards=1) + i.create() + + i2 = i.clone('other-index') + i2.create() + + :arg name: name of the index + :arg using: connection alias to use, defaults to ``'default'`` + """ + i = self.__class__(name or self._name, using=using or self._using) + i._settings = self._settings.copy() + i._aliases = self._aliases.copy() + i._analysis = self._analysis.copy() + i._doc_types = self._doc_types[:] + if self._mapping is not None: + i._mapping = self._mapping._clone() + return i + + def search(self, using: Optional[AsyncUsingType] = None) -> AsyncSearch: + """ + Return a :class:`~elasticsearch.dsl.Search` object searching over the + index (or all the indices belonging to this template) and its + ``Document``\\s. + """ + return AsyncSearch( + using=using or self._using, index=self._name, doc_type=self._doc_types + ) + + def updateByQuery( + self, using: Optional[AsyncUsingType] = None + ) -> AsyncUpdateByQuery: + """ + Return a :class:`~elasticsearch.dsl.UpdateByQuery` object searching over the index + (or all the indices belonging to this template) and updating Documents that match + the search criteria. 
+ + For more information, see here: + https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html + """ + return AsyncUpdateByQuery( + using=using or self._using, + index=self._name, + ) + + async def create( + self, using: Optional[AsyncUsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Creates the index in elasticsearch. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.create`` unchanged. + """ + return await self._get_connection(using).indices.create( + index=self._name, body=self.to_dict(), **kwargs + ) + + async def is_closed(self, using: Optional[AsyncUsingType] = None) -> bool: + state = await self._get_connection(using).cluster.state( + index=self._name, metric="metadata" + ) + return bool(state["metadata"]["indices"][self._name]["state"] == "close") + + async def save( + self, using: Optional[AsyncUsingType] = None + ) -> "Optional[ObjectApiResponse[Any]]": + """ + Sync the index definition with elasticsearch, creating the index if it + doesn't exist and updating its settings and mappings if it does. + + Note some settings and mapping changes cannot be done on an open + index (or at all on an existing index) and for those this method will + fail with the underlying exception. + """ + if not await self.exists(using=using): + return await self.create(using=using) + + body = self.to_dict() + settings = body.pop("settings", {}) + analysis = settings.pop("analysis", None) + current_settings = (await self.get_settings(using=using))[self._name][ + "settings" + ]["index"] + if analysis: + if await self.is_closed(using=using): + # closed index, update away + settings["analysis"] = analysis + else: + # compare analysis definition, if all analysis objects are + # already defined as requested, skip analysis update and + # proceed, otherwise raise IllegalOperation + existing_analysis = current_settings.get("analysis", {}) + if any( + existing_analysis.get(section, {}).get(k, None) + != analysis[section][k] + for section in analysis + for k in analysis[section] + ): + raise IllegalOperation( + "You cannot update analysis configuration on an open index, " + "you need to close index %s first." % self._name + ) + + # try and update the settings + if settings: + settings = settings.copy() + for k, v in list(settings.items()): + if k in current_settings and current_settings[k] == str(v): + del settings[k] + + if settings: + await self.put_settings(using=using, body=settings) + + # update the mappings, any conflict in the mappings will result in an + # exception + mappings = body.pop("mappings", {}) + if mappings: + return await self.put_mapping(using=using, body=mappings) + + return None + + async def analyze( + self, using: Optional[AsyncUsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Perform the analysis process on a text and return the tokens breakdown + of the text. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.analyze`` unchanged. + """ + return await self._get_connection(using).indices.analyze( + index=self._name, **kwargs + ) + + async def refresh( + self, using: Optional[AsyncUsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Performs a refresh operation on the index. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.refresh`` unchanged. 
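In application code the index object added here would typically be configured once and then persisted; the index name and settings below are placeholders::

    from elasticsearch.dsl import AsyncIndex

    articles_index = AsyncIndex("articles-v1")  # hypothetical index name
    articles_index.settings(number_of_shards=1, number_of_replicas=0)
    articles_index.aliases(articles={})


    async def set_up_index() -> None:
        # create() when missing, otherwise sync settings and mappings as described above
        await articles_index.save()
        await articles_index.refresh()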
+ """ + return await self._get_connection(using).indices.refresh( + index=self._name, **kwargs + ) + + async def flush( + self, using: Optional[AsyncUsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Performs a flush operation on the index. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.flush`` unchanged. + """ + return await self._get_connection(using).indices.flush( + index=self._name, **kwargs + ) + + async def get( + self, using: Optional[AsyncUsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + The get index API allows to retrieve information about the index. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.get`` unchanged. + """ + return await self._get_connection(using).indices.get(index=self._name, **kwargs) + + async def open( + self, using: Optional[AsyncUsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Opens the index in elasticsearch. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.open`` unchanged. + """ + return await self._get_connection(using).indices.open( + index=self._name, **kwargs + ) + + async def close( + self, using: Optional[AsyncUsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Closes the index in elasticsearch. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.close`` unchanged. + """ + return await self._get_connection(using).indices.close( + index=self._name, **kwargs + ) + + async def delete( + self, using: Optional[AsyncUsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Deletes the index in elasticsearch. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.delete`` unchanged. + """ + return await self._get_connection(using).indices.delete( + index=self._name, **kwargs + ) + + async def exists( + self, using: Optional[AsyncUsingType] = None, **kwargs: Any + ) -> bool: + """ + Returns ``True`` if the index already exists in elasticsearch. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.exists`` unchanged. + """ + return bool( + await self._get_connection(using).indices.exists(index=self._name, **kwargs) + ) + + async def put_mapping( + self, using: Optional[AsyncUsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Register specific mapping definition for a specific type. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.put_mapping`` unchanged. + """ + return await self._get_connection(using).indices.put_mapping( + index=self._name, **kwargs + ) + + async def get_mapping( + self, using: Optional[AsyncUsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Retrieve specific mapping definition for a specific type. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.get_mapping`` unchanged. + """ + return await self._get_connection(using).indices.get_mapping( + index=self._name, **kwargs + ) + + async def get_field_mapping( + self, using: Optional[AsyncUsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Retrieve mapping definition of a specific field. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.get_field_mapping`` unchanged. 
+ """ + return await self._get_connection(using).indices.get_field_mapping( + index=self._name, **kwargs + ) + + async def put_alias( + self, using: Optional[AsyncUsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Create an alias for the index. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.put_alias`` unchanged. + """ + return await self._get_connection(using).indices.put_alias( + index=self._name, **kwargs + ) + + async def exists_alias( + self, using: Optional[AsyncUsingType] = None, **kwargs: Any + ) -> bool: + """ + Return a boolean indicating whether given alias exists for this index. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.exists_alias`` unchanged. + """ + return bool( + await self._get_connection(using).indices.exists_alias( + index=self._name, **kwargs + ) + ) + + async def get_alias( + self, using: Optional[AsyncUsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Retrieve a specified alias. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.get_alias`` unchanged. + """ + return await self._get_connection(using).indices.get_alias( + index=self._name, **kwargs + ) + + async def delete_alias( + self, using: Optional[AsyncUsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Delete specific alias. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.delete_alias`` unchanged. + """ + return await self._get_connection(using).indices.delete_alias( + index=self._name, **kwargs + ) + + async def get_settings( + self, using: Optional[AsyncUsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Retrieve settings for the index. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.get_settings`` unchanged. + """ + return await self._get_connection(using).indices.get_settings( + index=self._name, **kwargs + ) + + async def put_settings( + self, using: Optional[AsyncUsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Change specific index level settings in real time. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.put_settings`` unchanged. + """ + return await self._get_connection(using).indices.put_settings( + index=self._name, **kwargs + ) + + async def stats( + self, using: Optional[AsyncUsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Retrieve statistics on different operations happening on the index. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.stats`` unchanged. + """ + return await self._get_connection(using).indices.stats( + index=self._name, **kwargs + ) + + async def segments( + self, using: Optional[AsyncUsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Provide low level segments information that a Lucene index (shard + level) is built with. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.segments`` unchanged. + """ + return await self._get_connection(using).indices.segments( + index=self._name, **kwargs + ) + + async def validate_query( + self, using: Optional[AsyncUsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Validate a potentially expensive query without executing it. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.validate_query`` unchanged. 
+ """ + return await self._get_connection(using).indices.validate_query( + index=self._name, **kwargs + ) + + async def clear_cache( + self, using: Optional[AsyncUsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Clear all caches or specific cached associated with the index. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.clear_cache`` unchanged. + """ + return await self._get_connection(using).indices.clear_cache( + index=self._name, **kwargs + ) + + async def recovery( + self, using: Optional[AsyncUsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + The indices recovery API provides insight into on-going shard + recoveries for the index. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.recovery`` unchanged. + """ + return await self._get_connection(using).indices.recovery( + index=self._name, **kwargs + ) + + async def shard_stores( + self, using: Optional[AsyncUsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Provides store information for shard copies of the index. Store + information reports on which nodes shard copies exist, the shard copy + version, indicating how recent they are, and any exceptions encountered + while opening the shard index or from earlier engine failure. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.shard_stores`` unchanged. + """ + return await self._get_connection(using).indices.shard_stores( + index=self._name, **kwargs + ) + + async def forcemerge( + self, using: Optional[AsyncUsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + The force merge API allows to force merging of the index through an + API. The merge relates to the number of segments a Lucene index holds + within each shard. The force merge operation allows to reduce the + number of segments by merging them. + + This call will block until the merge is complete. If the http + connection is lost, the request will continue in the background, and + any new requests will block until the previous force merge is complete. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.forcemerge`` unchanged. + """ + return await self._get_connection(using).indices.forcemerge( + index=self._name, **kwargs + ) + + async def shrink( + self, using: Optional[AsyncUsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + The shrink index API allows you to shrink an existing index into a new + index with fewer primary shards. The number of primary shards in the + target index must be a factor of the shards in the source index. For + example an index with 8 primary shards can be shrunk into 4, 2 or 1 + primary shards or an index with 15 primary shards can be shrunk into 5, + 3 or 1. If the number of shards in the index is a prime number it can + only be shrunk into a single primary shard. Before shrinking, a + (primary or replica) copy of every shard in the index must be present + on the same node. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.shrink`` unchanged. + """ + return await self._get_connection(using).indices.shrink( + index=self._name, **kwargs + ) diff --git a/elasticsearch/dsl/_async/mapping.py b/elasticsearch/dsl/_async/mapping.py new file mode 100644 index 000000000..7ef9c6dac --- /dev/null +++ b/elasticsearch/dsl/_async/mapping.py @@ -0,0 +1,49 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from typing import List, Optional, Union + +from typing_extensions import Self + +from ..async_connections import get_connection +from ..mapping_base import MappingBase +from ..utils import AsyncUsingType + + +class AsyncMapping(MappingBase): + @classmethod + async def from_es( + cls, index: Optional[Union[str, List[str]]], using: AsyncUsingType = "default" + ) -> Self: + m = cls() + await m.update_from_es(index, using) + return m + + async def update_from_es( + self, index: Optional[Union[str, List[str]]], using: AsyncUsingType = "default" + ) -> None: + es = get_connection(using) + raw = await es.indices.get_mapping(index=index) + _, raw = raw.popitem() + self._update_from_dict(raw["mappings"]) + + async def save(self, index: str, using: AsyncUsingType = "default") -> None: + from .index import AsyncIndex + + i = AsyncIndex(index, using=using) + i.mapping(self) + await i.save() diff --git a/elasticsearch/dsl/_async/search.py b/elasticsearch/dsl/_async/search.py new file mode 100644 index 000000000..42eb142fd --- /dev/null +++ b/elasticsearch/dsl/_async/search.py @@ -0,0 +1,233 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import contextlib +from typing import ( + TYPE_CHECKING, + Any, + AsyncIterator, + Dict, + Iterator, + List, + Optional, + cast, +) + +from typing_extensions import Self + +from elasticsearch.exceptions import ApiError +from elasticsearch.helpers import async_scan + +from ..async_connections import get_connection +from ..response import Response +from ..search_base import MultiSearchBase, SearchBase +from ..utils import _R, AsyncUsingType, AttrDict + + +class AsyncSearch(SearchBase[_R]): + _using: AsyncUsingType + + def __aiter__(self) -> AsyncIterator[_R]: + """ + Iterate over the hits. 
+ """ + + class ResultsIterator(AsyncIterator[_R]): + def __init__(self, search: AsyncSearch[_R]): + self.search = search + self.iterator: Optional[Iterator[_R]] = None + + async def __anext__(self) -> _R: + if self.iterator is None: + self.iterator = iter(await self.search.execute()) + try: + return next(self.iterator) + except StopIteration: + raise StopAsyncIteration() + + return ResultsIterator(self) + + async def count(self) -> int: + """ + Return the number of hits matching the query and filters. Note that + only the actual number is returned. + """ + if hasattr(self, "_response") and self._response.hits.total.relation == "eq": # type: ignore[attr-defined] + return cast(int, self._response.hits.total.value) # type: ignore[attr-defined] + + es = get_connection(self._using) + + d = self.to_dict(count=True) + # TODO: failed shards detection + resp = await es.count( + index=self._index, + query=cast(Optional[Dict[str, Any]], d.get("query", None)), + **self._params, + ) + + return cast(int, resp["count"]) + + async def execute(self, ignore_cache: bool = False) -> Response[_R]: + """ + Execute the search and return an instance of ``Response`` wrapping all + the data. + + :arg ignore_cache: if set to ``True``, consecutive calls will hit + ES, while cached result will be ignored. Defaults to `False` + """ + if ignore_cache or not hasattr(self, "_response"): + es = get_connection(self._using) + + self._response = self._response_class( + self, + ( + await es.search( + index=self._index, body=self.to_dict(), **self._params + ) + ).body, + ) + return self._response + + async def scan(self) -> AsyncIterator[_R]: + """ + Turn the search into a scan search and return a generator that will + iterate over all the documents matching the query. + + Use ``params`` method to specify any additional arguments you with to + pass to the underlying ``scan`` helper from ``elasticsearch-py`` - + https://elasticsearch-py.readthedocs.io/en/master/helpers.html#elasticsearch.helpers.scan + + The ``iterate()`` method should be preferred, as it provides similar + functionality using an Elasticsearch point in time. + """ + es = get_connection(self._using) + + async for hit in async_scan( + es, query=self.to_dict(), index=self._index, **self._params + ): + yield self._get_result(cast(AttrDict[Any], hit)) + + async def delete(self) -> AttrDict[Any]: + """ + delete() executes the query by delegating to delete_by_query() + """ + + es = get_connection(self._using) + assert self._index is not None + + return AttrDict( + cast( + Dict[str, Any], + await es.delete_by_query( + index=self._index, body=self.to_dict(), **self._params + ), + ) + ) + + @contextlib.asynccontextmanager + async def point_in_time(self, keep_alive: str = "1m") -> AsyncIterator[Self]: + """ + Open a point in time (pit) that can be used across several searches. + + This method implements a context manager that returns a search object + configured to operate within the created pit. + + :arg keep_alive: the time to live for the point in time, renewed with each search request + """ + es = get_connection(self._using) + + pit = await es.open_point_in_time( + index=self._index or "*", keep_alive=keep_alive + ) + search = self.index().extra(pit={"id": pit["id"], "keep_alive": keep_alive}) + if not search._sort: + search = search.sort("_shard_doc") + yield search + await es.close_point_in_time(id=pit["id"]) + + async def iterate(self, keep_alive: str = "1m") -> AsyncIterator[_R]: + """ + Return a generator that iterates over all the documents matching the query. 
+ + This method uses a point in time to provide consistent results even when + the index is changing. It should be preferred over ``scan()``. + + :arg keep_alive: the time to live for the point in time, renewed with each new search request + """ + async with self.point_in_time(keep_alive=keep_alive) as s: + while True: + r = await s.execute() + for hit in r: + yield hit + if len(r.hits) == 0: + break + s = s.search_after() + + +class AsyncMultiSearch(MultiSearchBase[_R]): + """ + Combine multiple :class:`~elasticsearch.dsl.Search` objects into a single + request. + """ + + _using: AsyncUsingType + + if TYPE_CHECKING: + + def add(self, search: AsyncSearch[_R]) -> Self: ... # type: ignore[override] + + async def execute( + self, ignore_cache: bool = False, raise_on_error: bool = True + ) -> List[Response[_R]]: + """ + Execute the multi search request and return a list of search results. + """ + if ignore_cache or not hasattr(self, "_response"): + es = get_connection(self._using) + + responses = await es.msearch( + index=self._index, body=self.to_dict(), **self._params + ) + + out: List[Response[_R]] = [] + for s, r in zip(self._searches, responses["responses"]): + if r.get("error", False): + if raise_on_error: + raise ApiError("N/A", meta=responses.meta, body=r) + r = None + else: + r = Response(s, r) + out.append(r) + + self._response = out + + return self._response + + +class AsyncEmptySearch(AsyncSearch[_R]): + async def count(self) -> int: + return 0 + + async def execute(self, ignore_cache: bool = False) -> Response[_R]: + return self._response_class(self, {"hits": {"total": 0, "hits": []}}) + + async def scan(self) -> AsyncIterator[_R]: + return + yield # a bit strange, but this forces an empty generator function + + async def delete(self) -> AttrDict[Any]: + return AttrDict[Any]({}) diff --git a/elasticsearch/dsl/_async/update_by_query.py b/elasticsearch/dsl/_async/update_by_query.py new file mode 100644 index 000000000..bff3aa947 --- /dev/null +++ b/elasticsearch/dsl/_async/update_by_query.py @@ -0,0 +1,47 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from typing import TYPE_CHECKING + +from ..async_connections import get_connection +from ..update_by_query_base import UpdateByQueryBase +from ..utils import _R, AsyncUsingType + +if TYPE_CHECKING: + from ..response import UpdateByQueryResponse + + +class AsyncUpdateByQuery(UpdateByQueryBase[_R]): + _using: AsyncUsingType + + async def execute(self) -> "UpdateByQueryResponse[_R]": + """ + Execute the search and return an instance of ``Response`` wrapping all + the data. 
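The update-by-query counterpart is driven in much the same way; the query and the painless script below are placeholders::

    from elasticsearch.dsl import AsyncUpdateByQuery


    async def retag_articles() -> None:
        ubq = (
            AsyncUpdateByQuery(index="articles")
            .query("match", category="python")
            .script(source="ctx._source.category = 'py'")
        )
        response = await ubq.execute()
        print(response.updated)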
+ """ + es = get_connection(self._using) + assert self._index is not None + + self._response = self._response_class( + self, + ( + await es.update_by_query( + index=self._index, **self.to_dict(), **self._params + ) + ).body, + ) + return self._response diff --git a/elasticsearch/dsl/_sync/__init__.py b/elasticsearch/dsl/_sync/__init__.py new file mode 100644 index 000000000..2a87d183f --- /dev/null +++ b/elasticsearch/dsl/_sync/__init__.py @@ -0,0 +1,16 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. diff --git a/elasticsearch/dsl/_sync/document.py b/elasticsearch/dsl/_sync/document.py new file mode 100644 index 000000000..316ece5cb --- /dev/null +++ b/elasticsearch/dsl/_sync/document.py @@ -0,0 +1,514 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import collections.abc +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Iterable, + List, + Optional, + Tuple, + Union, + cast, +) + +from typing_extensions import Self, dataclass_transform + +from elasticsearch.exceptions import NotFoundError, RequestError +from elasticsearch.helpers import bulk + +from .._sync.index import Index +from ..connections import get_connection +from ..document_base import DocumentBase, DocumentMeta, mapped_field +from ..exceptions import IllegalOperation +from ..utils import DOC_META_FIELDS, META_FIELDS, UsingType, merge +from .search import Search + +if TYPE_CHECKING: + from elasticsearch import Elasticsearch + + +class IndexMeta(DocumentMeta): + _index: Index + + # global flag to guard us from associating an Index with the base Document + # class, only user defined subclasses should have an _index attr + _document_initialized = False + + def __new__( + cls, name: str, bases: Tuple[type, ...], attrs: Dict[str, Any] + ) -> "IndexMeta": + new_cls = super().__new__(cls, name, bases, attrs) + if cls._document_initialized: + index_opts = attrs.pop("Index", None) + index = cls.construct_index(index_opts, bases) + new_cls._index = index + index.document(new_cls) + cls._document_initialized = True + return cast(IndexMeta, new_cls) + + @classmethod + def construct_index(cls, opts: Dict[str, Any], bases: Tuple[type, ...]) -> Index: + if opts is None: + for b in bases: + if hasattr(b, "_index"): + return b._index + + # Set None as Index name so it will set _all while making the query + return Index(name=None) + + i = Index(getattr(opts, "name", "*"), using=getattr(opts, "using", "default")) + i.settings(**getattr(opts, "settings", {})) + i.aliases(**getattr(opts, "aliases", {})) + for a in getattr(opts, "analyzers", ()): + i.analyzer(a) + return i + + +@dataclass_transform(field_specifiers=(mapped_field,)) +class Document(DocumentBase, metaclass=IndexMeta): + """ + Model-like class for persisting documents in elasticsearch. + """ + + if TYPE_CHECKING: + _index: Index + + @classmethod + def _get_using(cls, using: Optional[UsingType] = None) -> UsingType: + return cast(UsingType, using or cls._index._using) + + @classmethod + def _get_connection(cls, using: Optional[UsingType] = None) -> "Elasticsearch": + return get_connection(cls._get_using(using)) + + @classmethod + def init( + cls, index: Optional[str] = None, using: Optional[UsingType] = None + ) -> None: + """ + Create the index and populate the mappings in elasticsearch. + """ + i = cls._index + if index: + i = i.clone(name=index) + i.save(using=using) + + @classmethod + def search( + cls, using: Optional[UsingType] = None, index: Optional[str] = None + ) -> Search[Self]: + """ + Create an :class:`~elasticsearch.dsl.Search` instance that will search + over this ``Document``. + """ + return Search( + using=cls._get_using(using), index=cls._default_index(index), doc_type=[cls] + ) + + @classmethod + def get( + cls, + id: str, + using: Optional[UsingType] = None, + index: Optional[str] = None, + **kwargs: Any, + ) -> Optional[Self]: + """ + Retrieve a single document from elasticsearch using its ``id``. + + :arg id: ``id`` of the document to be retrieved + :arg index: elasticsearch index to use, if the ``Document`` is + associated with an index this can be omitted. + :arg using: connection alias to use, defaults to ``'default'`` + + Any additional keyword arguments will be passed to + ``Elasticsearch.get`` unchanged. 
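As a quick illustration of the class-level helpers above (hypothetical index and fields; the top-level imports are assumed to mirror elasticsearch-dsl):

    from elasticsearch.dsl import Document, Keyword, Text, connections

    connections.create_connection(hosts=["http://localhost:9200"])

    class Article(Document):
        title = Text()
        status = Keyword()

        class Index:
            name = "articles"
            settings = {"number_of_shards": 1}

    Article.init()                     # create the index and mappings
    doc = Article.get(id="42")         # fetch a single document by id
    resp = Article.search().query("match", title="python").execute()
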
+ """ + es = cls._get_connection(using) + doc = es.get(index=cls._default_index(index), id=id, **kwargs) + if not doc.get("found", False): + return None + return cls.from_es(doc) + + @classmethod + def exists( + cls, + id: str, + using: Optional[UsingType] = None, + index: Optional[str] = None, + **kwargs: Any, + ) -> bool: + """ + check if exists a single document from elasticsearch using its ``id``. + + :arg id: ``id`` of the document to check if exists + :arg index: elasticsearch index to use, if the ``Document`` is + associated with an index this can be omitted. + :arg using: connection alias to use, defaults to ``'default'`` + + Any additional keyword arguments will be passed to + ``Elasticsearch.exists`` unchanged. + """ + es = cls._get_connection(using) + return bool(es.exists(index=cls._default_index(index), id=id, **kwargs)) + + @classmethod + def mget( + cls, + docs: List[Dict[str, Any]], + using: Optional[UsingType] = None, + index: Optional[str] = None, + raise_on_error: bool = True, + missing: str = "none", + **kwargs: Any, + ) -> List[Optional[Self]]: + r""" + Retrieve multiple document by their ``id``\s. Returns a list of instances + in the same order as requested. + + :arg docs: list of ``id``\s of the documents to be retrieved or a list + of document specifications as per + https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html + :arg index: elasticsearch index to use, if the ``Document`` is + associated with an index this can be omitted. + :arg using: connection alias to use, defaults to ``'default'`` + :arg missing: what to do when one of the documents requested is not + found. Valid options are ``'none'`` (use ``None``), ``'raise'`` (raise + ``NotFoundError``) or ``'skip'`` (ignore the missing document). + + Any additional keyword arguments will be passed to + ``Elasticsearch.mget`` unchanged. + """ + if missing not in ("raise", "skip", "none"): + raise ValueError("'missing' must be 'raise', 'skip', or 'none'.") + es = cls._get_connection(using) + body = { + "docs": [ + doc if isinstance(doc, collections.abc.Mapping) else {"_id": doc} + for doc in docs + ] + } + results = es.mget(index=cls._default_index(index), body=body, **kwargs) + + objs: List[Optional[Self]] = [] + error_docs: List[Self] = [] + missing_docs: List[Self] = [] + for doc in results["docs"]: + if doc.get("found"): + if error_docs or missing_docs: + # We're going to raise an exception anyway, so avoid an + # expensive call to cls.from_es(). + continue + + objs.append(cls.from_es(doc)) + + elif doc.get("error"): + if raise_on_error: + error_docs.append(doc) + if missing == "none": + objs.append(None) + + # The doc didn't cause an error, but the doc also wasn't found. + elif missing == "raise": + missing_docs.append(doc) + elif missing == "none": + objs.append(None) + + if error_docs: + error_ids = [doc["_id"] for doc in error_docs] + message = "Required routing not provided for documents %s." + message %= ", ".join(error_ids) + raise RequestError(400, message, error_docs) # type: ignore[arg-type] + if missing_docs: + missing_ids = [doc["_id"] for doc in missing_docs] + message = f"Documents {', '.join(missing_ids)} not found." + raise NotFoundError(404, message, {"docs": missing_docs}) # type: ignore[arg-type] + return objs + + def delete( + self, + using: Optional[UsingType] = None, + index: Optional[str] = None, + **kwargs: Any, + ) -> None: + """ + Delete the instance in elasticsearch. 
+ + :arg index: elasticsearch index to use, if the ``Document`` is + associated with an index this can be omitted. + :arg using: connection alias to use, defaults to ``'default'`` + + Any additional keyword arguments will be passed to + ``Elasticsearch.delete`` unchanged. + """ + es = self._get_connection(using) + # extract routing etc from meta + doc_meta = {k: self.meta[k] for k in DOC_META_FIELDS if k in self.meta} + + # Optimistic concurrency control + if "seq_no" in self.meta and "primary_term" in self.meta: + doc_meta["if_seq_no"] = self.meta["seq_no"] + doc_meta["if_primary_term"] = self.meta["primary_term"] + + doc_meta.update(kwargs) + i = self._get_index(index) + assert i is not None + + es.delete(index=i, **doc_meta) + + def update( + self, + using: Optional[UsingType] = None, + index: Optional[str] = None, + detect_noop: bool = True, + doc_as_upsert: bool = False, + refresh: bool = False, + retry_on_conflict: Optional[int] = None, + script: Optional[Union[str, Dict[str, Any]]] = None, + script_id: Optional[str] = None, + scripted_upsert: bool = False, + upsert: Optional[Dict[str, Any]] = None, + return_doc_meta: bool = False, + **fields: Any, + ) -> Any: + """ + Partial update of the document, specify fields you wish to update and + both the instance and the document in elasticsearch will be updated:: + + doc = MyDocument(title='Document Title!') + doc.save() + doc.update(title='New Document Title!') + + :arg index: elasticsearch index to use, if the ``Document`` is + associated with an index this can be omitted. + :arg using: connection alias to use, defaults to ``'default'`` + :arg detect_noop: Set to ``False`` to disable noop detection. + :arg refresh: Control when the changes made by this request are visible + to search. Set to ``True`` for immediate effect. + :arg retry_on_conflict: In between the get and indexing phases of the + update, it is possible that another process might have already + updated the same document. By default, the update will fail with a + version conflict exception. The retry_on_conflict parameter + controls how many times to retry the update before finally throwing + an exception. + :arg doc_as_upsert: Instead of sending a partial doc plus an upsert + doc, setting doc_as_upsert to true will use the contents of doc as + the upsert value + :arg script: the source code of the script as a string, or a dictionary + with script attributes to update. + :arg return_doc_meta: set to ``True`` to return all metadata from the + index API call instead of only the operation result + + :return: operation result noop/updated + """ + body: Dict[str, Any] = { + "doc_as_upsert": doc_as_upsert, + "detect_noop": detect_noop, + } + + # scripted update + if script or script_id: + if upsert is not None: + body["upsert"] = upsert + + if script: + if isinstance(script, str): + script = {"source": script} + else: + script = {"id": script_id} + + if "params" not in script: + script["params"] = fields + else: + script["params"].update(fields) + + body["script"] = script + body["scripted_upsert"] = scripted_upsert + + # partial document update + else: + if not fields: + raise IllegalOperation( + "You cannot call update() without updating individual fields or a script. " + "If you wish to update the entire object use save()." 
+ ) + + # update given fields locally + merge(self, fields) + + # prepare data for ES + values = self.to_dict(skip_empty=False) + + # if fields were given: partial update + body["doc"] = {k: values.get(k) for k in fields.keys()} + + # extract routing etc from meta + doc_meta = {k: self.meta[k] for k in DOC_META_FIELDS if k in self.meta} + + if retry_on_conflict is not None: + doc_meta["retry_on_conflict"] = retry_on_conflict + + # Optimistic concurrency control + if ( + retry_on_conflict in (None, 0) + and "seq_no" in self.meta + and "primary_term" in self.meta + ): + doc_meta["if_seq_no"] = self.meta["seq_no"] + doc_meta["if_primary_term"] = self.meta["primary_term"] + + i = self._get_index(index) + assert i is not None + + meta = self._get_connection(using).update( + index=i, body=body, refresh=refresh, **doc_meta + ) + + # update meta information from ES + for k in META_FIELDS: + if "_" + k in meta: + setattr(self.meta, k, meta["_" + k]) + + return meta if return_doc_meta else meta["result"] + + def save( + self, + using: Optional[UsingType] = None, + index: Optional[str] = None, + validate: bool = True, + skip_empty: bool = True, + return_doc_meta: bool = False, + **kwargs: Any, + ) -> Any: + """ + Save the document into elasticsearch. If the document doesn't exist it + is created, it is overwritten otherwise. Returns ``True`` if this + operations resulted in new document being created. + + :arg index: elasticsearch index to use, if the ``Document`` is + associated with an index this can be omitted. + :arg using: connection alias to use, defaults to ``'default'`` + :arg validate: set to ``False`` to skip validating the document + :arg skip_empty: if set to ``False`` will cause empty values (``None``, + ``[]``, ``{}``) to be left on the document. Those values will be + stripped out otherwise as they make no difference in elasticsearch. + :arg return_doc_meta: set to ``True`` to return all metadata from the + update API call instead of only the operation result + + Any additional keyword arguments will be passed to + ``Elasticsearch.index`` unchanged. + + :return: operation result created/updated + """ + if validate: + self.full_clean() + + es = self._get_connection(using) + # extract routing etc from meta + doc_meta = {k: self.meta[k] for k in DOC_META_FIELDS if k in self.meta} + + # Optimistic concurrency control + if "seq_no" in self.meta and "primary_term" in self.meta: + doc_meta["if_seq_no"] = self.meta["seq_no"] + doc_meta["if_primary_term"] = self.meta["primary_term"] + + doc_meta.update(kwargs) + i = self._get_index(index) + assert i is not None + + meta = es.index( + index=i, + body=self.to_dict(skip_empty=skip_empty), + **doc_meta, + ) + # update meta information from ES + for k in META_FIELDS: + if "_" + k in meta: + setattr(self.meta, k, meta["_" + k]) + + return meta if return_doc_meta else meta["result"] + + @classmethod + def bulk( + cls, + actions: Iterable[Union[Self, Dict[str, Any]]], + using: Optional[UsingType] = None, + index: Optional[str] = None, + validate: bool = True, + skip_empty: bool = True, + **kwargs: Any, + ) -> Tuple[int, Union[int, List[Any]]]: + """ + Allows to perform multiple indexing operations in a single request. + + :arg actions: a generator that returns document instances to be indexed, + bulk operation dictionaries. + :arg using: connection alias to use, defaults to ``'default'`` + :arg index: Elasticsearch index to use, if the ``Document`` is + associated with an index this can be omitted. 
+ :arg validate: set to ``False`` to skip validating the documents + :arg skip_empty: if set to ``False`` will cause empty values (``None``, + ``[]``, ``{}``) to be left on the document. Those values will be + stripped out otherwise as they make no difference in Elasticsearch. + + Any additional keyword arguments will be passed to + ``Elasticsearch.bulk`` unchanged. + + :return: bulk operation results + """ + es = cls._get_connection(using) + + i = cls._default_index(index) + assert i is not None + + class Generate: + def __init__( + self, + doc_iterator: Iterable[Union[Document, Dict[str, Any]]], + ): + self.doc_iterator = doc_iterator.__iter__() + + def __iter__(self) -> Self: + return self + + def __next__(self) -> Dict[str, Any]: + doc: Optional[Union[Document, Dict[str, Any]]] = ( + self.doc_iterator.__next__() + ) + + if isinstance(doc, dict): + action = doc + doc = None + if "_source" in action and isinstance(action["_source"], Document): + doc = action["_source"] + if validate: # pragma: no cover + doc.full_clean() + action["_source"] = doc.to_dict( + include_meta=False, skip_empty=skip_empty + ) + elif doc is not None: + if validate: # pragma: no cover + doc.full_clean() + action = doc.to_dict(include_meta=True, skip_empty=skip_empty) + if "_index" not in action: + action["_index"] = i + return action + + return bulk(es, Generate(actions), **kwargs) diff --git a/elasticsearch/dsl/_sync/faceted_search.py b/elasticsearch/dsl/_sync/faceted_search.py new file mode 100644 index 000000000..4bdac90de --- /dev/null +++ b/elasticsearch/dsl/_sync/faceted_search.py @@ -0,0 +1,50 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from typing import TYPE_CHECKING + +from ..faceted_search_base import FacetedResponse, FacetedSearchBase +from ..utils import _R +from .search import Search + +if TYPE_CHECKING: + from ..response import Response + + +class FacetedSearch(FacetedSearchBase[_R]): + _s: Search[_R] + + def count(self) -> int: + return self._s.count() + + def search(self) -> Search[_R]: + """ + Returns the base Search object to which the facets are added. + + You can customize the query by overriding this method and returning a + modified search object. + """ + s = Search[_R](doc_type=self.doc_types, index=self.index, using=self.using) + return s.response_class(FacetedResponse) + + def execute(self) -> "Response[_R]": + """ + Execute the search and return the response. + """ + r = self._s.execute() + r._faceted_search = self + return r diff --git a/elasticsearch/dsl/_sync/index.py b/elasticsearch/dsl/_sync/index.py new file mode 100644 index 000000000..b2d5830d9 --- /dev/null +++ b/elasticsearch/dsl/_sync/index.py @@ -0,0 +1,597 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from typing import TYPE_CHECKING, Any, Dict, Optional + +from typing_extensions import Self + +from ..connections import get_connection +from ..exceptions import IllegalOperation +from ..index_base import IndexBase +from ..utils import UsingType +from .mapping import Mapping +from .search import Search +from .update_by_query import UpdateByQuery + +if TYPE_CHECKING: + from elastic_transport import ObjectApiResponse + + from elasticsearch import Elasticsearch + + +class IndexTemplate: + def __init__( + self, + name: str, + template: str, + index: Optional["Index"] = None, + order: Optional[int] = None, + **kwargs: Any, + ): + if index is None: + self._index = Index(template, **kwargs) + else: + if kwargs: + raise ValueError( + "You cannot specify options for Index when" + " passing an Index instance." + ) + self._index = index.clone() + self._index._name = template + self._template_name = name + self.order = order + + def __getattr__(self, attr_name: str) -> Any: + return getattr(self._index, attr_name) + + def to_dict(self) -> Dict[str, Any]: + d = self._index.to_dict() + d["index_patterns"] = [self._index._name] + if self.order is not None: + d["order"] = self.order + return d + + def save(self, using: Optional[UsingType] = None) -> "ObjectApiResponse[Any]": + es = get_connection(using or self._index._using) + return es.indices.put_template(name=self._template_name, body=self.to_dict()) + + +class ComposableIndexTemplate: + def __init__( + self, + name: str, + template: str, + index: Optional["Index"] = None, + priority: Optional[int] = None, + **kwargs: Any, + ): + if index is None: + self._index = Index(template, **kwargs) + else: + if kwargs: + raise ValueError( + "You cannot specify options for Index when" + " passing an Index instance." + ) + self._index = index.clone() + self._index._name = template + self._template_name = name + self.priority = priority + + def __getattr__(self, attr_name: str) -> Any: + return getattr(self._index, attr_name) + + def to_dict(self) -> Dict[str, Any]: + d: Dict[str, Any] = {"template": self._index.to_dict()} + d["index_patterns"] = [self._index._name] + if self.priority is not None: + d["priority"] = self.priority + return d + + def save(self, using: Optional[UsingType] = None) -> "ObjectApiResponse[Any]": + es = get_connection(using or self._index._using) + return es.indices.put_index_template(name=self._template_name, **self.to_dict()) + + +class Index(IndexBase): + _using: UsingType + + if TYPE_CHECKING: + + def get_or_create_mapping(self) -> Mapping: ... 
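A hypothetical sketch of wiring up the Index class defined here: per-index settings, a custom analyzer, and publication as a composable template (all names invented; the analyzer helper is assumed to be re-exported from elasticsearch.dsl):

    from elasticsearch.dsl import Index, analyzer

    blogs = Index("blogs-v1")
    blogs.settings(number_of_shards=1, number_of_replicas=0)
    blogs.analyzer(
        analyzer("html_text", tokenizer="standard", char_filter=["html_strip"])
    )

    blogs.create()  # create this concrete index

    # or register the same definition as a composable template for future indices
    blogs.as_composable_template("blogs", pattern="blogs-*", priority=100).save()
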
+ + def __init__(self, name: str, using: UsingType = "default"): + """ + :arg name: name of the index + :arg using: connection alias to use, defaults to ``'default'`` + """ + super().__init__(name, Mapping, using=using) + + def _get_connection(self, using: Optional[UsingType] = None) -> "Elasticsearch": + if self._name is None: + raise ValueError("You cannot perform API calls on the default index.") + return get_connection(using or self._using) + + connection = property(_get_connection) + + def as_template( + self, + template_name: str, + pattern: Optional[str] = None, + order: Optional[int] = None, + ) -> IndexTemplate: + return IndexTemplate( + template_name, pattern or self._name, index=self, order=order + ) + + def as_composable_template( + self, + template_name: str, + pattern: Optional[str] = None, + priority: Optional[int] = None, + ) -> ComposableIndexTemplate: + return ComposableIndexTemplate( + template_name, pattern or self._name, index=self, priority=priority + ) + + def load_mappings(self, using: Optional[UsingType] = None) -> None: + self.get_or_create_mapping().update_from_es( + self._name, using=using or self._using + ) + + def clone( + self, name: Optional[str] = None, using: Optional[UsingType] = None + ) -> Self: + """ + Create a copy of the instance with another name or connection alias. + Useful for creating multiple indices with shared configuration:: + + i = Index('base-index') + i.settings(number_of_shards=1) + i.create() + + i2 = i.clone('other-index') + i2.create() + + :arg name: name of the index + :arg using: connection alias to use, defaults to ``'default'`` + """ + i = self.__class__(name or self._name, using=using or self._using) + i._settings = self._settings.copy() + i._aliases = self._aliases.copy() + i._analysis = self._analysis.copy() + i._doc_types = self._doc_types[:] + if self._mapping is not None: + i._mapping = self._mapping._clone() + return i + + def search(self, using: Optional[UsingType] = None) -> Search: + """ + Return a :class:`~elasticsearch.dsl.Search` object searching over the + index (or all the indices belonging to this template) and its + ``Document``\\s. + """ + return Search( + using=using or self._using, index=self._name, doc_type=self._doc_types + ) + + def updateByQuery(self, using: Optional[UsingType] = None) -> UpdateByQuery: + """ + Return a :class:`~elasticsearch.dsl.UpdateByQuery` object searching over the index + (or all the indices belonging to this template) and updating Documents that match + the search criteria. + + For more information, see here: + https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html + """ + return UpdateByQuery( + using=using or self._using, + index=self._name, + ) + + def create( + self, using: Optional[UsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Creates the index in elasticsearch. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.create`` unchanged. 
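Continuing the hypothetical blogs index, the two factory methods above hand back a Search and an UpdateByQuery already scoped to the index, for example:

    s = blogs.search().query("match", title="python")
    for hit in s:
        print(hit.meta.id)

    ubq = blogs.updateByQuery().script(source="ctx._source.views = 0")
    ubq.execute()
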
+ """ + return self._get_connection(using).indices.create( + index=self._name, body=self.to_dict(), **kwargs + ) + + def is_closed(self, using: Optional[UsingType] = None) -> bool: + state = self._get_connection(using).cluster.state( + index=self._name, metric="metadata" + ) + return bool(state["metadata"]["indices"][self._name]["state"] == "close") + + def save( + self, using: Optional[UsingType] = None + ) -> "Optional[ObjectApiResponse[Any]]": + """ + Sync the index definition with elasticsearch, creating the index if it + doesn't exist and updating its settings and mappings if it does. + + Note some settings and mapping changes cannot be done on an open + index (or at all on an existing index) and for those this method will + fail with the underlying exception. + """ + if not self.exists(using=using): + return self.create(using=using) + + body = self.to_dict() + settings = body.pop("settings", {}) + analysis = settings.pop("analysis", None) + current_settings = (self.get_settings(using=using))[self._name]["settings"][ + "index" + ] + if analysis: + if self.is_closed(using=using): + # closed index, update away + settings["analysis"] = analysis + else: + # compare analysis definition, if all analysis objects are + # already defined as requested, skip analysis update and + # proceed, otherwise raise IllegalOperation + existing_analysis = current_settings.get("analysis", {}) + if any( + existing_analysis.get(section, {}).get(k, None) + != analysis[section][k] + for section in analysis + for k in analysis[section] + ): + raise IllegalOperation( + "You cannot update analysis configuration on an open index, " + "you need to close index %s first." % self._name + ) + + # try and update the settings + if settings: + settings = settings.copy() + for k, v in list(settings.items()): + if k in current_settings and current_settings[k] == str(v): + del settings[k] + + if settings: + self.put_settings(using=using, body=settings) + + # update the mappings, any conflict in the mappings will result in an + # exception + mappings = body.pop("mappings", {}) + if mappings: + return self.put_mapping(using=using, body=mappings) + + return None + + def analyze( + self, using: Optional[UsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Perform the analysis process on a text and return the tokens breakdown + of the text. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.analyze`` unchanged. + """ + return self._get_connection(using).indices.analyze(index=self._name, **kwargs) + + def refresh( + self, using: Optional[UsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Performs a refresh operation on the index. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.refresh`` unchanged. + """ + return self._get_connection(using).indices.refresh(index=self._name, **kwargs) + + def flush( + self, using: Optional[UsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Performs a flush operation on the index. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.flush`` unchanged. + """ + return self._get_connection(using).indices.flush(index=self._name, **kwargs) + + def get( + self, using: Optional[UsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + The get index API allows to retrieve information about the index. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.get`` unchanged. 
+ """ + return self._get_connection(using).indices.get(index=self._name, **kwargs) + + def open( + self, using: Optional[UsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Opens the index in elasticsearch. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.open`` unchanged. + """ + return self._get_connection(using).indices.open(index=self._name, **kwargs) + + def close( + self, using: Optional[UsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Closes the index in elasticsearch. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.close`` unchanged. + """ + return self._get_connection(using).indices.close(index=self._name, **kwargs) + + def delete( + self, using: Optional[UsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Deletes the index in elasticsearch. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.delete`` unchanged. + """ + return self._get_connection(using).indices.delete(index=self._name, **kwargs) + + def exists(self, using: Optional[UsingType] = None, **kwargs: Any) -> bool: + """ + Returns ``True`` if the index already exists in elasticsearch. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.exists`` unchanged. + """ + return bool( + self._get_connection(using).indices.exists(index=self._name, **kwargs) + ) + + def put_mapping( + self, using: Optional[UsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Register specific mapping definition for a specific type. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.put_mapping`` unchanged. + """ + return self._get_connection(using).indices.put_mapping( + index=self._name, **kwargs + ) + + def get_mapping( + self, using: Optional[UsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Retrieve specific mapping definition for a specific type. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.get_mapping`` unchanged. + """ + return self._get_connection(using).indices.get_mapping( + index=self._name, **kwargs + ) + + def get_field_mapping( + self, using: Optional[UsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Retrieve mapping definition of a specific field. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.get_field_mapping`` unchanged. + """ + return self._get_connection(using).indices.get_field_mapping( + index=self._name, **kwargs + ) + + def put_alias( + self, using: Optional[UsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Create an alias for the index. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.put_alias`` unchanged. + """ + return self._get_connection(using).indices.put_alias(index=self._name, **kwargs) + + def exists_alias(self, using: Optional[UsingType] = None, **kwargs: Any) -> bool: + """ + Return a boolean indicating whether given alias exists for this index. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.exists_alias`` unchanged. + """ + return bool( + self._get_connection(using).indices.exists_alias(index=self._name, **kwargs) + ) + + def get_alias( + self, using: Optional[UsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Retrieve a specified alias. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.get_alias`` unchanged. 
+ """ + return self._get_connection(using).indices.get_alias(index=self._name, **kwargs) + + def delete_alias( + self, using: Optional[UsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Delete specific alias. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.delete_alias`` unchanged. + """ + return self._get_connection(using).indices.delete_alias( + index=self._name, **kwargs + ) + + def get_settings( + self, using: Optional[UsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Retrieve settings for the index. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.get_settings`` unchanged. + """ + return self._get_connection(using).indices.get_settings( + index=self._name, **kwargs + ) + + def put_settings( + self, using: Optional[UsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Change specific index level settings in real time. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.put_settings`` unchanged. + """ + return self._get_connection(using).indices.put_settings( + index=self._name, **kwargs + ) + + def stats( + self, using: Optional[UsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Retrieve statistics on different operations happening on the index. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.stats`` unchanged. + """ + return self._get_connection(using).indices.stats(index=self._name, **kwargs) + + def segments( + self, using: Optional[UsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Provide low level segments information that a Lucene index (shard + level) is built with. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.segments`` unchanged. + """ + return self._get_connection(using).indices.segments(index=self._name, **kwargs) + + def validate_query( + self, using: Optional[UsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Validate a potentially expensive query without executing it. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.validate_query`` unchanged. + """ + return self._get_connection(using).indices.validate_query( + index=self._name, **kwargs + ) + + def clear_cache( + self, using: Optional[UsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Clear all caches or specific cached associated with the index. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.clear_cache`` unchanged. + """ + return self._get_connection(using).indices.clear_cache( + index=self._name, **kwargs + ) + + def recovery( + self, using: Optional[UsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + The indices recovery API provides insight into on-going shard + recoveries for the index. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.recovery`` unchanged. + """ + return self._get_connection(using).indices.recovery(index=self._name, **kwargs) + + def shard_stores( + self, using: Optional[UsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + Provides store information for shard copies of the index. Store + information reports on which nodes shard copies exist, the shard copy + version, indicating how recent they are, and any exceptions encountered + while opening the shard index or from earlier engine failure. 
+ + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.shard_stores`` unchanged. + """ + return self._get_connection(using).indices.shard_stores( + index=self._name, **kwargs + ) + + def forcemerge( + self, using: Optional[UsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + The force merge API allows to force merging of the index through an + API. The merge relates to the number of segments a Lucene index holds + within each shard. The force merge operation allows to reduce the + number of segments by merging them. + + This call will block until the merge is complete. If the http + connection is lost, the request will continue in the background, and + any new requests will block until the previous force merge is complete. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.forcemerge`` unchanged. + """ + return self._get_connection(using).indices.forcemerge( + index=self._name, **kwargs + ) + + def shrink( + self, using: Optional[UsingType] = None, **kwargs: Any + ) -> "ObjectApiResponse[Any]": + """ + The shrink index API allows you to shrink an existing index into a new + index with fewer primary shards. The number of primary shards in the + target index must be a factor of the shards in the source index. For + example an index with 8 primary shards can be shrunk into 4, 2 or 1 + primary shards or an index with 15 primary shards can be shrunk into 5, + 3 or 1. If the number of shards in the index is a prime number it can + only be shrunk into a single primary shard. Before shrinking, a + (primary or replica) copy of every shard in the index must be present + on the same node. + + Any additional keyword arguments will be passed to + ``Elasticsearch.indices.shrink`` unchanged. + """ + return self._get_connection(using).indices.shrink(index=self._name, **kwargs) diff --git a/elasticsearch/dsl/_sync/mapping.py b/elasticsearch/dsl/_sync/mapping.py new file mode 100644 index 000000000..4ee0f282a --- /dev/null +++ b/elasticsearch/dsl/_sync/mapping.py @@ -0,0 +1,49 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +from typing import List, Optional, Union + +from typing_extensions import Self + +from ..connections import get_connection +from ..mapping_base import MappingBase +from ..utils import UsingType + + +class Mapping(MappingBase): + @classmethod + def from_es( + cls, index: Optional[Union[str, List[str]]], using: UsingType = "default" + ) -> Self: + m = cls() + m.update_from_es(index, using) + return m + + def update_from_es( + self, index: Optional[Union[str, List[str]]], using: UsingType = "default" + ) -> None: + es = get_connection(using) + raw = es.indices.get_mapping(index=index) + _, raw = raw.popitem() + self._update_from_dict(raw["mappings"]) + + def save(self, index: str, using: UsingType = "default") -> None: + from .index import Index + + i = Index(index, using=using) + i.mapping(self) + i.save() diff --git a/elasticsearch/dsl/_sync/search.py b/elasticsearch/dsl/_sync/search.py new file mode 100644 index 000000000..f46364a67 --- /dev/null +++ b/elasticsearch/dsl/_sync/search.py @@ -0,0 +1,218 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import contextlib +from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, cast + +from typing_extensions import Self + +from elasticsearch.exceptions import ApiError +from elasticsearch.helpers import scan + +from ..connections import get_connection +from ..response import Response +from ..search_base import MultiSearchBase, SearchBase +from ..utils import _R, AttrDict, UsingType + + +class Search(SearchBase[_R]): + _using: UsingType + + def __iter__(self) -> Iterator[_R]: + """ + Iterate over the hits. + """ + + class ResultsIterator(Iterator[_R]): + def __init__(self, search: Search[_R]): + self.search = search + self.iterator: Optional[Iterator[_R]] = None + + def __next__(self) -> _R: + if self.iterator is None: + self.iterator = iter(self.search.execute()) + try: + return next(self.iterator) + except StopIteration: + raise StopIteration() + + return ResultsIterator(self) + + def count(self) -> int: + """ + Return the number of hits matching the query and filters. Note that + only the actual number is returned. + """ + if hasattr(self, "_response") and self._response.hits.total.relation == "eq": # type: ignore[attr-defined] + return cast(int, self._response.hits.total.value) # type: ignore[attr-defined] + + es = get_connection(self._using) + + d = self.to_dict(count=True) + # TODO: failed shards detection + resp = es.count( + index=self._index, + query=cast(Optional[Dict[str, Any]], d.get("query", None)), + **self._params, + ) + + return cast(int, resp["count"]) + + def execute(self, ignore_cache: bool = False) -> Response[_R]: + """ + Execute the search and return an instance of ``Response`` wrapping all + the data. 
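A short hypothetical sketch of the sync Mapping helper defined above: pull a live mapping, extend it locally, and push it back (index and field names invented):

    from elasticsearch.dsl import Mapping

    m = Mapping.from_es("blogs-v1")   # read the current mapping from the cluster
    m.field("summary", "text")        # add a field locally
    m.save("blogs-v1")                # write it back through a temporary Index
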
+ + :arg ignore_cache: if set to ``True``, consecutive calls will hit + ES, while cached result will be ignored. Defaults to `False` + """ + if ignore_cache or not hasattr(self, "_response"): + es = get_connection(self._using) + + self._response = self._response_class( + self, + ( + es.search(index=self._index, body=self.to_dict(), **self._params) + ).body, + ) + return self._response + + def scan(self) -> Iterator[_R]: + """ + Turn the search into a scan search and return a generator that will + iterate over all the documents matching the query. + + Use ``params`` method to specify any additional arguments you with to + pass to the underlying ``scan`` helper from ``elasticsearch-py`` - + https://elasticsearch-py.readthedocs.io/en/master/helpers.html#elasticsearch.helpers.scan + + The ``iterate()`` method should be preferred, as it provides similar + functionality using an Elasticsearch point in time. + """ + es = get_connection(self._using) + + for hit in scan(es, query=self.to_dict(), index=self._index, **self._params): + yield self._get_result(cast(AttrDict[Any], hit)) + + def delete(self) -> AttrDict[Any]: + """ + delete() executes the query by delegating to delete_by_query() + """ + + es = get_connection(self._using) + assert self._index is not None + + return AttrDict( + cast( + Dict[str, Any], + es.delete_by_query( + index=self._index, body=self.to_dict(), **self._params + ), + ) + ) + + @contextlib.contextmanager + def point_in_time(self, keep_alive: str = "1m") -> Iterator[Self]: + """ + Open a point in time (pit) that can be used across several searches. + + This method implements a context manager that returns a search object + configured to operate within the created pit. + + :arg keep_alive: the time to live for the point in time, renewed with each search request + """ + es = get_connection(self._using) + + pit = es.open_point_in_time(index=self._index or "*", keep_alive=keep_alive) + search = self.index().extra(pit={"id": pit["id"], "keep_alive": keep_alive}) + if not search._sort: + search = search.sort("_shard_doc") + yield search + es.close_point_in_time(id=pit["id"]) + + def iterate(self, keep_alive: str = "1m") -> Iterator[_R]: + """ + Return a generator that iterates over all the documents matching the query. + + This method uses a point in time to provide consistent results even when + the index is changing. It should be preferred over ``scan()``. + + :arg keep_alive: the time to live for the point in time, renewed with each new search request + """ + with self.point_in_time(keep_alive=keep_alive) as s: + while True: + r = s.execute() + for hit in r: + yield hit + if len(r.hits) == 0: + break + s = s.search_after() + + +class MultiSearch(MultiSearchBase[_R]): + """ + Combine multiple :class:`~elasticsearch.dsl.Search` objects into a single + request. + """ + + _using: UsingType + + if TYPE_CHECKING: + + def add(self, search: Search[_R]) -> Self: ... # type: ignore[override] + + def execute( + self, ignore_cache: bool = False, raise_on_error: bool = True + ) -> List[Response[_R]]: + """ + Execute the multi search request and return a list of search results. 
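Illustrative only: combining several searches into a single _msearch round trip with the MultiSearch class defined here (index and queries are made up):

    from elasticsearch.dsl import MultiSearch, Search

    ms = MultiSearch(index="blogs-v1")
    ms = ms.add(Search().query("match", title="python"))
    ms = ms.add(Search().query("match", title="rust"))

    for response in ms.execute():
        print("total hits:", response.hits.total.value)
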
+ """ + if ignore_cache or not hasattr(self, "_response"): + es = get_connection(self._using) + + responses = es.msearch( + index=self._index, body=self.to_dict(), **self._params + ) + + out: List[Response[_R]] = [] + for s, r in zip(self._searches, responses["responses"]): + if r.get("error", False): + if raise_on_error: + raise ApiError("N/A", meta=responses.meta, body=r) + r = None + else: + r = Response(s, r) + out.append(r) + + self._response = out + + return self._response + + +class EmptySearch(Search[_R]): + def count(self) -> int: + return 0 + + def execute(self, ignore_cache: bool = False) -> Response[_R]: + return self._response_class(self, {"hits": {"total": 0, "hits": []}}) + + def scan(self) -> Iterator[_R]: + return + yield # a bit strange, but this forces an empty generator function + + def delete(self) -> AttrDict[Any]: + return AttrDict[Any]({}) diff --git a/elasticsearch/dsl/_sync/update_by_query.py b/elasticsearch/dsl/_sync/update_by_query.py new file mode 100644 index 000000000..0caecc029 --- /dev/null +++ b/elasticsearch/dsl/_sync/update_by_query.py @@ -0,0 +1,45 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from typing import TYPE_CHECKING + +from ..connections import get_connection +from ..update_by_query_base import UpdateByQueryBase +from ..utils import _R, UsingType + +if TYPE_CHECKING: + from ..response import UpdateByQueryResponse + + +class UpdateByQuery(UpdateByQueryBase[_R]): + _using: UsingType + + def execute(self) -> "UpdateByQueryResponse[_R]": + """ + Execute the search and return an instance of ``Response`` wrapping all + the data. + """ + es = get_connection(self._using) + assert self._index is not None + + self._response = self._response_class( + self, + ( + es.update_by_query(index=self._index, **self.to_dict(), **self._params) + ).body, + ) + return self._response diff --git a/elasticsearch/dsl/aggs.py b/elasticsearch/dsl/aggs.py new file mode 100644 index 000000000..ba5150803 --- /dev/null +++ b/elasticsearch/dsl/aggs.py @@ -0,0 +1,3730 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import collections.abc +from copy import deepcopy +from typing import ( + TYPE_CHECKING, + Any, + ClassVar, + Dict, + Generic, + Iterable, + Literal, + Mapping, + MutableMapping, + Optional, + Sequence, + Union, + cast, +) + +from elastic_transport.client_utils import DEFAULT + +from .query import Query +from .response.aggs import AggResponse, BucketData, FieldBucketData, TopHitsData +from .utils import _R, AttrDict, DslBase + +if TYPE_CHECKING: + from elastic_transport.client_utils import DefaultType + + from . import types + from .document_base import InstrumentedField + from .search_base import SearchBase + + +def A( + name_or_agg: Union[MutableMapping[str, Any], "Agg[_R]", str], + filter: Optional[Union[str, "Query"]] = None, + **params: Any, +) -> "Agg[_R]": + if filter is not None: + if name_or_agg != "filter": + raise ValueError( + "Aggregation %r doesn't accept positional argument 'filter'." + % name_or_agg + ) + params["filter"] = filter + + # {"terms": {"field": "tags"}, "aggs": {...}} + if isinstance(name_or_agg, collections.abc.MutableMapping): + if params: + raise ValueError("A() cannot accept parameters when passing in a dict.") + # copy to avoid modifying in-place + agg = deepcopy(name_or_agg) + # pop out nested aggs + aggs = agg.pop("aggs", None) + # pop out meta data + meta = agg.pop("meta", None) + # should be {"terms": {"field": "tags"}} + if len(agg) != 1: + raise ValueError( + 'A() can only accept dict with an aggregation ({"terms": {...}}). ' + "Instead it got (%r)" % name_or_agg + ) + agg_type, params = agg.popitem() + if aggs: + params = params.copy() + params["aggs"] = aggs + if meta: + params = params.copy() + params["meta"] = meta + return Agg[_R].get_dsl_class(agg_type)(_expand__to_dot=False, **params) + + # Terms(...) just return the nested agg + elif isinstance(name_or_agg, Agg): + if params: + raise ValueError( + "A() cannot accept parameters when passing in an Agg object." 
+ ) + return name_or_agg + + # "terms", field="tags" + return Agg[_R].get_dsl_class(name_or_agg)(**params) + + +class Agg(DslBase, Generic[_R]): + _type_name = "agg" + _type_shortcut = staticmethod(A) + name = "" + + def __contains__(self, key: str) -> bool: + return False + + def to_dict(self) -> Dict[str, Any]: + d = super().to_dict() + if isinstance(d[self.name], dict): + n = cast(Dict[str, Any], d[self.name]) + if "meta" in n: + d["meta"] = n.pop("meta") + return d + + def result(self, search: "SearchBase[_R]", data: Dict[str, Any]) -> AttrDict[Any]: + return AggResponse[_R](self, search, data) + + +class AggBase(Generic[_R]): + aggs: Dict[str, Agg[_R]] + _base: Agg[_R] + _params: Dict[str, Any] + _param_defs: ClassVar[Dict[str, Any]] = { + "aggs": {"type": "agg", "hash": True}, + } + + def __contains__(self, key: str) -> bool: + return key in self._params.get("aggs", {}) + + def __getitem__(self, agg_name: str) -> Agg[_R]: + agg = cast( + Agg[_R], self._params.setdefault("aggs", {})[agg_name] + ) # propagate KeyError + + # make sure we're not mutating a shared state - whenever accessing a + # bucket, return a shallow copy of it to be safe + if isinstance(agg, Bucket): + agg = A(agg.name, **agg._params) + # be sure to store the copy so any modifications to it will affect us + self._params["aggs"][agg_name] = agg + + return agg + + def __setitem__(self, agg_name: str, agg: Agg[_R]) -> None: + self.aggs[agg_name] = A(agg) + + def __iter__(self) -> Iterable[str]: + return iter(self.aggs) + + def _agg( + self, + bucket: bool, + name: str, + agg_type: Union[Dict[str, Any], Agg[_R], str], + *args: Any, + **params: Any, + ) -> Agg[_R]: + agg = self[name] = A(agg_type, *args, **params) + + # For chaining - when creating new buckets return them... + if bucket: + return agg + # otherwise return self._base so we can keep chaining + else: + return self._base + + def metric( + self, + name: str, + agg_type: Union[Dict[str, Any], Agg[_R], str], + *args: Any, + **params: Any, + ) -> Agg[_R]: + return self._agg(False, name, agg_type, *args, **params) + + def bucket( + self, + name: str, + agg_type: Union[Dict[str, Any], Agg[_R], str], + *args: Any, + **params: Any, + ) -> "Bucket[_R]": + return cast("Bucket[_R]", self._agg(True, name, agg_type, *args, **params)) + + def pipeline( + self, + name: str, + agg_type: Union[Dict[str, Any], Agg[_R], str], + *args: Any, + **params: Any, + ) -> "Pipeline[_R]": + return cast("Pipeline[_R]", self._agg(False, name, agg_type, *args, **params)) + + def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]: + return BucketData(self, search, data) # type: ignore[arg-type] + + +class Bucket(AggBase[_R], Agg[_R]): + def __init__(self, **params: Any): + super().__init__(**params) + # remember self for chaining + self._base = self + + def to_dict(self) -> Dict[str, Any]: + d = super(AggBase, self).to_dict() + if isinstance(d[self.name], dict): + n = cast(AttrDict[Any], d[self.name]) + if "aggs" in n: + d["aggs"] = n.pop("aggs") + return d + + +class Pipeline(Agg[_R]): + pass + + +class AdjacencyMatrix(Bucket[_R]): + """ + A bucket aggregation returning a form of adjacency matrix. The request + provides a collection of named filter expressions, similar to the + `filters` aggregation. Each bucket in the response represents a non- + empty cell in the matrix of intersecting filters. + + :arg filters: Filters used to create buckets. At least one filter is + required. + :arg separator: Separator used to concatenate filter names. Defaults + to &. 
+ """ + + name = "adjacency_matrix" + _param_defs = { + "filters": {"type": "query", "hash": True}, + } + + def __init__( + self, + *, + filters: Union[Mapping[str, Query], "DefaultType"] = DEFAULT, + separator: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(filters=filters, separator=separator, **kwargs) + + +class AutoDateHistogram(Bucket[_R]): + """ + A multi-bucket aggregation similar to the date histogram, except + instead of providing an interval to use as the width of each bucket, a + target number of buckets is provided. + + :arg buckets: The target number of buckets. Defaults to `10` if + omitted. + :arg field: The field on which to run the aggregation. + :arg format: The date format used to format `key_as_string` in the + response. If no `format` is specified, the first date format + specified in the field mapping is used. + :arg minimum_interval: The minimum rounding interval. This can make + the collection process more efficient, as the aggregation will not + attempt to round at any interval lower than `minimum_interval`. + :arg missing: The value to apply to documents that do not have a + value. By default, documents without a value are ignored. + :arg offset: Time zone specified as a ISO 8601 UTC offset. + :arg params: + :arg script: + :arg time_zone: Time zone ID. + """ + + name = "auto_date_histogram" + + def __init__( + self, + *, + buckets: Union[int, "DefaultType"] = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + format: Union[str, "DefaultType"] = DEFAULT, + minimum_interval: Union[ + Literal["second", "minute", "hour", "day", "month", "year"], "DefaultType" + ] = DEFAULT, + missing: Any = DEFAULT, + offset: Union[str, "DefaultType"] = DEFAULT, + params: Union[Mapping[str, Any], "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + time_zone: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + buckets=buckets, + field=field, + format=format, + minimum_interval=minimum_interval, + missing=missing, + offset=offset, + params=params, + script=script, + time_zone=time_zone, + **kwargs, + ) + + def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]: + return FieldBucketData(self, search, data) + + +class Avg(Agg[_R]): + """ + A single-value metrics aggregation that computes the average of + numeric values that are extracted from the aggregated documents. + + :arg format: + :arg field: The field on which to run the aggregation. + :arg missing: The value to apply to documents that do not have a + value. By default, documents without a value are ignored. + :arg script: + """ + + name = "avg" + + def __init__( + self, + *, + format: Union[str, "DefaultType"] = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + format=format, field=field, missing=missing, script=script, **kwargs + ) + + +class AvgBucket(Pipeline[_R]): + """ + A sibling pipeline aggregation which calculates the mean value of a + specified metric in a sibling aggregation. The specified metric must + be numeric and the sibling aggregation must be a multi-bucket + aggregation. + + :arg format: `DecimalFormat` pattern for the output value. If + specified, the formatted value is returned in the aggregation’s + `value_as_string` property. 
+ :arg gap_policy: Policy to apply when gaps are found in the data. + Defaults to `skip` if omitted. + :arg buckets_path: Path to the buckets that contain one set of values + to correlate. + """ + + name = "avg_bucket" + + def __init__( + self, + *, + format: Union[str, "DefaultType"] = DEFAULT, + gap_policy: Union[ + Literal["skip", "insert_zeros", "keep_values"], "DefaultType" + ] = DEFAULT, + buckets_path: Union[ + str, Sequence[str], Mapping[str, str], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs + ) + + +class Boxplot(Agg[_R]): + """ + A metrics aggregation that computes a box plot of numeric values + extracted from the aggregated documents. + + :arg compression: Limits the maximum number of nodes used by the + underlying TDigest algorithm to `20 * compression`, enabling + control of memory usage and approximation error. + :arg field: The field on which to run the aggregation. + :arg missing: The value to apply to documents that do not have a + value. By default, documents without a value are ignored. + :arg script: + """ + + name = "boxplot" + + def __init__( + self, + *, + compression: Union[float, "DefaultType"] = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + compression=compression, + field=field, + missing=missing, + script=script, + **kwargs, + ) + + +class BucketScript(Pipeline[_R]): + """ + A parent pipeline aggregation which runs a script which can perform + per bucket computations on metrics in the parent multi-bucket + aggregation. + + :arg script: The script to run for this aggregation. + :arg format: `DecimalFormat` pattern for the output value. If + specified, the formatted value is returned in the aggregation’s + `value_as_string` property. + :arg gap_policy: Policy to apply when gaps are found in the data. + Defaults to `skip` if omitted. + :arg buckets_path: Path to the buckets that contain one set of values + to correlate. + """ + + name = "bucket_script" + + def __init__( + self, + *, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + format: Union[str, "DefaultType"] = DEFAULT, + gap_policy: Union[ + Literal["skip", "insert_zeros", "keep_values"], "DefaultType" + ] = DEFAULT, + buckets_path: Union[ + str, Sequence[str], Mapping[str, str], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + script=script, + format=format, + gap_policy=gap_policy, + buckets_path=buckets_path, + **kwargs, + ) + + +class BucketSelector(Pipeline[_R]): + """ + A parent pipeline aggregation which runs a script to determine whether + the current bucket will be retained in the parent multi-bucket + aggregation. + + :arg script: The script to run for this aggregation. + :arg format: `DecimalFormat` pattern for the output value. If + specified, the formatted value is returned in the aggregation’s + `value_as_string` property. + :arg gap_policy: Policy to apply when gaps are found in the data. + Defaults to `skip` if omitted. + :arg buckets_path: Path to the buckets that contain one set of values + to correlate. 
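+
+    A usage sketch (index and field names are illustrative, and the
+    `elasticsearch.dsl` import path is an assumption of this example)::
+
+        from elasticsearch.dsl import Search
+
+        s = Search(index="sales")
+        months = s.aggs.bucket(
+            "per_month", "date_histogram", field="date", calendar_interval="month"
+        )
+        months.metric("total", "sum", field="price")
+        months.pipeline(
+            "big_months_only",
+            "bucket_selector",
+            buckets_path={"total": "total"},
+            script="params.total > 1000",
+        )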
+ """ + + name = "bucket_selector" + + def __init__( + self, + *, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + format: Union[str, "DefaultType"] = DEFAULT, + gap_policy: Union[ + Literal["skip", "insert_zeros", "keep_values"], "DefaultType" + ] = DEFAULT, + buckets_path: Union[ + str, Sequence[str], Mapping[str, str], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + script=script, + format=format, + gap_policy=gap_policy, + buckets_path=buckets_path, + **kwargs, + ) + + +class BucketSort(Bucket[_R]): + """ + A parent pipeline aggregation which sorts the buckets of its parent + multi-bucket aggregation. + + :arg from: Buckets in positions prior to `from` will be truncated. + :arg gap_policy: The policy to apply when gaps are found in the data. + Defaults to `skip` if omitted. + :arg size: The number of buckets to return. Defaults to all buckets of + the parent aggregation. + :arg sort: The list of fields to sort on. + """ + + name = "bucket_sort" + + def __init__( + self, + *, + from_: Union[int, "DefaultType"] = DEFAULT, + gap_policy: Union[ + Literal["skip", "insert_zeros", "keep_values"], "DefaultType" + ] = DEFAULT, + size: Union[int, "DefaultType"] = DEFAULT, + sort: Union[ + Union[Union[str, "InstrumentedField"], "types.SortOptions"], + Sequence[Union[Union[str, "InstrumentedField"], "types.SortOptions"]], + Dict[str, Any], + "DefaultType", + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + from_=from_, gap_policy=gap_policy, size=size, sort=sort, **kwargs + ) + + +class BucketCountKsTest(Pipeline[_R]): + """ + A sibling pipeline aggregation which runs a two sample + Kolmogorov–Smirnov test ("K-S test") against a provided distribution + and the distribution implied by the documents counts in the configured + sibling aggregation. + + :arg alternative: A list of string values indicating which K-S test + alternative to calculate. The valid values are: "greater", "less", + "two_sided". This parameter is key for determining the K-S + statistic used when calculating the K-S test. Default value is all + possible alternative hypotheses. + :arg fractions: A list of doubles indicating the distribution of the + samples with which to compare to the `buckets_path` results. In + typical usage this is the overall proportion of documents in each + bucket, which is compared with the actual document proportions in + each bucket from the sibling aggregation counts. The default is to + assume that overall documents are uniformly distributed on these + buckets, which they would be if one used equal percentiles of a + metric to define the bucket end points. + :arg sampling_method: Indicates the sampling methodology when + calculating the K-S test. Note, this is sampling of the returned + values. This determines the cumulative distribution function (CDF) + points used comparing the two samples. Default is `upper_tail`, + which emphasizes the upper end of the CDF points. Valid options + are: `upper_tail`, `uniform`, and `lower_tail`. + :arg buckets_path: Path to the buckets that contain one set of values + to correlate. 
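+
+    A usage sketch (illustrative names; the `elasticsearch.dsl` import
+    path is assumed, and the aggregation requires a deployment that
+    supports it)::
+
+        from elasticsearch.dsl import Search
+
+        s = Search(index="correlate_latency")
+        versions = s.aggs.bucket("by_version", "terms", field="version", size=2)
+        versions.bucket(
+            "latency_ranges",
+            "range",
+            field="latency",
+            ranges=[{"to": 100}, {"from": 100, "to": 200}, {"from": 200}],
+        )
+        versions.pipeline(
+            "ks_test",
+            "bucket_count_ks_test",
+            buckets_path="latency_ranges>_count",
+            alternative=["less", "greater", "two_sided"],
+        )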
+ """ + + name = "bucket_count_ks_test" + + def __init__( + self, + *, + alternative: Union[Sequence[str], "DefaultType"] = DEFAULT, + fractions: Union[Sequence[float], "DefaultType"] = DEFAULT, + sampling_method: Union[str, "DefaultType"] = DEFAULT, + buckets_path: Union[ + str, Sequence[str], Mapping[str, str], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + alternative=alternative, + fractions=fractions, + sampling_method=sampling_method, + buckets_path=buckets_path, + **kwargs, + ) + + +class BucketCorrelation(Pipeline[_R]): + """ + A sibling pipeline aggregation which runs a correlation function on + the configured sibling multi-bucket aggregation. + + :arg function: (required) The correlation function to execute. + :arg buckets_path: Path to the buckets that contain one set of values + to correlate. + """ + + name = "bucket_correlation" + + def __init__( + self, + *, + function: Union[ + "types.BucketCorrelationFunction", Dict[str, Any], "DefaultType" + ] = DEFAULT, + buckets_path: Union[ + str, Sequence[str], Mapping[str, str], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__(function=function, buckets_path=buckets_path, **kwargs) + + +class Cardinality(Agg[_R]): + """ + A single-value metrics aggregation that calculates an approximate + count of distinct values. + + :arg precision_threshold: A unique count below which counts are + expected to be close to accurate. This allows to trade memory for + accuracy. Defaults to `3000` if omitted. + :arg rehash: + :arg execution_hint: Mechanism by which cardinality aggregations is + run. + :arg field: The field on which to run the aggregation. + :arg missing: The value to apply to documents that do not have a + value. By default, documents without a value are ignored. + :arg script: + """ + + name = "cardinality" + + def __init__( + self, + *, + precision_threshold: Union[int, "DefaultType"] = DEFAULT, + rehash: Union[bool, "DefaultType"] = DEFAULT, + execution_hint: Union[ + Literal[ + "global_ordinals", + "segment_ordinals", + "direct", + "save_memory_heuristic", + "save_time_heuristic", + ], + "DefaultType", + ] = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + precision_threshold=precision_threshold, + rehash=rehash, + execution_hint=execution_hint, + field=field, + missing=missing, + script=script, + **kwargs, + ) + + +class CategorizeText(Bucket[_R]): + """ + A multi-bucket aggregation that groups semi-structured text into + buckets. + + :arg field: (required) The semi-structured text field to categorize. + :arg max_unique_tokens: The maximum number of unique tokens at any + position up to max_matched_tokens. Must be larger than 1. Smaller + values use less memory and create fewer categories. Larger values + will use more memory and create narrower categories. Max allowed + value is 100. Defaults to `50` if omitted. + :arg max_matched_tokens: The maximum number of token positions to + match on before attempting to merge categories. Larger values will + use more memory and create narrower categories. Max allowed value + is 100. Defaults to `5` if omitted. + :arg similarity_threshold: The minimum percentage of tokens that must + match for text to be added to the category bucket. Must be between + 1 and 100. The larger the value the narrower the categories. 
+ Larger values will increase memory usage and create narrower + categories. Defaults to `50` if omitted. + :arg categorization_filters: This property expects an array of regular + expressions. The expressions are used to filter out matching + sequences from the categorization field values. You can use this + functionality to fine tune the categorization by excluding + sequences from consideration when categories are defined. For + example, you can exclude SQL statements that appear in your log + files. This property cannot be used at the same time as + categorization_analyzer. If you only want to define simple regular + expression filters that are applied prior to tokenization, setting + this property is the easiest method. If you also want to customize + the tokenizer or post-tokenization filtering, use the + categorization_analyzer property instead and include the filters + as pattern_replace character filters. + :arg categorization_analyzer: The categorization analyzer specifies + how the text is analyzed and tokenized before being categorized. + The syntax is very similar to that used to define the analyzer in + the [Analyze endpoint](https://www.elastic.co/guide/en/elasticsear + ch/reference/8.0/indices-analyze.html). This property cannot be + used at the same time as categorization_filters. + :arg shard_size: The number of categorization buckets to return from + each shard before merging all the results. + :arg size: The number of buckets to return. Defaults to `10` if + omitted. + :arg min_doc_count: The minimum number of documents in a bucket to be + returned to the results. + :arg shard_min_doc_count: The minimum number of documents in a bucket + to be returned from the shard before merging. + """ + + name = "categorize_text" + + def __init__( + self, + *, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + max_unique_tokens: Union[int, "DefaultType"] = DEFAULT, + max_matched_tokens: Union[int, "DefaultType"] = DEFAULT, + similarity_threshold: Union[int, "DefaultType"] = DEFAULT, + categorization_filters: Union[Sequence[str], "DefaultType"] = DEFAULT, + categorization_analyzer: Union[ + str, "types.CustomCategorizeTextAnalyzer", Dict[str, Any], "DefaultType" + ] = DEFAULT, + shard_size: Union[int, "DefaultType"] = DEFAULT, + size: Union[int, "DefaultType"] = DEFAULT, + min_doc_count: Union[int, "DefaultType"] = DEFAULT, + shard_min_doc_count: Union[int, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + field=field, + max_unique_tokens=max_unique_tokens, + max_matched_tokens=max_matched_tokens, + similarity_threshold=similarity_threshold, + categorization_filters=categorization_filters, + categorization_analyzer=categorization_analyzer, + shard_size=shard_size, + size=size, + min_doc_count=min_doc_count, + shard_min_doc_count=shard_min_doc_count, + **kwargs, + ) + + +class Children(Bucket[_R]): + """ + A single bucket aggregation that selects child documents that have the + specified type, as defined in a `join` field. + + :arg type: The child type that should be selected. + """ + + name = "children" + + def __init__(self, type: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any): + super().__init__(type=type, **kwargs) + + +class Composite(Bucket[_R]): + """ + A multi-bucket aggregation that creates composite buckets from + different sources. Unlike the other multi-bucket aggregations, you can + use the `composite` aggregation to paginate *all* buckets from a + multi-level aggregation efficiently. 
+
+    :arg after: When paginating, use the `after_key` value returned in the
+        previous response to retrieve the next page.
+    :arg size: The number of composite buckets that should be returned.
+        Defaults to `10` if omitted.
+    :arg sources: The value sources used to build composite buckets. Keys
+        are returned in the order of the `sources` definition.
+    """
+
+    name = "composite"
+
+    def __init__(
+        self,
+        *,
+        after: Union[
+            Mapping[
+                Union[str, "InstrumentedField"], Union[int, float, str, bool, None, Any]
+            ],
+            "DefaultType",
+        ] = DEFAULT,
+        size: Union[int, "DefaultType"] = DEFAULT,
+        sources: Union[Sequence[Mapping[str, Agg[_R]]], "DefaultType"] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(after=after, size=size, sources=sources, **kwargs)
+
+
+class CumulativeCardinality(Pipeline[_R]):
+    """
+    A parent pipeline aggregation which calculates the cumulative
+    cardinality in a parent `histogram` or `date_histogram` aggregation.
+
+    :arg format: `DecimalFormat` pattern for the output value. If
+        specified, the formatted value is returned in the aggregation’s
+        `value_as_string` property.
+    :arg gap_policy: Policy to apply when gaps are found in the data.
+        Defaults to `skip` if omitted.
+    :arg buckets_path: Path to the buckets that contain one set of values
+        to correlate.
+    """
+
+    name = "cumulative_cardinality"
+
+    def __init__(
+        self,
+        *,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        gap_policy: Union[
+            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+        ] = DEFAULT,
+        buckets_path: Union[
+            str, Sequence[str], Mapping[str, str], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs
+        )
+
+
+class CumulativeSum(Pipeline[_R]):
+    """
+    A parent pipeline aggregation which calculates the cumulative sum of a
+    specified metric in a parent `histogram` or `date_histogram`
+    aggregation.
+
+    :arg format: `DecimalFormat` pattern for the output value. If
+        specified, the formatted value is returned in the aggregation’s
+        `value_as_string` property.
+    :arg gap_policy: Policy to apply when gaps are found in the data.
+        Defaults to `skip` if omitted.
+    :arg buckets_path: Path to the buckets that contain one set of values
+        to correlate.
+    """
+
+    name = "cumulative_sum"
+
+    def __init__(
+        self,
+        *,
+        format: Union[str, "DefaultType"] = DEFAULT,
+        gap_policy: Union[
+            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
+        ] = DEFAULT,
+        buckets_path: Union[
+            str, Sequence[str], Mapping[str, str], "DefaultType"
+        ] = DEFAULT,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs
+        )
+
+
+class DateHistogram(Bucket[_R]):
+    """
+    A multi-bucket values source based aggregation that can be applied on
+    date values or date range values extracted from the documents. It
+    dynamically builds fixed size (interval) buckets over the values.
+
+    :arg calendar_interval: Calendar-aware interval. Can be specified
+        using the unit name, such as `month`, or as a single unit
+        quantity, such as `1M`.
+    :arg extended_bounds: Enables extending the bounds of the histogram
+        beyond the data itself.
+    :arg hard_bounds: Limits the histogram to specified bounds.
+    :arg field: The date field whose values are used to build a histogram.
+    :arg fixed_interval: Fixed interval: a fixed number of SI units that
+        never deviates, regardless of where it falls on the calendar.
+ :arg format: The date format used to format `key_as_string` in the + response. If no `format` is specified, the first date format + specified in the field mapping is used. + :arg interval: + :arg min_doc_count: Only returns buckets that have `min_doc_count` + number of documents. By default, all buckets between the first + bucket that matches documents and the last one are returned. + :arg missing: The value to apply to documents that do not have a + value. By default, documents without a value are ignored. + :arg offset: Changes the start value of each bucket by the specified + positive (`+`) or negative offset (`-`) duration. + :arg order: The sort order of the returned buckets. + :arg params: + :arg script: + :arg time_zone: Time zone used for bucketing and rounding. Defaults to + Coordinated Universal Time (UTC). + :arg keyed: Set to `true` to associate a unique string key with each + bucket and return the ranges as a hash rather than an array. + """ + + name = "date_histogram" + + def __init__( + self, + *, + calendar_interval: Union[ + Literal[ + "second", "minute", "hour", "day", "week", "month", "quarter", "year" + ], + "DefaultType", + ] = DEFAULT, + extended_bounds: Union[ + "types.ExtendedBounds", Dict[str, Any], "DefaultType" + ] = DEFAULT, + hard_bounds: Union[ + "types.ExtendedBounds", Dict[str, Any], "DefaultType" + ] = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + fixed_interval: Any = DEFAULT, + format: Union[str, "DefaultType"] = DEFAULT, + interval: Any = DEFAULT, + min_doc_count: Union[int, "DefaultType"] = DEFAULT, + missing: Any = DEFAULT, + offset: Any = DEFAULT, + order: Union[ + Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]], + Sequence[Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]]], + "DefaultType", + ] = DEFAULT, + params: Union[Mapping[str, Any], "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + time_zone: Union[str, "DefaultType"] = DEFAULT, + keyed: Union[bool, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + calendar_interval=calendar_interval, + extended_bounds=extended_bounds, + hard_bounds=hard_bounds, + field=field, + fixed_interval=fixed_interval, + format=format, + interval=interval, + min_doc_count=min_doc_count, + missing=missing, + offset=offset, + order=order, + params=params, + script=script, + time_zone=time_zone, + keyed=keyed, + **kwargs, + ) + + def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]: + return FieldBucketData(self, search, data) + + +class DateRange(Bucket[_R]): + """ + A multi-bucket value source based aggregation that enables the user to + define a set of date ranges - each representing a bucket. + + :arg field: The date field whose values are use to build ranges. + :arg format: The date format used to format `from` and `to` in the + response. + :arg missing: The value to apply to documents that do not have a + value. By default, documents without a value are ignored. + :arg ranges: Array of date ranges. + :arg time_zone: Time zone used to convert dates from another time zone + to UTC. + :arg keyed: Set to `true` to associate a unique string key with each + bucket and returns the ranges as a hash rather than an array. 
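+
+    A usage sketch (index, field, and format values are illustrative; the
+    `elasticsearch.dsl` import path is an assumption)::
+
+        from elasticsearch.dsl import Search
+
+        s = Search(index="my-index")
+        s.aggs.bucket(
+            "recency",
+            "date_range",
+            field="created_at",
+            format="yyyy-MM-dd",
+            ranges=[{"to": "now-7d/d"}, {"from": "now-7d/d"}],
+        )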
+ """ + + name = "date_range" + + def __init__( + self, + *, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + format: Union[str, "DefaultType"] = DEFAULT, + missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, + ranges: Union[ + Sequence["types.DateRangeExpression"], + Sequence[Dict[str, Any]], + "DefaultType", + ] = DEFAULT, + time_zone: Union[str, "DefaultType"] = DEFAULT, + keyed: Union[bool, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + field=field, + format=format, + missing=missing, + ranges=ranges, + time_zone=time_zone, + keyed=keyed, + **kwargs, + ) + + +class Derivative(Pipeline[_R]): + """ + A parent pipeline aggregation which calculates the derivative of a + specified metric in a parent `histogram` or `date_histogram` + aggregation. + + :arg format: `DecimalFormat` pattern for the output value. If + specified, the formatted value is returned in the aggregation’s + `value_as_string` property. + :arg gap_policy: Policy to apply when gaps are found in the data. + Defaults to `skip` if omitted. + :arg buckets_path: Path to the buckets that contain one set of values + to correlate. + """ + + name = "derivative" + + def __init__( + self, + *, + format: Union[str, "DefaultType"] = DEFAULT, + gap_policy: Union[ + Literal["skip", "insert_zeros", "keep_values"], "DefaultType" + ] = DEFAULT, + buckets_path: Union[ + str, Sequence[str], Mapping[str, str], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs + ) + + +class DiversifiedSampler(Bucket[_R]): + """ + A filtering aggregation used to limit any sub aggregations' processing + to a sample of the top-scoring documents. Similar to the `sampler` + aggregation, but adds the ability to limit the number of matches that + share a common value. + + :arg execution_hint: The type of value used for de-duplication. + Defaults to `global_ordinals` if omitted. + :arg max_docs_per_value: Limits how many documents are permitted per + choice of de-duplicating value. Defaults to `1` if omitted. + :arg script: + :arg shard_size: Limits how many top-scoring documents are collected + in the sample processed on each shard. Defaults to `100` if + omitted. + :arg field: The field used to provide values used for de-duplication. + """ + + name = "diversified_sampler" + + def __init__( + self, + *, + execution_hint: Union[ + Literal["map", "global_ordinals", "bytes_hash"], "DefaultType" + ] = DEFAULT, + max_docs_per_value: Union[int, "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + shard_size: Union[int, "DefaultType"] = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + execution_hint=execution_hint, + max_docs_per_value=max_docs_per_value, + script=script, + shard_size=shard_size, + field=field, + **kwargs, + ) + + +class ExtendedStats(Agg[_R]): + """ + A multi-value metrics aggregation that computes stats over numeric + values extracted from the aggregated documents. + + :arg sigma: The number of standard deviations above/below the mean to + display. + :arg format: + :arg field: The field on which to run the aggregation. + :arg missing: The value to apply to documents that do not have a + value. By default, documents without a value are ignored. 
+ :arg script: + """ + + name = "extended_stats" + + def __init__( + self, + *, + sigma: Union[float, "DefaultType"] = DEFAULT, + format: Union[str, "DefaultType"] = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + sigma=sigma, + format=format, + field=field, + missing=missing, + script=script, + **kwargs, + ) + + +class ExtendedStatsBucket(Pipeline[_R]): + """ + A sibling pipeline aggregation which calculates a variety of stats + across all bucket of a specified metric in a sibling aggregation. + + :arg sigma: The number of standard deviations above/below the mean to + display. + :arg format: `DecimalFormat` pattern for the output value. If + specified, the formatted value is returned in the aggregation’s + `value_as_string` property. + :arg gap_policy: Policy to apply when gaps are found in the data. + Defaults to `skip` if omitted. + :arg buckets_path: Path to the buckets that contain one set of values + to correlate. + """ + + name = "extended_stats_bucket" + + def __init__( + self, + *, + sigma: Union[float, "DefaultType"] = DEFAULT, + format: Union[str, "DefaultType"] = DEFAULT, + gap_policy: Union[ + Literal["skip", "insert_zeros", "keep_values"], "DefaultType" + ] = DEFAULT, + buckets_path: Union[ + str, Sequence[str], Mapping[str, str], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + sigma=sigma, + format=format, + gap_policy=gap_policy, + buckets_path=buckets_path, + **kwargs, + ) + + +class FrequentItemSets(Agg[_R]): + """ + A bucket aggregation which finds frequent item sets, a form of + association rules mining that identifies items that often occur + together. + + :arg fields: (required) Fields to analyze. + :arg minimum_set_size: The minimum size of one item set. Defaults to + `1` if omitted. + :arg minimum_support: The minimum support of one item set. Defaults to + `0.1` if omitted. + :arg size: The number of top item sets to return. Defaults to `10` if + omitted. + :arg filter: Query that filters documents from analysis. + """ + + name = "frequent_item_sets" + _param_defs = { + "filter": {"type": "query"}, + } + + def __init__( + self, + *, + fields: Union[ + Sequence["types.FrequentItemSetsField"], + Sequence[Dict[str, Any]], + "DefaultType", + ] = DEFAULT, + minimum_set_size: Union[int, "DefaultType"] = DEFAULT, + minimum_support: Union[float, "DefaultType"] = DEFAULT, + size: Union[int, "DefaultType"] = DEFAULT, + filter: Union[Query, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + fields=fields, + minimum_set_size=minimum_set_size, + minimum_support=minimum_support, + size=size, + filter=filter, + **kwargs, + ) + + +class Filter(Bucket[_R]): + """ + A single bucket aggregation that narrows the set of documents to those + that match a query. + + :arg filter: A single bucket aggregation that narrows the set of + documents to those that match a query. 
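+
+    A usage sketch (illustrative names; the `elasticsearch.dsl` import
+    path is an assumption)::
+
+        from elasticsearch.dsl import Q, Search
+
+        s = Search(index="products")
+        in_stock = s.aggs.bucket(
+            "in_stock", "filter", filter=Q("term", available=True)
+        )
+        in_stock.metric("avg_price", "avg", field="price")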
+ """ + + name = "filter" + _param_defs = { + "filter": {"type": "query"}, + "aggs": {"type": "agg", "hash": True}, + } + + def __init__(self, filter: Union[Query, "DefaultType"] = DEFAULT, **kwargs: Any): + super().__init__(filter=filter, **kwargs) + + def to_dict(self) -> Dict[str, Any]: + d = super().to_dict() + if isinstance(d[self.name], dict): + n = cast(AttrDict[Any], d[self.name]) + n.update(n.pop("filter", {})) + return d + + +class Filters(Bucket[_R]): + """ + A multi-bucket aggregation where each bucket contains the documents + that match a query. + + :arg filters: Collection of queries from which to build buckets. + :arg other_bucket: Set to `true` to add a bucket to the response which + will contain all documents that do not match any of the given + filters. + :arg other_bucket_key: The key with which the other bucket is + returned. Defaults to `_other_` if omitted. + :arg keyed: By default, the named filters aggregation returns the + buckets as an object. Set to `false` to return the buckets as an + array of objects. Defaults to `True` if omitted. + """ + + name = "filters" + _param_defs = { + "filters": {"type": "query", "hash": True}, + "aggs": {"type": "agg", "hash": True}, + } + + def __init__( + self, + *, + filters: Union[Dict[str, Query], "DefaultType"] = DEFAULT, + other_bucket: Union[bool, "DefaultType"] = DEFAULT, + other_bucket_key: Union[str, "DefaultType"] = DEFAULT, + keyed: Union[bool, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + filters=filters, + other_bucket=other_bucket, + other_bucket_key=other_bucket_key, + keyed=keyed, + **kwargs, + ) + + +class GeoBounds(Agg[_R]): + """ + A metric aggregation that computes the geographic bounding box + containing all values for a Geopoint or Geoshape field. + + :arg wrap_longitude: Specifies whether the bounding box should be + allowed to overlap the international date line. Defaults to `True` + if omitted. + :arg field: The field on which to run the aggregation. + :arg missing: The value to apply to documents that do not have a + value. By default, documents without a value are ignored. + :arg script: + """ + + name = "geo_bounds" + + def __init__( + self, + *, + wrap_longitude: Union[bool, "DefaultType"] = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + wrap_longitude=wrap_longitude, + field=field, + missing=missing, + script=script, + **kwargs, + ) + + +class GeoCentroid(Agg[_R]): + """ + A metric aggregation that computes the weighted centroid from all + coordinate values for geo fields. + + :arg count: + :arg location: + :arg field: The field on which to run the aggregation. + :arg missing: The value to apply to documents that do not have a + value. By default, documents without a value are ignored. 
+ :arg script: + """ + + name = "geo_centroid" + + def __init__( + self, + *, + count: Union[int, "DefaultType"] = DEFAULT, + location: Union[ + "types.LatLonGeoLocation", + "types.GeoHashLocation", + Sequence[float], + str, + Dict[str, Any], + "DefaultType", + ] = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + count=count, + location=location, + field=field, + missing=missing, + script=script, + **kwargs, + ) + + +class GeoDistance(Bucket[_R]): + """ + A multi-bucket aggregation that works on `geo_point` fields. Evaluates + the distance of each document value from an origin point and + determines the buckets it belongs to, based on ranges defined in the + request. + + :arg distance_type: The distance calculation type. Defaults to `arc` + if omitted. + :arg field: A field of type `geo_point` used to evaluate the distance. + :arg origin: The origin used to evaluate the distance. + :arg ranges: An array of ranges used to bucket documents. + :arg unit: The distance unit. Defaults to `m` if omitted. + """ + + name = "geo_distance" + + def __init__( + self, + *, + distance_type: Union[Literal["arc", "plane"], "DefaultType"] = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + origin: Union[ + "types.LatLonGeoLocation", + "types.GeoHashLocation", + Sequence[float], + str, + Dict[str, Any], + "DefaultType", + ] = DEFAULT, + ranges: Union[ + Sequence["types.AggregationRange"], Sequence[Dict[str, Any]], "DefaultType" + ] = DEFAULT, + unit: Union[ + Literal["in", "ft", "yd", "mi", "nmi", "km", "m", "cm", "mm"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + distance_type=distance_type, + field=field, + origin=origin, + ranges=ranges, + unit=unit, + **kwargs, + ) + + +class GeohashGrid(Bucket[_R]): + """ + A multi-bucket aggregation that groups `geo_point` and `geo_shape` + values into buckets that represent a grid. Each cell is labeled using + a geohash which is of user-definable precision. + + :arg bounds: The bounding box to filter the points in each bucket. + :arg field: Field containing indexed `geo_point` or `geo_shape` + values. If the field contains an array, `geohash_grid` aggregates + all array values. + :arg precision: The string length of the geohashes used to define + cells/buckets in the results. Defaults to `5` if omitted. + :arg shard_size: Allows for more accurate counting of the top cells + returned in the final result the aggregation. Defaults to + returning `max(10,(size x number-of-shards))` buckets from each + shard. + :arg size: The maximum number of geohash buckets to return. Defaults + to `10000` if omitted. 
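+
+    A usage sketch (assumes a `geo_point` field named `location`; the
+    `elasticsearch.dsl` import path is an assumption)::
+
+        from elasticsearch.dsl import Search
+
+        s = Search(index="museums")
+        s.aggs.bucket("grid", "geohash_grid", field="location", precision=8)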
+ """ + + name = "geohash_grid" + + def __init__( + self, + *, + bounds: Union[ + "types.CoordsGeoBounds", + "types.TopLeftBottomRightGeoBounds", + "types.TopRightBottomLeftGeoBounds", + "types.WktGeoBounds", + Dict[str, Any], + "DefaultType", + ] = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + precision: Union[float, str, "DefaultType"] = DEFAULT, + shard_size: Union[int, "DefaultType"] = DEFAULT, + size: Union[int, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + bounds=bounds, + field=field, + precision=precision, + shard_size=shard_size, + size=size, + **kwargs, + ) + + +class GeoLine(Agg[_R]): + """ + Aggregates all `geo_point` values within a bucket into a `LineString` + ordered by the chosen sort field. + + :arg point: (required) The name of the geo_point field. + :arg sort: (required) The name of the numeric field to use as the sort + key for ordering the points. When the `geo_line` aggregation is + nested inside a `time_series` aggregation, this field defaults to + `@timestamp`, and any other value will result in error. + :arg include_sort: When `true`, returns an additional array of the + sort values in the feature properties. + :arg sort_order: The order in which the line is sorted (ascending or + descending). Defaults to `asc` if omitted. + :arg size: The maximum length of the line represented in the + aggregation. Valid sizes are between 1 and 10000. Defaults to + `10000` if omitted. + """ + + name = "geo_line" + + def __init__( + self, + *, + point: Union["types.GeoLinePoint", Dict[str, Any], "DefaultType"] = DEFAULT, + sort: Union["types.GeoLineSort", Dict[str, Any], "DefaultType"] = DEFAULT, + include_sort: Union[bool, "DefaultType"] = DEFAULT, + sort_order: Union[Literal["asc", "desc"], "DefaultType"] = DEFAULT, + size: Union[int, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + point=point, + sort=sort, + include_sort=include_sort, + sort_order=sort_order, + size=size, + **kwargs, + ) + + +class GeotileGrid(Bucket[_R]): + """ + A multi-bucket aggregation that groups `geo_point` and `geo_shape` + values into buckets that represent a grid. Each cell corresponds to a + map tile as used by many online map sites. + + :arg field: Field containing indexed `geo_point` or `geo_shape` + values. If the field contains an array, `geotile_grid` aggregates + all array values. + :arg precision: Integer zoom of the key used to define cells/buckets + in the results. Values outside of the range [0,29] will be + rejected. Defaults to `7` if omitted. + :arg shard_size: Allows for more accurate counting of the top cells + returned in the final result the aggregation. Defaults to + returning `max(10,(size x number-of-shards))` buckets from each + shard. + :arg size: The maximum number of buckets to return. Defaults to + `10000` if omitted. + :arg bounds: A bounding box to filter the geo-points or geo-shapes in + each bucket. 
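+
+    A usage sketch (illustrative bounds and field name; the
+    `elasticsearch.dsl` import path is an assumption)::
+
+        from elasticsearch.dsl import Search
+
+        s = Search(index="museums")
+        s.aggs.bucket(
+            "tiles",
+            "geotile_grid",
+            field="location",
+            precision=22,
+            bounds={"top_left": "52.4, 4.9", "bottom_right": "52.3, 5.0"},
+        )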
+ """ + + name = "geotile_grid" + + def __init__( + self, + *, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + precision: Union[float, "DefaultType"] = DEFAULT, + shard_size: Union[int, "DefaultType"] = DEFAULT, + size: Union[int, "DefaultType"] = DEFAULT, + bounds: Union[ + "types.CoordsGeoBounds", + "types.TopLeftBottomRightGeoBounds", + "types.TopRightBottomLeftGeoBounds", + "types.WktGeoBounds", + Dict[str, Any], + "DefaultType", + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + field=field, + precision=precision, + shard_size=shard_size, + size=size, + bounds=bounds, + **kwargs, + ) + + +class GeohexGrid(Bucket[_R]): + """ + A multi-bucket aggregation that groups `geo_point` and `geo_shape` + values into buckets that represent a grid. Each cell corresponds to a + H3 cell index and is labeled using the H3Index representation. + + :arg field: (required) Field containing indexed `geo_point` or + `geo_shape` values. If the field contains an array, `geohex_grid` + aggregates all array values. + :arg precision: Integer zoom of the key used to defined cells or + buckets in the results. Value should be between 0-15. Defaults to + `6` if omitted. + :arg bounds: Bounding box used to filter the geo-points in each + bucket. + :arg size: Maximum number of buckets to return. Defaults to `10000` if + omitted. + :arg shard_size: Number of buckets returned from each shard. + """ + + name = "geohex_grid" + + def __init__( + self, + *, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + precision: Union[int, "DefaultType"] = DEFAULT, + bounds: Union[ + "types.CoordsGeoBounds", + "types.TopLeftBottomRightGeoBounds", + "types.TopRightBottomLeftGeoBounds", + "types.WktGeoBounds", + Dict[str, Any], + "DefaultType", + ] = DEFAULT, + size: Union[int, "DefaultType"] = DEFAULT, + shard_size: Union[int, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + field=field, + precision=precision, + bounds=bounds, + size=size, + shard_size=shard_size, + **kwargs, + ) + + +class Global(Bucket[_R]): + """ + Defines a single bucket of all the documents within the search + execution context. This context is defined by the indices and the + document types you’re searching on, but is not influenced by the + search query itself. + """ + + name = "global" + + def __init__(self, **kwargs: Any): + super().__init__(**kwargs) + + +class Histogram(Bucket[_R]): + """ + A multi-bucket values source based aggregation that can be applied on + numeric values or numeric range values extracted from the documents. + It dynamically builds fixed size (interval) buckets over the values. + + :arg extended_bounds: Enables extending the bounds of the histogram + beyond the data itself. + :arg hard_bounds: Limits the range of buckets in the histogram. It is + particularly useful in the case of open data ranges that can + result in a very large number of buckets. + :arg field: The name of the field to aggregate on. + :arg interval: The interval for the buckets. Must be a positive + decimal. + :arg min_doc_count: Only returns buckets that have `min_doc_count` + number of documents. By default, the response will fill gaps in + the histogram with empty buckets. + :arg missing: The value to apply to documents that do not have a + value. By default, documents without a value are ignored. + :arg offset: By default, the bucket keys start with 0 and then + continue in even spaced steps of `interval`. The bucket boundaries + can be shifted by using the `offset` option. 
+ :arg order: The sort order of the returned buckets. By default, the + returned buckets are sorted by their key ascending. + :arg script: + :arg format: + :arg keyed: If `true`, returns buckets as a hash instead of an array, + keyed by the bucket keys. + """ + + name = "histogram" + + def __init__( + self, + *, + extended_bounds: Union[ + "types.ExtendedBounds", Dict[str, Any], "DefaultType" + ] = DEFAULT, + hard_bounds: Union[ + "types.ExtendedBounds", Dict[str, Any], "DefaultType" + ] = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + interval: Union[float, "DefaultType"] = DEFAULT, + min_doc_count: Union[int, "DefaultType"] = DEFAULT, + missing: Union[float, "DefaultType"] = DEFAULT, + offset: Union[float, "DefaultType"] = DEFAULT, + order: Union[ + Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]], + Sequence[Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]]], + "DefaultType", + ] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + format: Union[str, "DefaultType"] = DEFAULT, + keyed: Union[bool, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + extended_bounds=extended_bounds, + hard_bounds=hard_bounds, + field=field, + interval=interval, + min_doc_count=min_doc_count, + missing=missing, + offset=offset, + order=order, + script=script, + format=format, + keyed=keyed, + **kwargs, + ) + + def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]: + return FieldBucketData(self, search, data) + + +class IPRange(Bucket[_R]): + """ + A multi-bucket value source based aggregation that enables the user to + define a set of IP ranges - each representing a bucket. + + :arg field: The date field whose values are used to build ranges. + :arg ranges: Array of IP ranges. + """ + + name = "ip_range" + + def __init__( + self, + *, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + ranges: Union[ + Sequence["types.IpRangeAggregationRange"], + Sequence[Dict[str, Any]], + "DefaultType", + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__(field=field, ranges=ranges, **kwargs) + + +class IPPrefix(Bucket[_R]): + """ + A bucket aggregation that groups documents based on the network or + sub-network of an IP address. + + :arg field: (required) The IP address field to aggregation on. The + field mapping type must be `ip`. + :arg prefix_length: (required) Length of the network prefix. For IPv4 + addresses the accepted range is [0, 32]. For IPv6 addresses the + accepted range is [0, 128]. + :arg is_ipv6: Defines whether the prefix applies to IPv6 addresses. + :arg append_prefix_length: Defines whether the prefix length is + appended to IP address keys in the response. + :arg keyed: Defines whether buckets are returned as a hash rather than + an array in the response. + :arg min_doc_count: Minimum number of documents in a bucket for it to + be included in the response. Defaults to `1` if omitted. 
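+
+    A usage sketch (assumes an `ip`-mapped field named `ipv4`; the
+    `elasticsearch.dsl` import path is an assumption)::
+
+        from elasticsearch.dsl import Search
+
+        s = Search(index="network-traffic")
+        s.aggs.bucket("subnets", "ip_prefix", field="ipv4", prefix_length=24)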
+ """ + + name = "ip_prefix" + + def __init__( + self, + *, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + prefix_length: Union[int, "DefaultType"] = DEFAULT, + is_ipv6: Union[bool, "DefaultType"] = DEFAULT, + append_prefix_length: Union[bool, "DefaultType"] = DEFAULT, + keyed: Union[bool, "DefaultType"] = DEFAULT, + min_doc_count: Union[int, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + field=field, + prefix_length=prefix_length, + is_ipv6=is_ipv6, + append_prefix_length=append_prefix_length, + keyed=keyed, + min_doc_count=min_doc_count, + **kwargs, + ) + + +class Inference(Pipeline[_R]): + """ + A parent pipeline aggregation which loads a pre-trained model and + performs inference on the collated result fields from the parent + bucket aggregation. + + :arg model_id: (required) The ID or alias for the trained model. + :arg inference_config: Contains the inference type and its options. + :arg format: `DecimalFormat` pattern for the output value. If + specified, the formatted value is returned in the aggregation’s + `value_as_string` property. + :arg gap_policy: Policy to apply when gaps are found in the data. + Defaults to `skip` if omitted. + :arg buckets_path: Path to the buckets that contain one set of values + to correlate. + """ + + name = "inference" + + def __init__( + self, + *, + model_id: Union[str, "DefaultType"] = DEFAULT, + inference_config: Union[ + "types.InferenceConfigContainer", Dict[str, Any], "DefaultType" + ] = DEFAULT, + format: Union[str, "DefaultType"] = DEFAULT, + gap_policy: Union[ + Literal["skip", "insert_zeros", "keep_values"], "DefaultType" + ] = DEFAULT, + buckets_path: Union[ + str, Sequence[str], Mapping[str, str], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + model_id=model_id, + inference_config=inference_config, + format=format, + gap_policy=gap_policy, + buckets_path=buckets_path, + **kwargs, + ) + + +class Line(Agg[_R]): + """ + :arg point: (required) The name of the geo_point field. + :arg sort: (required) The name of the numeric field to use as the sort + key for ordering the points. When the `geo_line` aggregation is + nested inside a `time_series` aggregation, this field defaults to + `@timestamp`, and any other value will result in error. + :arg include_sort: When `true`, returns an additional array of the + sort values in the feature properties. + :arg sort_order: The order in which the line is sorted (ascending or + descending). Defaults to `asc` if omitted. + :arg size: The maximum length of the line represented in the + aggregation. Valid sizes are between 1 and 10000. Defaults to + `10000` if omitted. + """ + + name = "line" + + def __init__( + self, + *, + point: Union["types.GeoLinePoint", Dict[str, Any], "DefaultType"] = DEFAULT, + sort: Union["types.GeoLineSort", Dict[str, Any], "DefaultType"] = DEFAULT, + include_sort: Union[bool, "DefaultType"] = DEFAULT, + sort_order: Union[Literal["asc", "desc"], "DefaultType"] = DEFAULT, + size: Union[int, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + point=point, + sort=sort, + include_sort=include_sort, + sort_order=sort_order, + size=size, + **kwargs, + ) + + +class MatrixStats(Agg[_R]): + """ + A numeric aggregation that computes the following statistics over a + set of document fields: `count`, `mean`, `variance`, `skewness`, + `kurtosis`, `covariance`, and `covariance`. + + :arg mode: Array value the aggregation will use for array or multi- + valued fields. Defaults to `avg` if omitted. 
+ :arg fields: An array of fields for computing the statistics. + :arg missing: The value to apply to documents that do not have a + value. By default, documents without a value are ignored. + """ + + name = "matrix_stats" + + def __init__( + self, + *, + mode: Union[ + Literal["min", "max", "sum", "avg", "median"], "DefaultType" + ] = DEFAULT, + fields: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + missing: Union[ + Mapping[Union[str, "InstrumentedField"], float], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__(mode=mode, fields=fields, missing=missing, **kwargs) + + +class Max(Agg[_R]): + """ + A single-value metrics aggregation that returns the maximum value + among the numeric values extracted from the aggregated documents. + + :arg format: + :arg field: The field on which to run the aggregation. + :arg missing: The value to apply to documents that do not have a + value. By default, documents without a value are ignored. + :arg script: + """ + + name = "max" + + def __init__( + self, + *, + format: Union[str, "DefaultType"] = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + format=format, field=field, missing=missing, script=script, **kwargs + ) + + +class MaxBucket(Pipeline[_R]): + """ + A sibling pipeline aggregation which identifies the bucket(s) with the + maximum value of a specified metric in a sibling aggregation and + outputs both the value and the key(s) of the bucket(s). + + :arg format: `DecimalFormat` pattern for the output value. If + specified, the formatted value is returned in the aggregation’s + `value_as_string` property. + :arg gap_policy: Policy to apply when gaps are found in the data. + Defaults to `skip` if omitted. + :arg buckets_path: Path to the buckets that contain one set of values + to correlate. + """ + + name = "max_bucket" + + def __init__( + self, + *, + format: Union[str, "DefaultType"] = DEFAULT, + gap_policy: Union[ + Literal["skip", "insert_zeros", "keep_values"], "DefaultType" + ] = DEFAULT, + buckets_path: Union[ + str, Sequence[str], Mapping[str, str], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs + ) + + +class MedianAbsoluteDeviation(Agg[_R]): + """ + A single-value aggregation that approximates the median absolute + deviation of its search results. + + :arg compression: Limits the maximum number of nodes used by the + underlying TDigest algorithm to `20 * compression`, enabling + control of memory usage and approximation error. Defaults to + `1000` if omitted. + :arg format: + :arg field: The field on which to run the aggregation. + :arg missing: The value to apply to documents that do not have a + value. By default, documents without a value are ignored. 
+ :arg script: + """ + + name = "median_absolute_deviation" + + def __init__( + self, + *, + compression: Union[float, "DefaultType"] = DEFAULT, + format: Union[str, "DefaultType"] = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + compression=compression, + format=format, + field=field, + missing=missing, + script=script, + **kwargs, + ) + + +class Min(Agg[_R]): + """ + A single-value metrics aggregation that returns the minimum value + among numeric values extracted from the aggregated documents. + + :arg format: + :arg field: The field on which to run the aggregation. + :arg missing: The value to apply to documents that do not have a + value. By default, documents without a value are ignored. + :arg script: + """ + + name = "min" + + def __init__( + self, + *, + format: Union[str, "DefaultType"] = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + format=format, field=field, missing=missing, script=script, **kwargs + ) + + +class MinBucket(Pipeline[_R]): + """ + A sibling pipeline aggregation which identifies the bucket(s) with the + minimum value of a specified metric in a sibling aggregation and + outputs both the value and the key(s) of the bucket(s). + + :arg format: `DecimalFormat` pattern for the output value. If + specified, the formatted value is returned in the aggregation’s + `value_as_string` property. + :arg gap_policy: Policy to apply when gaps are found in the data. + Defaults to `skip` if omitted. + :arg buckets_path: Path to the buckets that contain one set of values + to correlate. + """ + + name = "min_bucket" + + def __init__( + self, + *, + format: Union[str, "DefaultType"] = DEFAULT, + gap_policy: Union[ + Literal["skip", "insert_zeros", "keep_values"], "DefaultType" + ] = DEFAULT, + buckets_path: Union[ + str, Sequence[str], Mapping[str, str], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs + ) + + +class Missing(Bucket[_R]): + """ + A field data based single bucket aggregation, that creates a bucket of + all documents in the current document set context that are missing a + field value (effectively, missing a field or having the configured + NULL value set). + + :arg field: The name of the field. + :arg missing: + """ + + name = "missing" + + def __init__( + self, + *, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(field=field, missing=missing, **kwargs) + + +class MovingAvg(Pipeline[_R]): + """ """ + + name = "moving_avg" + + def __init__(self, **kwargs: Any): + super().__init__(**kwargs) + + +class LinearMovingAverageAggregation(MovingAvg[_R]): + """ + :arg model: (required) + :arg settings: (required) + :arg minimize: + :arg predict: + :arg window: + :arg format: `DecimalFormat` pattern for the output value. If + specified, the formatted value is returned in the aggregation’s + `value_as_string` property. + :arg gap_policy: Policy to apply when gaps are found in the data. + Defaults to `skip` if omitted. 
+ :arg buckets_path: Path to the buckets that contain one set of values + to correlate. + """ + + def __init__( + self, + *, + model: Any = DEFAULT, + settings: Union["types.EmptyObject", Dict[str, Any], "DefaultType"] = DEFAULT, + minimize: Union[bool, "DefaultType"] = DEFAULT, + predict: Union[int, "DefaultType"] = DEFAULT, + window: Union[int, "DefaultType"] = DEFAULT, + format: Union[str, "DefaultType"] = DEFAULT, + gap_policy: Union[ + Literal["skip", "insert_zeros", "keep_values"], "DefaultType" + ] = DEFAULT, + buckets_path: Union[ + str, Sequence[str], Mapping[str, str], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + model=model, + settings=settings, + minimize=minimize, + predict=predict, + window=window, + format=format, + gap_policy=gap_policy, + buckets_path=buckets_path, + **kwargs, + ) + + +class SimpleMovingAverageAggregation(MovingAvg[_R]): + """ + :arg model: (required) + :arg settings: (required) + :arg minimize: + :arg predict: + :arg window: + :arg format: `DecimalFormat` pattern for the output value. If + specified, the formatted value is returned in the aggregation’s + `value_as_string` property. + :arg gap_policy: Policy to apply when gaps are found in the data. + Defaults to `skip` if omitted. + :arg buckets_path: Path to the buckets that contain one set of values + to correlate. + """ + + def __init__( + self, + *, + model: Any = DEFAULT, + settings: Union["types.EmptyObject", Dict[str, Any], "DefaultType"] = DEFAULT, + minimize: Union[bool, "DefaultType"] = DEFAULT, + predict: Union[int, "DefaultType"] = DEFAULT, + window: Union[int, "DefaultType"] = DEFAULT, + format: Union[str, "DefaultType"] = DEFAULT, + gap_policy: Union[ + Literal["skip", "insert_zeros", "keep_values"], "DefaultType" + ] = DEFAULT, + buckets_path: Union[ + str, Sequence[str], Mapping[str, str], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + model=model, + settings=settings, + minimize=minimize, + predict=predict, + window=window, + format=format, + gap_policy=gap_policy, + buckets_path=buckets_path, + **kwargs, + ) + + +class EwmaMovingAverageAggregation(MovingAvg[_R]): + """ + :arg model: (required) + :arg settings: (required) + :arg minimize: + :arg predict: + :arg window: + :arg format: `DecimalFormat` pattern for the output value. If + specified, the formatted value is returned in the aggregation’s + `value_as_string` property. + :arg gap_policy: Policy to apply when gaps are found in the data. + Defaults to `skip` if omitted. + :arg buckets_path: Path to the buckets that contain one set of values + to correlate. 
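+
+    A construction sketch (`moving_avg` is a legacy aggregation that
+    recent Elasticsearch versions may reject, so only client-side
+    serialization is shown; the `elasticsearch.dsl` import path is an
+    assumption)::
+
+        from elasticsearch.dsl import aggs
+
+        ma = aggs.EwmaMovingAverageAggregation(
+            model="ewma",
+            settings={"alpha": 0.3},
+            window=5,
+            buckets_path="the_sum",
+        )
+        ma.to_dict()
+        # {'moving_avg': {'model': 'ewma', 'settings': {'alpha': 0.3}, ...}}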
+ """ + + def __init__( + self, + *, + model: Any = DEFAULT, + settings: Union[ + "types.EwmaModelSettings", Dict[str, Any], "DefaultType" + ] = DEFAULT, + minimize: Union[bool, "DefaultType"] = DEFAULT, + predict: Union[int, "DefaultType"] = DEFAULT, + window: Union[int, "DefaultType"] = DEFAULT, + format: Union[str, "DefaultType"] = DEFAULT, + gap_policy: Union[ + Literal["skip", "insert_zeros", "keep_values"], "DefaultType" + ] = DEFAULT, + buckets_path: Union[ + str, Sequence[str], Mapping[str, str], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + model=model, + settings=settings, + minimize=minimize, + predict=predict, + window=window, + format=format, + gap_policy=gap_policy, + buckets_path=buckets_path, + **kwargs, + ) + + +class HoltMovingAverageAggregation(MovingAvg[_R]): + """ + :arg model: (required) + :arg settings: (required) + :arg minimize: + :arg predict: + :arg window: + :arg format: `DecimalFormat` pattern for the output value. If + specified, the formatted value is returned in the aggregation’s + `value_as_string` property. + :arg gap_policy: Policy to apply when gaps are found in the data. + Defaults to `skip` if omitted. + :arg buckets_path: Path to the buckets that contain one set of values + to correlate. + """ + + def __init__( + self, + *, + model: Any = DEFAULT, + settings: Union[ + "types.HoltLinearModelSettings", Dict[str, Any], "DefaultType" + ] = DEFAULT, + minimize: Union[bool, "DefaultType"] = DEFAULT, + predict: Union[int, "DefaultType"] = DEFAULT, + window: Union[int, "DefaultType"] = DEFAULT, + format: Union[str, "DefaultType"] = DEFAULT, + gap_policy: Union[ + Literal["skip", "insert_zeros", "keep_values"], "DefaultType" + ] = DEFAULT, + buckets_path: Union[ + str, Sequence[str], Mapping[str, str], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + model=model, + settings=settings, + minimize=minimize, + predict=predict, + window=window, + format=format, + gap_policy=gap_policy, + buckets_path=buckets_path, + **kwargs, + ) + + +class HoltWintersMovingAverageAggregation(MovingAvg[_R]): + """ + :arg model: (required) + :arg settings: (required) + :arg minimize: + :arg predict: + :arg window: + :arg format: `DecimalFormat` pattern for the output value. If + specified, the formatted value is returned in the aggregation’s + `value_as_string` property. + :arg gap_policy: Policy to apply when gaps are found in the data. + Defaults to `skip` if omitted. + :arg buckets_path: Path to the buckets that contain one set of values + to correlate. + """ + + def __init__( + self, + *, + model: Any = DEFAULT, + settings: Union[ + "types.HoltWintersModelSettings", Dict[str, Any], "DefaultType" + ] = DEFAULT, + minimize: Union[bool, "DefaultType"] = DEFAULT, + predict: Union[int, "DefaultType"] = DEFAULT, + window: Union[int, "DefaultType"] = DEFAULT, + format: Union[str, "DefaultType"] = DEFAULT, + gap_policy: Union[ + Literal["skip", "insert_zeros", "keep_values"], "DefaultType" + ] = DEFAULT, + buckets_path: Union[ + str, Sequence[str], Mapping[str, str], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + model=model, + settings=settings, + minimize=minimize, + predict=predict, + window=window, + format=format, + gap_policy=gap_policy, + buckets_path=buckets_path, + **kwargs, + ) + + +class MovingPercentiles(Pipeline[_R]): + """ + Given an ordered series of percentiles, "slides" a window across those + percentiles and computes cumulative percentiles. 
+ + :arg window: The size of window to "slide" across the histogram. + :arg shift: By default, the window consists of the last n values + excluding the current bucket. Increasing `shift` by 1, moves the + starting window position by 1 to the right. + :arg keyed: + :arg format: `DecimalFormat` pattern for the output value. If + specified, the formatted value is returned in the aggregation’s + `value_as_string` property. + :arg gap_policy: Policy to apply when gaps are found in the data. + Defaults to `skip` if omitted. + :arg buckets_path: Path to the buckets that contain one set of values + to correlate. + """ + + name = "moving_percentiles" + + def __init__( + self, + *, + window: Union[int, "DefaultType"] = DEFAULT, + shift: Union[int, "DefaultType"] = DEFAULT, + keyed: Union[bool, "DefaultType"] = DEFAULT, + format: Union[str, "DefaultType"] = DEFAULT, + gap_policy: Union[ + Literal["skip", "insert_zeros", "keep_values"], "DefaultType" + ] = DEFAULT, + buckets_path: Union[ + str, Sequence[str], Mapping[str, str], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + window=window, + shift=shift, + keyed=keyed, + format=format, + gap_policy=gap_policy, + buckets_path=buckets_path, + **kwargs, + ) + + +class MovingFn(Pipeline[_R]): + """ + Given an ordered series of data, "slides" a window across the data and + runs a custom script on each window of data. For convenience, a number + of common functions are predefined such as `min`, `max`, and moving + averages. + + :arg script: The script that should be executed on each window of + data. + :arg shift: By default, the window consists of the last n values + excluding the current bucket. Increasing `shift` by 1, moves the + starting window position by 1 to the right. + :arg window: The size of window to "slide" across the histogram. + :arg format: `DecimalFormat` pattern for the output value. If + specified, the formatted value is returned in the aggregation’s + `value_as_string` property. + :arg gap_policy: Policy to apply when gaps are found in the data. + Defaults to `skip` if omitted. + :arg buckets_path: Path to the buckets that contain one set of values + to correlate. + """ + + name = "moving_fn" + + def __init__( + self, + *, + script: Union[str, "DefaultType"] = DEFAULT, + shift: Union[int, "DefaultType"] = DEFAULT, + window: Union[int, "DefaultType"] = DEFAULT, + format: Union[str, "DefaultType"] = DEFAULT, + gap_policy: Union[ + Literal["skip", "insert_zeros", "keep_values"], "DefaultType" + ] = DEFAULT, + buckets_path: Union[ + str, Sequence[str], Mapping[str, str], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + script=script, + shift=shift, + window=window, + format=format, + gap_policy=gap_policy, + buckets_path=buckets_path, + **kwargs, + ) + + +class MultiTerms(Bucket[_R]): + """ + A multi-bucket value source based aggregation where buckets are + dynamically built - one per unique set of values. + + :arg terms: (required) The field from which to generate sets of terms. + :arg collect_mode: Specifies the strategy for data collection. + Defaults to `breadth_first` if omitted. + :arg order: Specifies the sort order of the buckets. Defaults to + sorting by descending document count. + :arg min_doc_count: The minimum number of documents in a bucket for it + to be returned. Defaults to `1` if omitted. + :arg shard_min_doc_count: The minimum number of documents in a bucket + on each shard for it to be returned. Defaults to `1` if omitted. 
+ :arg shard_size: The number of candidate terms produced by each shard. + By default, `shard_size` will be automatically estimated based on + the number of shards and the `size` parameter. + :arg show_term_doc_count_error: Calculates the doc count error on per + term basis. + :arg size: The number of term buckets should be returned out of the + overall terms list. Defaults to `10` if omitted. + """ + + name = "multi_terms" + + def __init__( + self, + *, + terms: Union[ + Sequence["types.MultiTermLookup"], Sequence[Dict[str, Any]], "DefaultType" + ] = DEFAULT, + collect_mode: Union[ + Literal["depth_first", "breadth_first"], "DefaultType" + ] = DEFAULT, + order: Union[ + Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]], + Sequence[Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]]], + "DefaultType", + ] = DEFAULT, + min_doc_count: Union[int, "DefaultType"] = DEFAULT, + shard_min_doc_count: Union[int, "DefaultType"] = DEFAULT, + shard_size: Union[int, "DefaultType"] = DEFAULT, + show_term_doc_count_error: Union[bool, "DefaultType"] = DEFAULT, + size: Union[int, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + terms=terms, + collect_mode=collect_mode, + order=order, + min_doc_count=min_doc_count, + shard_min_doc_count=shard_min_doc_count, + shard_size=shard_size, + show_term_doc_count_error=show_term_doc_count_error, + size=size, + **kwargs, + ) + + +class Nested(Bucket[_R]): + """ + A special single bucket aggregation that enables aggregating nested + documents. + + :arg path: The path to the field of type `nested`. + """ + + name = "nested" + + def __init__( + self, + path: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(path=path, **kwargs) + + +class Normalize(Pipeline[_R]): + """ + A parent pipeline aggregation which calculates the specific + normalized/rescaled value for a specific bucket value. + + :arg method: The specific method to apply. + :arg format: `DecimalFormat` pattern for the output value. If + specified, the formatted value is returned in the aggregation’s + `value_as_string` property. + :arg gap_policy: Policy to apply when gaps are found in the data. + Defaults to `skip` if omitted. + :arg buckets_path: Path to the buckets that contain one set of values + to correlate. + """ + + name = "normalize" + + def __init__( + self, + *, + method: Union[ + Literal[ + "rescale_0_1", + "rescale_0_100", + "percent_of_sum", + "mean", + "z-score", + "softmax", + ], + "DefaultType", + ] = DEFAULT, + format: Union[str, "DefaultType"] = DEFAULT, + gap_policy: Union[ + Literal["skip", "insert_zeros", "keep_values"], "DefaultType" + ] = DEFAULT, + buckets_path: Union[ + str, Sequence[str], Mapping[str, str], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + method=method, + format=format, + gap_policy=gap_policy, + buckets_path=buckets_path, + **kwargs, + ) + + +class Parent(Bucket[_R]): + """ + A special single bucket aggregation that selects parent documents that + have the specified type, as defined in a `join` field. + + :arg type: The child type that should be selected. + """ + + name = "parent" + + def __init__(self, type: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any): + super().__init__(type=type, **kwargs) + + +class PercentileRanks(Agg[_R]): + """ + A multi-value metrics aggregation that calculates one or more + percentile ranks over numeric values extracted from the aggregated + documents. 
+ + :arg keyed: By default, the aggregation associates a unique string key + with each bucket and returns the ranges as a hash rather than an + array. Set to `false` to disable this behavior. Defaults to `True` + if omitted. + :arg values: An array of values for which to calculate the percentile + ranks. + :arg hdr: Uses the alternative High Dynamic Range Histogram algorithm + to calculate percentile ranks. + :arg tdigest: Sets parameters for the default TDigest algorithm used + to calculate percentile ranks. + :arg format: + :arg field: The field on which to run the aggregation. + :arg missing: The value to apply to documents that do not have a + value. By default, documents without a value are ignored. + :arg script: + """ + + name = "percentile_ranks" + + def __init__( + self, + *, + keyed: Union[bool, "DefaultType"] = DEFAULT, + values: Union[Sequence[float], None, "DefaultType"] = DEFAULT, + hdr: Union["types.HdrMethod", Dict[str, Any], "DefaultType"] = DEFAULT, + tdigest: Union["types.TDigest", Dict[str, Any], "DefaultType"] = DEFAULT, + format: Union[str, "DefaultType"] = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + keyed=keyed, + values=values, + hdr=hdr, + tdigest=tdigest, + format=format, + field=field, + missing=missing, + script=script, + **kwargs, + ) + + +class Percentiles(Agg[_R]): + """ + A multi-value metrics aggregation that calculates one or more + percentiles over numeric values extracted from the aggregated + documents. + + :arg keyed: By default, the aggregation associates a unique string key + with each bucket and returns the ranges as a hash rather than an + array. Set to `false` to disable this behavior. Defaults to `True` + if omitted. + :arg percents: The percentiles to calculate. + :arg hdr: Uses the alternative High Dynamic Range Histogram algorithm + to calculate percentiles. + :arg tdigest: Sets parameters for the default TDigest algorithm used + to calculate percentiles. + :arg format: + :arg field: The field on which to run the aggregation. + :arg missing: The value to apply to documents that do not have a + value. By default, documents without a value are ignored. + :arg script: + """ + + name = "percentiles" + + def __init__( + self, + *, + keyed: Union[bool, "DefaultType"] = DEFAULT, + percents: Union[Sequence[float], "DefaultType"] = DEFAULT, + hdr: Union["types.HdrMethod", Dict[str, Any], "DefaultType"] = DEFAULT, + tdigest: Union["types.TDigest", Dict[str, Any], "DefaultType"] = DEFAULT, + format: Union[str, "DefaultType"] = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + keyed=keyed, + percents=percents, + hdr=hdr, + tdigest=tdigest, + format=format, + field=field, + missing=missing, + script=script, + **kwargs, + ) + + +class PercentilesBucket(Pipeline[_R]): + """ + A sibling pipeline aggregation which calculates percentiles across all + bucket of a specified metric in a sibling aggregation. + + :arg percents: The list of percentiles to calculate. + :arg format: `DecimalFormat` pattern for the output value. If + specified, the formatted value is returned in the aggregation’s + `value_as_string` property. 
+ :arg gap_policy: Policy to apply when gaps are found in the data. + Defaults to `skip` if omitted. + :arg buckets_path: Path to the buckets that contain one set of values + to correlate. + """ + + name = "percentiles_bucket" + + def __init__( + self, + *, + percents: Union[Sequence[float], "DefaultType"] = DEFAULT, + format: Union[str, "DefaultType"] = DEFAULT, + gap_policy: Union[ + Literal["skip", "insert_zeros", "keep_values"], "DefaultType" + ] = DEFAULT, + buckets_path: Union[ + str, Sequence[str], Mapping[str, str], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + percents=percents, + format=format, + gap_policy=gap_policy, + buckets_path=buckets_path, + **kwargs, + ) + + +class Range(Bucket[_R]): + """ + A multi-bucket value source based aggregation that enables the user to + define a set of ranges - each representing a bucket. + + :arg field: The date field whose values are use to build ranges. + :arg missing: The value to apply to documents that do not have a + value. By default, documents without a value are ignored. + :arg ranges: An array of ranges used to bucket documents. + :arg script: + :arg keyed: Set to `true` to associate a unique string key with each + bucket and return the ranges as a hash rather than an array. + :arg format: + """ + + name = "range" + + def __init__( + self, + *, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + missing: Union[int, "DefaultType"] = DEFAULT, + ranges: Union[ + Sequence["types.AggregationRange"], Sequence[Dict[str, Any]], "DefaultType" + ] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + keyed: Union[bool, "DefaultType"] = DEFAULT, + format: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + field=field, + missing=missing, + ranges=ranges, + script=script, + keyed=keyed, + format=format, + **kwargs, + ) + + +class RareTerms(Bucket[_R]): + """ + A multi-bucket value source based aggregation which finds "rare" + terms — terms that are at the long-tail of the distribution and are + not frequent. + + :arg exclude: Terms that should be excluded from the aggregation. + :arg field: The field from which to return rare terms. + :arg include: Terms that should be included in the aggregation. + :arg max_doc_count: The maximum number of documents a term should + appear in. Defaults to `1` if omitted. + :arg missing: The value to apply to documents that do not have a + value. By default, documents without a value are ignored. + :arg precision: The precision of the internal CuckooFilters. Smaller + precision leads to better approximation, but higher memory usage. + Defaults to `0.001` if omitted. + :arg value_type: + """ + + name = "rare_terms" + + def __init__( + self, + *, + exclude: Union[str, Sequence[str], "DefaultType"] = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + include: Union[ + str, Sequence[str], "types.TermsPartition", Dict[str, Any], "DefaultType" + ] = DEFAULT, + max_doc_count: Union[int, "DefaultType"] = DEFAULT, + missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, + precision: Union[float, "DefaultType"] = DEFAULT, + value_type: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + exclude=exclude, + field=field, + include=include, + max_doc_count=max_doc_count, + missing=missing, + precision=precision, + value_type=value_type, + **kwargs, + ) + + +class Rate(Agg[_R]): + """ + Calculates a rate of documents or a field in each bucket. 
Can only be + used inside a `date_histogram` or `composite` aggregation. + + :arg unit: The interval used to calculate the rate. By default, the + interval of the `date_histogram` is used. + :arg mode: How the rate is calculated. Defaults to `sum` if omitted. + :arg format: + :arg field: The field on which to run the aggregation. + :arg missing: The value to apply to documents that do not have a + value. By default, documents without a value are ignored. + :arg script: + """ + + name = "rate" + + def __init__( + self, + *, + unit: Union[ + Literal[ + "second", "minute", "hour", "day", "week", "month", "quarter", "year" + ], + "DefaultType", + ] = DEFAULT, + mode: Union[Literal["sum", "value_count"], "DefaultType"] = DEFAULT, + format: Union[str, "DefaultType"] = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + unit=unit, + mode=mode, + format=format, + field=field, + missing=missing, + script=script, + **kwargs, + ) + + +class ReverseNested(Bucket[_R]): + """ + A special single bucket aggregation that enables aggregating on parent + documents from nested documents. Should only be defined inside a + `nested` aggregation. + + :arg path: Defines the nested object field that should be joined back + to. The default is empty, which means that it joins back to the + root/main document level. + """ + + name = "reverse_nested" + + def __init__( + self, + path: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(path=path, **kwargs) + + +class RandomSampler(Bucket[_R]): + """ + A single bucket aggregation that randomly includes documents in the + aggregated results. Sampling provides significant speed improvement at + the cost of accuracy. + + :arg probability: (required) The probability that a document will be + included in the aggregated data. Must be greater than 0, less than + 0.5, or exactly 1. The lower the probability, the fewer documents + are matched. + :arg seed: The seed to generate the random sampling of documents. When + a seed is provided, the random subset of documents is the same + between calls. + :arg shard_seed: When combined with seed, setting shard_seed ensures + 100% consistent sampling over shards where data is exactly the + same. + """ + + name = "random_sampler" + + def __init__( + self, + *, + probability: Union[float, "DefaultType"] = DEFAULT, + seed: Union[int, "DefaultType"] = DEFAULT, + shard_seed: Union[int, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + probability=probability, seed=seed, shard_seed=shard_seed, **kwargs + ) + + +class Sampler(Bucket[_R]): + """ + A filtering aggregation used to limit any sub aggregations' processing + to a sample of the top-scoring documents. + + :arg shard_size: Limits how many top-scoring documents are collected + in the sample processed on each shard. Defaults to `100` if + omitted. + """ + + name = "sampler" + + def __init__(self, shard_size: Union[int, "DefaultType"] = DEFAULT, **kwargs: Any): + super().__init__(shard_size=shard_size, **kwargs) + + +class ScriptedMetric(Agg[_R]): + """ + A metric aggregation that uses scripts to provide a metric output. + + :arg combine_script: Runs once on each shard after document collection + is complete. Allows the aggregation to consolidate the state + returned from each shard. 
+ :arg init_script: Runs prior to any collection of documents. Allows + the aggregation to set up any initial state. + :arg map_script: Run once per document collected. If no + `combine_script` is specified, the resulting state needs to be + stored in the `state` object. + :arg params: A global object with script parameters for `init`, `map` + and `combine` scripts. It is shared between the scripts. + :arg reduce_script: Runs once on the coordinating node after all + shards have returned their results. The script is provided with + access to a variable `states`, which is an array of the result of + the `combine_script` on each shard. + :arg field: The field on which to run the aggregation. + :arg missing: The value to apply to documents that do not have a + value. By default, documents without a value are ignored. + :arg script: + """ + + name = "scripted_metric" + + def __init__( + self, + *, + combine_script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + init_script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + map_script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + params: Union[Mapping[str, Any], "DefaultType"] = DEFAULT, + reduce_script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + combine_script=combine_script, + init_script=init_script, + map_script=map_script, + params=params, + reduce_script=reduce_script, + field=field, + missing=missing, + script=script, + **kwargs, + ) + + +class SerialDiff(Pipeline[_R]): + """ + An aggregation that subtracts values in a time series from themselves + at different time lags or periods. + + :arg lag: The historical bucket to subtract from the current value. + Must be a positive, non-zero integer. + :arg format: `DecimalFormat` pattern for the output value. If + specified, the formatted value is returned in the aggregation’s + `value_as_string` property. + :arg gap_policy: Policy to apply when gaps are found in the data. + Defaults to `skip` if omitted. + :arg buckets_path: Path to the buckets that contain one set of values + to correlate. + """ + + name = "serial_diff" + + def __init__( + self, + *, + lag: Union[int, "DefaultType"] = DEFAULT, + format: Union[str, "DefaultType"] = DEFAULT, + gap_policy: Union[ + Literal["skip", "insert_zeros", "keep_values"], "DefaultType" + ] = DEFAULT, + buckets_path: Union[ + str, Sequence[str], Mapping[str, str], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + lag=lag, + format=format, + gap_policy=gap_policy, + buckets_path=buckets_path, + **kwargs, + ) + + +class SignificantTerms(Bucket[_R]): + """ + Returns interesting or unusual occurrences of terms in a set. + + :arg background_filter: A background filter that can be used to focus + in on significant terms within a narrower context, instead of the + entire index. + :arg chi_square: Use Chi square, as described in "Information + Retrieval", Manning et al., Chapter 13.5.2, as the significance + score. + :arg exclude: Terms to exclude. + :arg execution_hint: Mechanism by which the aggregation should be + executed: using field values directly or using global ordinals. + :arg field: The field from which to return significant terms. 
+ :arg gnd: Use Google normalized distance as described in "The Google + Similarity Distance", Cilibrasi and Vitanyi, 2007, as the + significance score. + :arg include: Terms to include. + :arg jlh: Use JLH score as the significance score. + :arg min_doc_count: Only return terms that are found in more than + `min_doc_count` hits. Defaults to `3` if omitted. + :arg mutual_information: Use mutual information as described in + "Information Retrieval", Manning et al., Chapter 13.5.1, as the + significance score. + :arg percentage: A simple calculation of the number of documents in + the foreground sample with a term divided by the number of + documents in the background with the term. + :arg script_heuristic: Customized score, implemented via a script. + :arg shard_min_doc_count: Regulates the certainty a shard has if the + term should actually be added to the candidate list or not with + respect to the `min_doc_count`. Terms will only be considered if + their local shard frequency within the set is higher than the + `shard_min_doc_count`. + :arg shard_size: Can be used to control the volumes of candidate terms + produced by each shard. By default, `shard_size` will be + automatically estimated based on the number of shards and the + `size` parameter. + :arg size: The number of buckets returned out of the overall terms + list. + """ + + name = "significant_terms" + _param_defs = { + "background_filter": {"type": "query"}, + } + + def __init__( + self, + *, + background_filter: Union[Query, "DefaultType"] = DEFAULT, + chi_square: Union[ + "types.ChiSquareHeuristic", Dict[str, Any], "DefaultType" + ] = DEFAULT, + exclude: Union[str, Sequence[str], "DefaultType"] = DEFAULT, + execution_hint: Union[ + Literal[ + "map", + "global_ordinals", + "global_ordinals_hash", + "global_ordinals_low_cardinality", + ], + "DefaultType", + ] = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + gnd: Union[ + "types.GoogleNormalizedDistanceHeuristic", Dict[str, Any], "DefaultType" + ] = DEFAULT, + include: Union[ + str, Sequence[str], "types.TermsPartition", Dict[str, Any], "DefaultType" + ] = DEFAULT, + jlh: Union["types.EmptyObject", Dict[str, Any], "DefaultType"] = DEFAULT, + min_doc_count: Union[int, "DefaultType"] = DEFAULT, + mutual_information: Union[ + "types.MutualInformationHeuristic", Dict[str, Any], "DefaultType" + ] = DEFAULT, + percentage: Union[ + "types.PercentageScoreHeuristic", Dict[str, Any], "DefaultType" + ] = DEFAULT, + script_heuristic: Union[ + "types.ScriptedHeuristic", Dict[str, Any], "DefaultType" + ] = DEFAULT, + shard_min_doc_count: Union[int, "DefaultType"] = DEFAULT, + shard_size: Union[int, "DefaultType"] = DEFAULT, + size: Union[int, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + background_filter=background_filter, + chi_square=chi_square, + exclude=exclude, + execution_hint=execution_hint, + field=field, + gnd=gnd, + include=include, + jlh=jlh, + min_doc_count=min_doc_count, + mutual_information=mutual_information, + percentage=percentage, + script_heuristic=script_heuristic, + shard_min_doc_count=shard_min_doc_count, + shard_size=shard_size, + size=size, + **kwargs, + ) + + +class SignificantText(Bucket[_R]): + """ + Returns interesting or unusual occurrences of free-text terms in a + set. + + :arg background_filter: A background filter that can be used to focus + in on significant terms within a narrower context, instead of the + entire index. 
+    :arg chi_square: Use Chi square, as described in "Information
+        Retrieval", Manning et al., Chapter 13.5.2, as the significance
+        score.
+    :arg exclude: Values to exclude.
+    :arg execution_hint: Determines whether the aggregation will use field
+        values directly or global ordinals.
+    :arg field: The field from which to return significant text.
+    :arg filter_duplicate_text: Whether to filter out duplicate text to
+        deal with noisy data.
+    :arg gnd: Use Google normalized distance as described in "The Google
+        Similarity Distance", Cilibrasi and Vitanyi, 2007, as the
+        significance score.
+    :arg include: Values to include.
+    :arg jlh: Use JLH score as the significance score.
+    :arg min_doc_count: Only return values that are found in more than
+        `min_doc_count` hits. Defaults to `3` if omitted.
+    :arg mutual_information: Use mutual information as described in
+        "Information Retrieval", Manning et al., Chapter 13.5.1, as the
+        significance score.
+    :arg percentage: A simple calculation of the number of documents in
+        the foreground sample with a term divided by the number of
+        documents in the background with the term.
+    :arg script_heuristic: Customized score, implemented via a script.
+    :arg shard_min_doc_count: Regulates the certainty a shard has if the
+        values should actually be added to the candidate list or not with
+        respect to the min_doc_count. Values will only be considered if
+        their local shard frequency within the set is higher than the
+        `shard_min_doc_count`.
+    :arg shard_size: The number of candidate terms produced by each shard.
+        By default, `shard_size` will be automatically estimated based on
+        the number of shards and the `size` parameter.
+    :arg size: The number of buckets returned out of the overall terms
+        list.
+    :arg source_fields: Overrides the JSON `_source` fields from which
+        text will be analyzed.
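
`significant_text` re-analyzes the `_source` text of the documents in each bucket, so it is normally run on a small sample of the best-matching documents rather than on the whole result set. A minimal sketch, assuming a hypothetical `news` index with a `content` text field::

    from elasticsearch.dsl import Search

    s = Search(index="news").query("match", content="bird flu")
    s.aggs.bucket("sample", "sampler", shard_size=100) \
          .bucket("keywords", "significant_text",
                  field="content", filter_duplicate_text=True)
    print(s.to_dict())
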
+ """ + + name = "significant_text" + _param_defs = { + "background_filter": {"type": "query"}, + } + + def __init__( + self, + *, + background_filter: Union[Query, "DefaultType"] = DEFAULT, + chi_square: Union[ + "types.ChiSquareHeuristic", Dict[str, Any], "DefaultType" + ] = DEFAULT, + exclude: Union[str, Sequence[str], "DefaultType"] = DEFAULT, + execution_hint: Union[ + Literal[ + "map", + "global_ordinals", + "global_ordinals_hash", + "global_ordinals_low_cardinality", + ], + "DefaultType", + ] = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + filter_duplicate_text: Union[bool, "DefaultType"] = DEFAULT, + gnd: Union[ + "types.GoogleNormalizedDistanceHeuristic", Dict[str, Any], "DefaultType" + ] = DEFAULT, + include: Union[ + str, Sequence[str], "types.TermsPartition", Dict[str, Any], "DefaultType" + ] = DEFAULT, + jlh: Union["types.EmptyObject", Dict[str, Any], "DefaultType"] = DEFAULT, + min_doc_count: Union[int, "DefaultType"] = DEFAULT, + mutual_information: Union[ + "types.MutualInformationHeuristic", Dict[str, Any], "DefaultType" + ] = DEFAULT, + percentage: Union[ + "types.PercentageScoreHeuristic", Dict[str, Any], "DefaultType" + ] = DEFAULT, + script_heuristic: Union[ + "types.ScriptedHeuristic", Dict[str, Any], "DefaultType" + ] = DEFAULT, + shard_min_doc_count: Union[int, "DefaultType"] = DEFAULT, + shard_size: Union[int, "DefaultType"] = DEFAULT, + size: Union[int, "DefaultType"] = DEFAULT, + source_fields: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + background_filter=background_filter, + chi_square=chi_square, + exclude=exclude, + execution_hint=execution_hint, + field=field, + filter_duplicate_text=filter_duplicate_text, + gnd=gnd, + include=include, + jlh=jlh, + min_doc_count=min_doc_count, + mutual_information=mutual_information, + percentage=percentage, + script_heuristic=script_heuristic, + shard_min_doc_count=shard_min_doc_count, + shard_size=shard_size, + size=size, + source_fields=source_fields, + **kwargs, + ) + + +class Stats(Agg[_R]): + """ + A multi-value metrics aggregation that computes stats over numeric + values extracted from the aggregated documents. + + :arg format: + :arg field: The field on which to run the aggregation. + :arg missing: The value to apply to documents that do not have a + value. By default, documents without a value are ignored. + :arg script: + """ + + name = "stats" + + def __init__( + self, + *, + format: Union[str, "DefaultType"] = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + format=format, field=field, missing=missing, script=script, **kwargs + ) + + +class StatsBucket(Pipeline[_R]): + """ + A sibling pipeline aggregation which calculates a variety of stats + across all bucket of a specified metric in a sibling aggregation. + + :arg format: `DecimalFormat` pattern for the output value. If + specified, the formatted value is returned in the aggregation’s + `value_as_string` property. + :arg gap_policy: Policy to apply when gaps are found in the data. + Defaults to `skip` if omitted. + :arg buckets_path: Path to the buckets that contain one set of values + to correlate. 
+ """ + + name = "stats_bucket" + + def __init__( + self, + *, + format: Union[str, "DefaultType"] = DEFAULT, + gap_policy: Union[ + Literal["skip", "insert_zeros", "keep_values"], "DefaultType" + ] = DEFAULT, + buckets_path: Union[ + str, Sequence[str], Mapping[str, str], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs + ) + + +class StringStats(Agg[_R]): + """ + A multi-value metrics aggregation that computes statistics over string + values extracted from the aggregated documents. + + :arg show_distribution: Shows the probability distribution for all + characters. + :arg field: The field on which to run the aggregation. + :arg missing: The value to apply to documents that do not have a + value. By default, documents without a value are ignored. + :arg script: + """ + + name = "string_stats" + + def __init__( + self, + *, + show_distribution: Union[bool, "DefaultType"] = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + show_distribution=show_distribution, + field=field, + missing=missing, + script=script, + **kwargs, + ) + + +class Sum(Agg[_R]): + """ + A single-value metrics aggregation that sums numeric values that are + extracted from the aggregated documents. + + :arg format: + :arg field: The field on which to run the aggregation. + :arg missing: The value to apply to documents that do not have a + value. By default, documents without a value are ignored. + :arg script: + """ + + name = "sum" + + def __init__( + self, + *, + format: Union[str, "DefaultType"] = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + format=format, field=field, missing=missing, script=script, **kwargs + ) + + +class SumBucket(Pipeline[_R]): + """ + A sibling pipeline aggregation which calculates the sum of a specified + metric across all buckets in a sibling aggregation. + + :arg format: `DecimalFormat` pattern for the output value. If + specified, the formatted value is returned in the aggregation’s + `value_as_string` property. + :arg gap_policy: Policy to apply when gaps are found in the data. + Defaults to `skip` if omitted. + :arg buckets_path: Path to the buckets that contain one set of values + to correlate. + """ + + name = "sum_bucket" + + def __init__( + self, + *, + format: Union[str, "DefaultType"] = DEFAULT, + gap_policy: Union[ + Literal["skip", "insert_zeros", "keep_values"], "DefaultType" + ] = DEFAULT, + buckets_path: Union[ + str, Sequence[str], Mapping[str, str], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs + ) + + +class Terms(Bucket[_R]): + """ + A multi-bucket value source based aggregation where buckets are + dynamically built - one per unique value. + + :arg collect_mode: Determines how child aggregations should be + calculated: breadth-first or depth-first. + :arg exclude: Values to exclude. Accepts regular expressions and + partitions. + :arg execution_hint: Determines whether the aggregation will use field + values directly or global ordinals. 
+ :arg field: The field from which to return terms. + :arg include: Values to include. Accepts regular expressions and + partitions. + :arg min_doc_count: Only return values that are found in more than + `min_doc_count` hits. Defaults to `1` if omitted. + :arg missing: The value to apply to documents that do not have a + value. By default, documents without a value are ignored. + :arg missing_order: + :arg missing_bucket: + :arg value_type: Coerced unmapped fields into the specified type. + :arg order: Specifies the sort order of the buckets. Defaults to + sorting by descending document count. + :arg script: + :arg shard_min_doc_count: Regulates the certainty a shard has if the + term should actually be added to the candidate list or not with + respect to the `min_doc_count`. Terms will only be considered if + their local shard frequency within the set is higher than the + `shard_min_doc_count`. + :arg shard_size: The number of candidate terms produced by each shard. + By default, `shard_size` will be automatically estimated based on + the number of shards and the `size` parameter. + :arg show_term_doc_count_error: Set to `true` to return the + `doc_count_error_upper_bound`, which is an upper bound to the + error on the `doc_count` returned by each shard. + :arg size: The number of buckets returned out of the overall terms + list. Defaults to `10` if omitted. + :arg format: + """ + + name = "terms" + + def __init__( + self, + *, + collect_mode: Union[ + Literal["depth_first", "breadth_first"], "DefaultType" + ] = DEFAULT, + exclude: Union[str, Sequence[str], "DefaultType"] = DEFAULT, + execution_hint: Union[ + Literal[ + "map", + "global_ordinals", + "global_ordinals_hash", + "global_ordinals_low_cardinality", + ], + "DefaultType", + ] = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + include: Union[ + str, Sequence[str], "types.TermsPartition", Dict[str, Any], "DefaultType" + ] = DEFAULT, + min_doc_count: Union[int, "DefaultType"] = DEFAULT, + missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, + missing_order: Union[ + Literal["first", "last", "default"], "DefaultType" + ] = DEFAULT, + missing_bucket: Union[bool, "DefaultType"] = DEFAULT, + value_type: Union[str, "DefaultType"] = DEFAULT, + order: Union[ + Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]], + Sequence[Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]]], + "DefaultType", + ] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + shard_min_doc_count: Union[int, "DefaultType"] = DEFAULT, + shard_size: Union[int, "DefaultType"] = DEFAULT, + show_term_doc_count_error: Union[bool, "DefaultType"] = DEFAULT, + size: Union[int, "DefaultType"] = DEFAULT, + format: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + collect_mode=collect_mode, + exclude=exclude, + execution_hint=execution_hint, + field=field, + include=include, + min_doc_count=min_doc_count, + missing=missing, + missing_order=missing_order, + missing_bucket=missing_bucket, + value_type=value_type, + order=order, + script=script, + shard_min_doc_count=shard_min_doc_count, + shard_size=shard_size, + show_term_doc_count_error=show_term_doc_count_error, + size=size, + format=format, + **kwargs, + ) + + def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]: + return FieldBucketData(self, search, data) + + +class TimeSeries(Bucket[_R]): + """ + The time series aggregation queries data created using a time series + index. 
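
`Terms` is the workhorse bucket aggregation, and its `result()` hook above wraps the response in `FieldBucketData` so buckets can be traversed as attributes. A minimal end-to-end sketch, assuming a default connection is already registered and a hypothetical `git` index with keyword and numeric fields exists::

    from elasticsearch.dsl import Search

    s = Search(index="git")
    s.aggs.bucket("per_committer", "terms",
                  field="committer.name.keyword", size=20) \
          .metric("lines_changed", "sum", field="stats.lines")
    response = s.execute()
    for b in response.aggregations.per_committer.buckets:
        print(b.key, b.doc_count, b.lines_changed.value)
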
This is typically data such as metrics or other data streams + with a time component, and requires creating an index using the time + series mode. + + :arg size: The maximum number of results to return. Defaults to + `10000` if omitted. + :arg keyed: Set to `true` to associate a unique string key with each + bucket and returns the ranges as a hash rather than an array. + """ + + name = "time_series" + + def __init__( + self, + *, + size: Union[int, "DefaultType"] = DEFAULT, + keyed: Union[bool, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(size=size, keyed=keyed, **kwargs) + + +class TopHits(Agg[_R]): + """ + A metric aggregation that returns the top matching documents per + bucket. + + :arg docvalue_fields: Fields for which to return doc values. + :arg explain: If `true`, returns detailed information about score + computation as part of a hit. + :arg fields: Array of wildcard (*) patterns. The request returns + values for field names matching these patterns in the hits.fields + property of the response. + :arg from: Starting document offset. + :arg highlight: Specifies the highlighter to use for retrieving + highlighted snippets from one or more fields in the search + results. + :arg script_fields: Returns the result of one or more script + evaluations for each hit. + :arg size: The maximum number of top matching hits to return per + bucket. Defaults to `3` if omitted. + :arg sort: Sort order of the top matching hits. By default, the hits + are sorted by the score of the main query. + :arg _source: Selects the fields of the source that are returned. + :arg stored_fields: Returns values for the specified stored fields + (fields that use the `store` mapping option). + :arg track_scores: If `true`, calculates and returns document scores, + even if the scores are not used for sorting. + :arg version: If `true`, returns document version as part of a hit. + :arg seq_no_primary_term: If `true`, returns sequence number and + primary term of the last modification of each hit. + :arg field: The field on which to run the aggregation. + :arg missing: The value to apply to documents that do not have a + value. By default, documents without a value are ignored. 
+ :arg script: + """ + + name = "top_hits" + + def __init__( + self, + *, + docvalue_fields: Union[ + Sequence["types.FieldAndFormat"], Sequence[Dict[str, Any]], "DefaultType" + ] = DEFAULT, + explain: Union[bool, "DefaultType"] = DEFAULT, + fields: Union[ + Sequence["types.FieldAndFormat"], Sequence[Dict[str, Any]], "DefaultType" + ] = DEFAULT, + from_: Union[int, "DefaultType"] = DEFAULT, + highlight: Union["types.Highlight", Dict[str, Any], "DefaultType"] = DEFAULT, + script_fields: Union[ + Mapping[str, "types.ScriptField"], Dict[str, Any], "DefaultType" + ] = DEFAULT, + size: Union[int, "DefaultType"] = DEFAULT, + sort: Union[ + Union[Union[str, "InstrumentedField"], "types.SortOptions"], + Sequence[Union[Union[str, "InstrumentedField"], "types.SortOptions"]], + Dict[str, Any], + "DefaultType", + ] = DEFAULT, + _source: Union[ + bool, "types.SourceFilter", Dict[str, Any], "DefaultType" + ] = DEFAULT, + stored_fields: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + track_scores: Union[bool, "DefaultType"] = DEFAULT, + version: Union[bool, "DefaultType"] = DEFAULT, + seq_no_primary_term: Union[bool, "DefaultType"] = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + docvalue_fields=docvalue_fields, + explain=explain, + fields=fields, + from_=from_, + highlight=highlight, + script_fields=script_fields, + size=size, + sort=sort, + _source=_source, + stored_fields=stored_fields, + track_scores=track_scores, + version=version, + seq_no_primary_term=seq_no_primary_term, + field=field, + missing=missing, + script=script, + **kwargs, + ) + + def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]: + return TopHitsData(self, search, data) + + +class TTest(Agg[_R]): + """ + A metrics aggregation that performs a statistical hypothesis test in + which the test statistic follows a Student’s t-distribution under the + null hypothesis on numeric values extracted from the aggregated + documents. + + :arg a: Test population A. + :arg b: Test population B. + :arg type: The type of test. Defaults to `heteroscedastic` if omitted. + """ + + name = "t_test" + + def __init__( + self, + *, + a: Union["types.TestPopulation", Dict[str, Any], "DefaultType"] = DEFAULT, + b: Union["types.TestPopulation", Dict[str, Any], "DefaultType"] = DEFAULT, + type: Union[ + Literal["paired", "homoscedastic", "heteroscedastic"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__(a=a, b=b, type=type, **kwargs) + + +class TopMetrics(Agg[_R]): + """ + A metric aggregation that selects metrics from the document with the + largest or smallest sort value. + + :arg metrics: The fields of the top document to return. + :arg size: The number of top documents from which to return metrics. + Defaults to `1` if omitted. + :arg sort: The sort order of the documents. + :arg field: The field on which to run the aggregation. + :arg missing: The value to apply to documents that do not have a + value. By default, documents without a value are ignored. 
+ :arg script: + """ + + name = "top_metrics" + + def __init__( + self, + *, + metrics: Union[ + "types.TopMetricsValue", + Sequence["types.TopMetricsValue"], + Sequence[Dict[str, Any]], + "DefaultType", + ] = DEFAULT, + size: Union[int, "DefaultType"] = DEFAULT, + sort: Union[ + Union[Union[str, "InstrumentedField"], "types.SortOptions"], + Sequence[Union[Union[str, "InstrumentedField"], "types.SortOptions"]], + Dict[str, Any], + "DefaultType", + ] = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + metrics=metrics, + size=size, + sort=sort, + field=field, + missing=missing, + script=script, + **kwargs, + ) + + +class ValueCount(Agg[_R]): + """ + A single-value metrics aggregation that counts the number of values + that are extracted from the aggregated documents. + + :arg format: + :arg field: The field on which to run the aggregation. + :arg missing: The value to apply to documents that do not have a + value. By default, documents without a value are ignored. + :arg script: + """ + + name = "value_count" + + def __init__( + self, + *, + format: Union[str, "DefaultType"] = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + format=format, field=field, missing=missing, script=script, **kwargs + ) + + +class WeightedAvg(Agg[_R]): + """ + A single-value metrics aggregation that computes the weighted average + of numeric values that are extracted from the aggregated documents. + + :arg format: A numeric response formatter. + :arg value: Configuration for the field that provides the values. + :arg value_type: + :arg weight: Configuration for the field or script that provides the + weights. + """ + + name = "weighted_avg" + + def __init__( + self, + *, + format: Union[str, "DefaultType"] = DEFAULT, + value: Union[ + "types.WeightedAverageValue", Dict[str, Any], "DefaultType" + ] = DEFAULT, + value_type: Union[ + Literal[ + "string", + "long", + "double", + "number", + "date", + "date_nanos", + "ip", + "numeric", + "geo_point", + "boolean", + ], + "DefaultType", + ] = DEFAULT, + weight: Union[ + "types.WeightedAverageValue", Dict[str, Any], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + format=format, value=value, value_type=value_type, weight=weight, **kwargs + ) + + +class VariableWidthHistogram(Bucket[_R]): + """ + A multi-bucket aggregation similar to the histogram, except instead of + providing an interval to use as the width of each bucket, a target + number of buckets is provided. + + :arg field: The name of the field. + :arg buckets: The target number of buckets. Defaults to `10` if + omitted. + :arg shard_size: The number of buckets that the coordinating node will + request from each shard. Defaults to `buckets * 50`. + :arg initial_buffer: Specifies the number of individual documents that + will be stored in memory on a shard before the initial bucketing + algorithm is run. Defaults to `min(10 * shard_size, 50000)`. 
+ :arg script: + """ + + name = "variable_width_histogram" + + def __init__( + self, + *, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + buckets: Union[int, "DefaultType"] = DEFAULT, + shard_size: Union[int, "DefaultType"] = DEFAULT, + initial_buffer: Union[int, "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + field=field, + buckets=buckets, + shard_size=shard_size, + initial_buffer=initial_buffer, + script=script, + **kwargs, + ) + + def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]: + return FieldBucketData(self, search, data) diff --git a/elasticsearch/dsl/analysis.py b/elasticsearch/dsl/analysis.py new file mode 100644 index 000000000..bc5ee6456 --- /dev/null +++ b/elasticsearch/dsl/analysis.py @@ -0,0 +1,341 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from typing import Any, ClassVar, Dict, List, Optional, Union, cast + +from . import async_connections, connections +from .utils import AsyncUsingType, AttrDict, DslBase, UsingType, merge + +__all__ = ["tokenizer", "analyzer", "char_filter", "token_filter", "normalizer"] + + +class AnalysisBase: + @classmethod + def _type_shortcut( + cls, + name_or_instance: Union[str, "AnalysisBase"], + type: Optional[str] = None, + **kwargs: Any, + ) -> DslBase: + if isinstance(name_or_instance, cls): + if type or kwargs: + raise ValueError(f"{cls.__name__}() cannot accept parameters.") + return name_or_instance # type: ignore[return-value] + + if not (type or kwargs): + return cls.get_dsl_class("builtin")(name_or_instance) # type: ignore[no-any-return, attr-defined] + + return cls.get_dsl_class(type, "custom")( # type: ignore[no-any-return, attr-defined] + name_or_instance, type or "custom", **kwargs + ) + + +class CustomAnalysis: + name = "custom" + + def __init__(self, filter_name: str, builtin_type: str = "custom", **kwargs: Any): + self._builtin_type = builtin_type + self._name = filter_name + super().__init__(**kwargs) + + def to_dict(self) -> Dict[str, Any]: + # only name to present in lists + return self._name # type: ignore[return-value] + + def get_definition(self) -> Dict[str, Any]: + d = super().to_dict() # type: ignore[misc] + d = d.pop(self.name) + d["type"] = self._builtin_type + return d # type: ignore[no-any-return] + + +class CustomAnalysisDefinition(CustomAnalysis): + _type_name: str + _param_defs: ClassVar[Dict[str, Any]] + filter: List[Any] + char_filter: List[Any] + + def get_analysis_definition(self) -> Dict[str, Any]: + out = {self._type_name: {self._name: self.get_definition()}} + + t = cast("Tokenizer", getattr(self, "tokenizer", None)) + if "tokenizer" in self._param_defs and hasattr(t, "get_definition"): + out["tokenizer"] = {t._name: t.get_definition()} 
+ + filters = { + f._name: f.get_definition() + for f in self.filter + if hasattr(f, "get_definition") + } + if filters: + out["filter"] = filters + + # any sub filter definitions like multiplexers etc? + for f in self.filter: + if hasattr(f, "get_analysis_definition"): + d = f.get_analysis_definition() + if d: + merge(out, d, True) + + char_filters = { + f._name: f.get_definition() + for f in self.char_filter + if hasattr(f, "get_definition") + } + if char_filters: + out["char_filter"] = char_filters + + return out + + +class BuiltinAnalysis: + name = "builtin" + + def __init__(self, name: str): + self._name = name + super().__init__() + + def to_dict(self) -> Dict[str, Any]: + # only name to present in lists + return self._name # type: ignore[return-value] + + +class Analyzer(AnalysisBase, DslBase): + _type_name = "analyzer" + name = "" + + +class BuiltinAnalyzer(BuiltinAnalysis, Analyzer): + def get_analysis_definition(self) -> Dict[str, Any]: + return {} + + +class CustomAnalyzer(CustomAnalysisDefinition, Analyzer): + _param_defs = { + "filter": {"type": "token_filter", "multi": True}, + "char_filter": {"type": "char_filter", "multi": True}, + "tokenizer": {"type": "tokenizer"}, + } + + def _get_body( + self, text: str, explain: bool, attributes: Optional[Dict[str, Any]] + ) -> Dict[str, Any]: + body = {"text": text, "explain": explain} + if attributes: + body["attributes"] = attributes + + definition = self.get_analysis_definition() + analyzer_def = self.get_definition() + + for section in ("tokenizer", "char_filter", "filter"): + if section not in analyzer_def: + continue + sec_def = definition.get(section, {}) + sec_names = analyzer_def[section] + + if isinstance(sec_names, str): + body[section] = sec_def.get(sec_names, sec_names) + else: + body[section] = [ + sec_def.get(sec_name, sec_name) for sec_name in sec_names + ] + + if self._builtin_type != "custom": + body["analyzer"] = self._builtin_type + + return body + + def simulate( + self, + text: str, + using: UsingType = "default", + explain: bool = False, + attributes: Optional[Dict[str, Any]] = None, + ) -> AttrDict[Any]: + """ + Use the Analyze API of elasticsearch to test the outcome of this analyzer. + + :arg text: Text to be analyzed + :arg using: connection alias to use, defaults to ``'default'`` + :arg explain: will output all token attributes for each token. You can + filter token attributes you want to output by setting ``attributes`` + option. + :arg attributes: if ``explain`` is specified, filter the token + attributes to return. + """ + es = connections.get_connection(using) + return AttrDict( + cast( + Dict[str, Any], + es.indices.analyze(body=self._get_body(text, explain, attributes)), + ) + ) + + async def async_simulate( + self, + text: str, + using: AsyncUsingType = "default", + explain: bool = False, + attributes: Optional[Dict[str, Any]] = None, + ) -> AttrDict[Any]: + """ + Use the Analyze API of elasticsearch to test the outcome of this analyzer. + + :arg text: Text to be analyzed + :arg using: connection alias to use, defaults to ``'default'`` + :arg explain: will output all token attributes for each token. You can + filter token attributes you want to output by setting ``attributes`` + option. + :arg attributes: if ``explain`` is specified, filter the token + attributes to return. 
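
`simulate()` and `async_simulate()` simply feed the analyzer definition into the cluster's `_analyze` API. A minimal sketch, assuming a reachable cluster and that the package re-exports the `analyzer`/`tokenizer` shortcuts defined at the bottom of this module::

    from elasticsearch.dsl import analyzer, tokenizer, connections

    connections.create_connection(hosts=["http://localhost:9200"])

    trigrams = analyzer(
        "trigram_analyzer",
        tokenizer=tokenizer("trigram", "ngram", min_gram=3, max_gram=3),
        filter=["lowercase"],
    )
    result = trigrams.simulate("Quick Fox")
    print([t.token for t in result.tokens])
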
+ """ + es = async_connections.get_connection(using) + return AttrDict( + cast( + Dict[str, Any], + await es.indices.analyze( + body=self._get_body(text, explain, attributes) + ), + ) + ) + + +class Normalizer(AnalysisBase, DslBase): + _type_name = "normalizer" + name = "" + + +class BuiltinNormalizer(BuiltinAnalysis, Normalizer): + def get_analysis_definition(self) -> Dict[str, Any]: + return {} + + +class CustomNormalizer(CustomAnalysisDefinition, Normalizer): + _param_defs = { + "filter": {"type": "token_filter", "multi": True}, + "char_filter": {"type": "char_filter", "multi": True}, + } + + +class Tokenizer(AnalysisBase, DslBase): + _type_name = "tokenizer" + name = "" + + +class BuiltinTokenizer(BuiltinAnalysis, Tokenizer): + pass + + +class CustomTokenizer(CustomAnalysis, Tokenizer): + pass + + +class TokenFilter(AnalysisBase, DslBase): + _type_name = "token_filter" + name = "" + + +class BuiltinTokenFilter(BuiltinAnalysis, TokenFilter): + pass + + +class CustomTokenFilter(CustomAnalysis, TokenFilter): + pass + + +class MultiplexerTokenFilter(CustomTokenFilter): + name = "multiplexer" + + def get_definition(self) -> Dict[str, Any]: + d = super(CustomTokenFilter, self).get_definition() + + if "filters" in d: + d["filters"] = [ + # comma delimited string given by user + ( + fs + if isinstance(fs, str) + else + # list of strings or TokenFilter objects + ", ".join(f.to_dict() if hasattr(f, "to_dict") else f for f in fs) + ) + for fs in self.filters + ] + return d + + def get_analysis_definition(self) -> Dict[str, Any]: + if not hasattr(self, "filters"): + return {} + + fs: Dict[str, Any] = {} + d = {"filter": fs} + for filters in self.filters: + if isinstance(filters, str): + continue + fs.update( + { + f._name: f.get_definition() + for f in filters + if hasattr(f, "get_definition") + } + ) + return d + + +class ConditionalTokenFilter(CustomTokenFilter): + name = "condition" + + def get_definition(self) -> Dict[str, Any]: + d = super(CustomTokenFilter, self).get_definition() + if "filter" in d: + d["filter"] = [ + f.to_dict() if hasattr(f, "to_dict") else f for f in self.filter + ] + return d + + def get_analysis_definition(self) -> Dict[str, Any]: + if not hasattr(self, "filter"): + return {} + + return { + "filter": { + f._name: f.get_definition() + for f in self.filter + if hasattr(f, "get_definition") + } + } + + +class CharFilter(AnalysisBase, DslBase): + _type_name = "char_filter" + name = "" + + +class BuiltinCharFilter(BuiltinAnalysis, CharFilter): + pass + + +class CustomCharFilter(CustomAnalysis, CharFilter): + pass + + +# shortcuts for direct use +analyzer = Analyzer._type_shortcut +tokenizer = Tokenizer._type_shortcut +token_filter = TokenFilter._type_shortcut +char_filter = CharFilter._type_shortcut +normalizer = Normalizer._type_shortcut diff --git a/elasticsearch/dsl/async_connections.py b/elasticsearch/dsl/async_connections.py new file mode 100644 index 000000000..8a23d3828 --- /dev/null +++ b/elasticsearch/dsl/async_connections.py @@ -0,0 +1,37 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from typing import Type + +from elasticsearch import AsyncElasticsearch + +from .connections import Connections + + +class AsyncElasticsearchConnections(Connections[AsyncElasticsearch]): + def __init__( + self, *, elasticsearch_class: Type[AsyncElasticsearch] = AsyncElasticsearch + ): + super().__init__(elasticsearch_class=elasticsearch_class) + + +connections = AsyncElasticsearchConnections(elasticsearch_class=AsyncElasticsearch) +configure = connections.configure +add_connection = connections.add_connection +remove_connection = connections.remove_connection +create_connection = connections.create_connection +get_connection = connections.get_connection diff --git a/elasticsearch/dsl/connections.py b/elasticsearch/dsl/connections.py new file mode 100644 index 000000000..8acd80c6e --- /dev/null +++ b/elasticsearch/dsl/connections.py @@ -0,0 +1,142 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from typing import Any, Dict, Generic, Type, TypeVar, Union + +from elasticsearch import Elasticsearch, __versionstr__ + +from .serializer import serializer + +_T = TypeVar("_T") + + +class Connections(Generic[_T]): + """ + Class responsible for holding connections to different clusters. Used as a + singleton in this module. + """ + + def __init__(self, *, elasticsearch_class: Type[_T]): + self._kwargs: Dict[str, Any] = {} + self._conns: Dict[str, _T] = {} + self.elasticsearch_class: Type[_T] = elasticsearch_class + + def configure(self, **kwargs: Any) -> None: + """ + Configure multiple connections at once, useful for passing in config + dictionaries obtained from other sources, like Django's settings or a + configuration management tool. + + Example:: + + connections.configure( + default={'hosts': 'localhost'}, + dev={'hosts': ['esdev1.example.com:9200'], 'sniff_on_start': True}, + ) + + Connections will only be constructed lazily when requested through + ``get_connection``. + """ + for k in list(self._conns): + # try and preserve existing client to keep the persistent connections alive + if k in self._kwargs and kwargs.get(k, None) == self._kwargs[k]: + continue + del self._conns[k] + self._kwargs = kwargs + + def add_connection(self, alias: str, conn: _T) -> None: + """ + Add a connection object, it will be passed through as-is. 
+ """ + self._conns[alias] = self._with_user_agent(conn) + + def remove_connection(self, alias: str) -> None: + """ + Remove connection from the registry. Raises ``KeyError`` if connection + wasn't found. + """ + errors = 0 + for d in (self._conns, self._kwargs): + try: + del d[alias] + except KeyError: + errors += 1 + + if errors == 2: + raise KeyError(f"There is no connection with alias {alias!r}.") + + def create_connection(self, alias: str = "default", **kwargs: Any) -> _T: + """ + Construct an instance of ``elasticsearch.Elasticsearch`` and register + it under given alias. + """ + kwargs.setdefault("serializer", serializer) + conn = self._conns[alias] = self.elasticsearch_class(**kwargs) + return self._with_user_agent(conn) + + def get_connection(self, alias: Union[str, _T] = "default") -> _T: + """ + Retrieve a connection, construct it if necessary (only configuration + was passed to us). If a non-string alias has been passed through we + assume it's already a client instance and will just return it as-is. + + Raises ``KeyError`` if no client (or its definition) is registered + under the alias. + """ + # do not check isinstance(Elasticsearch) so that people can wrap their + # clients + if not isinstance(alias, str): + return self._with_user_agent(alias) + + # connection already established + try: + return self._conns[alias] + except KeyError: + pass + + # if not, try to create it + try: + return self.create_connection(alias, **self._kwargs[alias]) + except KeyError: + # no connection and no kwargs to set one up + raise KeyError(f"There is no connection with alias {alias!r}.") + + def _with_user_agent(self, conn: _T) -> _T: + # try to inject our user agent + if hasattr(conn, "_headers"): + is_frozen = conn._headers.frozen + if is_frozen: + conn._headers = conn._headers.copy() + conn._headers.update( + {"user-agent": f"elasticsearch-dsl-py/{__versionstr__}"} + ) + if is_frozen: + conn._headers.freeze() + return conn + + +class ElasticsearchConnections(Connections[Elasticsearch]): + def __init__(self, *, elasticsearch_class: Type[Elasticsearch] = Elasticsearch): + super().__init__(elasticsearch_class=elasticsearch_class) + + +connections = ElasticsearchConnections() +configure = connections.configure +add_connection = connections.add_connection +remove_connection = connections.remove_connection +create_connection = connections.create_connection +get_connection = connections.get_connection diff --git a/elasticsearch/dsl/document.py b/elasticsearch/dsl/document.py new file mode 100644 index 000000000..c27c5af04 --- /dev/null +++ b/elasticsearch/dsl/document.py @@ -0,0 +1,20 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
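[Editorial note, not part of the patch] Typical use of the connection registry added in connections.py above (a minimal sketch; the host URLs and the ``dev`` alias are made-up examples)::

    from elasticsearch.dsl import connections

    # register client configurations; clients are created lazily on first use
    connections.configure(
        default={"hosts": "http://localhost:9200"},
        dev={"hosts": ["http://esdev1.example.com:9200"], "sniff_on_start": True},
    )

    es = connections.get_connection()        # the "default" alias
    es_dev = connections.get_connection("dev")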
+ +from ._async.document import AsyncDocument # noqa: F401 +from ._sync.document import Document # noqa: F401 +from .document_base import InnerDoc, MetaField # noqa: F401 diff --git a/elasticsearch/dsl/document_base.py b/elasticsearch/dsl/document_base.py new file mode 100644 index 000000000..b5e373741 --- /dev/null +++ b/elasticsearch/dsl/document_base.py @@ -0,0 +1,444 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from datetime import date, datetime +from fnmatch import fnmatch +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ClassVar, + Dict, + Generic, + List, + Optional, + Tuple, + TypeVar, + Union, + get_args, + overload, +) + +try: + from types import UnionType +except ImportError: + UnionType = None # type: ignore[assignment, misc] + +from typing_extensions import dataclass_transform + +from .exceptions import ValidationException +from .field import Binary, Boolean, Date, Field, Float, Integer, Nested, Object, Text +from .mapping import Mapping +from .utils import DOC_META_FIELDS, ObjectBase + +if TYPE_CHECKING: + from elastic_transport import ObjectApiResponse + + from .index_base import IndexBase + + +class MetaField: + def __init__(self, *args: Any, **kwargs: Any): + self.args, self.kwargs = args, kwargs + + +class InstrumentedField: + """Proxy object for a mapped document field. + + An object of this instance is returned when a field is accessed as a class + attribute of a ``Document`` or ``InnerDoc`` subclass. 
These objects can + be used in any situation in which a reference to a field is required, such + as when specifying sort options in a search:: + + class MyDocument(Document): + name: str + + s = MyDocument.search() + s = s.sort(-MyDocument.name) # sort by name in descending order + """ + + def __init__(self, name: str, field: Field): + self._name = name + self._field = field + + # note that the return value type here assumes classes will only be used to + # access fields (I haven't found a way to make this type dynamic based on a + # decision taken at runtime) + def __getattr__(self, attr: str) -> "InstrumentedField": + try: + # first let's see if this is an attribute of this object + return super().__getattribute__(attr) # type: ignore[no-any-return] + except AttributeError: + try: + # next we see if we have a sub-field with this name + return InstrumentedField(f"{self._name}.{attr}", self._field[attr]) + except KeyError: + # lastly we let the wrapped field resolve this attribute + return getattr(self._field, attr) # type: ignore[no-any-return] + + def __pos__(self) -> str: + """Return the field name representation for ascending sort order""" + return f"{self._name}" + + def __neg__(self) -> str: + """Return the field name representation for descending sort order""" + return f"-{self._name}" + + def __str__(self) -> str: + return self._name + + def __repr__(self) -> str: + return f"InstrumentedField[{self._name}]" + + +class DocumentMeta(type): + _doc_type: "DocumentOptions" + _index: "IndexBase" + + def __new__( + cls, name: str, bases: Tuple[type, ...], attrs: Dict[str, Any] + ) -> "DocumentMeta": + # DocumentMeta filters attrs in place + attrs["_doc_type"] = DocumentOptions(name, bases, attrs) + return super().__new__(cls, name, bases, attrs) + + def __getattr__(cls, attr: str) -> Any: + if attr in cls._doc_type.mapping: + return InstrumentedField(attr, cls._doc_type.mapping[attr]) + return super().__getattribute__(attr) + + +class DocumentOptions: + type_annotation_map = { + int: (Integer, {}), + float: (Float, {}), + bool: (Boolean, {}), + str: (Text, {}), + bytes: (Binary, {}), + datetime: (Date, {}), + date: (Date, {"format": "yyyy-MM-dd"}), + } + + def __init__(self, name: str, bases: Tuple[type, ...], attrs: Dict[str, Any]): + meta = attrs.pop("Meta", None) + + # create the mapping instance + self.mapping: Mapping = getattr(meta, "mapping", Mapping()) + + # register the document's fields, which can be given in a few formats: + # + # class MyDocument(Document): + # # required field using native typing + # # (str, int, float, bool, datetime, date) + # field1: str + # + # # optional field using native typing + # field2: Optional[datetime] + # + # # array field using native typing + # field3: list[int] + # + # # sub-object, same as Object(MyInnerDoc) + # field4: MyInnerDoc + # + # # nested sub-objects, same as Nested(MyInnerDoc) + # field5: list[MyInnerDoc] + # + # # use typing, but override with any stock or custom field + # field6: bool = MyCustomField() + # + # # best mypy and pyright support and dataclass-like behavior + # field7: M[date] + # field8: M[str] = mapped_field(MyCustomText(), default="foo") + # + # # legacy format without Python typing + # field9 = Text() + # + # # ignore attributes + # field10: ClassVar[string] = "a regular class variable" + annotations = attrs.get("__annotations__", {}) + fields = set([n for n in attrs if isinstance(attrs[n], Field)]) + fields.update(annotations.keys()) + field_defaults = {} + for name in fields: + value: Any = None + required = 
None + multi = None + if name in annotations: + # the field has a type annotation, so next we try to figure out + # what field type we can use + type_ = annotations[name] + skip = False + required = True + multi = False + while hasattr(type_, "__origin__"): + if type_.__origin__ == ClassVar: + skip = True + break + elif type_.__origin__ == Mapped: + # M[type] -> extract the wrapped type + type_ = type_.__args__[0] + elif type_.__origin__ == Union: + if len(type_.__args__) == 2 and type_.__args__[1] is type(None): + # Optional[type] -> mark instance as optional + required = False + type_ = type_.__args__[0] + else: + raise TypeError("Unsupported union") + elif type_.__origin__ in [list, List]: + # List[type] -> mark instance as multi + multi = True + required = False + type_ = type_.__args__[0] + else: + break + if skip or type_ == ClassVar: + # skip ClassVar attributes + continue + if type(type_) is UnionType: + # a union given with the pipe syntax + args = get_args(type_) + if len(args) == 2 and args[1] is type(None): + required = False + type_ = type_.__args__[0] + else: + raise TypeError("Unsupported union") + field = None + field_args: List[Any] = [] + field_kwargs: Dict[str, Any] = {} + if isinstance(type_, type) and issubclass(type_, InnerDoc): + # object or nested field + field = Nested if multi else Object + field_args = [type_] + elif type_ in self.type_annotation_map: + # use best field type for the type hint provided + field, field_kwargs = self.type_annotation_map[type_] # type: ignore[assignment] + + if field: + field_kwargs = { + "multi": multi, + "required": required, + **field_kwargs, + } + value = field(*field_args, **field_kwargs) + + if name in attrs: + # this field has a right-side value, which can be field + # instance on its own or wrapped with mapped_field() + attr_value = attrs[name] + if isinstance(attr_value, dict): + # the mapped_field() wrapper function was used so we need + # to look for the field instance and also record any + # dataclass-style defaults + attr_value = attrs[name].get("_field") + default_value = attrs[name].get("default") or attrs[name].get( + "default_factory" + ) + if default_value: + field_defaults[name] = default_value + if attr_value: + value = attr_value + if required is not None: + value._required = required + if multi is not None: + value._multi = multi + + if value is None: + raise TypeError(f"Cannot map field {name}") + + self.mapping.field(name, value) + if name in attrs: + del attrs[name] + + # store dataclass-style defaults for ObjectBase.__init__ to assign + attrs["_defaults"] = field_defaults + + # add all the mappings for meta fields + for name in dir(meta): + if isinstance(getattr(meta, name, None), MetaField): + params = getattr(meta, name) + self.mapping.meta(name, *params.args, **params.kwargs) + + # document inheritance - include the fields from parents' mappings + for b in bases: + if hasattr(b, "_doc_type") and hasattr(b._doc_type, "mapping"): + self.mapping.update(b._doc_type.mapping, update_only=True) + + @property + def name(self) -> str: + return self.mapping.properties.name + + +_FieldType = TypeVar("_FieldType") + + +class Mapped(Generic[_FieldType]): + """Class that represents the type of a mapped field. + + This class can be used as an optional wrapper on a field type to help type + checkers assign the correct type when the field is used as a class + attribute. 
+ + Consider the following definitions:: + + class MyDocument(Document): + first: str + second: M[str] + + mydoc = MyDocument(first="1", second="2") + + Type checkers have no trouble inferring the type of both ``mydoc.first`` + and ``mydoc.second`` as ``str``, but while ``MyDocument.first`` will be + incorrectly typed as ``str``, ``MyDocument.second`` should be assigned the + correct ``InstrumentedField`` type. + """ + + __slots__: Dict[str, Any] = {} + + if TYPE_CHECKING: + + @overload + def __get__(self, instance: None, owner: Any) -> InstrumentedField: ... + + @overload + def __get__(self, instance: object, owner: Any) -> _FieldType: ... + + def __get__( + self, instance: Optional[object], owner: Any + ) -> Union[InstrumentedField, _FieldType]: ... + + def __set__(self, instance: Optional[object], value: _FieldType) -> None: ... + + def __delete__(self, instance: Any) -> None: ... + + +M = Mapped + + +def mapped_field( + field: Optional[Field] = None, + *, + init: bool = True, + default: Any = None, + default_factory: Optional[Callable[[], Any]] = None, + **kwargs: Any, +) -> Any: + """Construct a field using dataclass behaviors + + This function can be used in the right side of a document field definition + as a wrapper for the field instance or as a way to provide dataclass-compatible + options. + + :param field: The instance of ``Field`` to use for this field. If not provided, + an instance that is appropriate for the type given to the field is used. + :param init: a value of ``True`` adds this field to the constructor, and a + value of ``False`` omits it from it. The default is ``True``. + :param default: a default value to use for this field when one is not provided + explicitly. + :param default_factory: a callable that returns a default value for the field, + when one isn't provided explicitly. Only one of ``factory`` and + ``default_factory`` can be used. + """ + return { + "_field": field, + "init": init, + "default": default, + "default_factory": default_factory, + **kwargs, + } + + +@dataclass_transform(field_specifiers=(mapped_field,)) +class InnerDoc(ObjectBase, metaclass=DocumentMeta): + """ + Common class for inner documents like Object or Nested + """ + + @classmethod + def from_es( + cls, + data: Union[Dict[str, Any], "ObjectApiResponse[Any]"], + data_only: bool = False, + ) -> "InnerDoc": + if data_only: + data = {"_source": data} + return super().from_es(data) + + +class DocumentBase(ObjectBase): + """ + Model-like class for persisting documents in elasticsearch. 
+ """ + + @classmethod + def _matches(cls, hit: Dict[str, Any]) -> bool: + if cls._index._name is None: + return True + return fnmatch(hit.get("_index", ""), cls._index._name) + + @classmethod + def _default_index(cls, index: Optional[str] = None) -> str: + return index or cls._index._name + + def _get_index( + self, index: Optional[str] = None, required: bool = True + ) -> Optional[str]: + if index is None: + index = getattr(self.meta, "index", None) + if index is None: + index = getattr(self._index, "_name", None) + if index is None and required: + raise ValidationException("No index") + if index and "*" in index: + raise ValidationException("You cannot write to a wildcard index.") + return index + + def __repr__(self) -> str: + return "{}({})".format( + self.__class__.__name__, + ", ".join( + f"{key}={getattr(self.meta, key)!r}" + for key in ("index", "id") + if key in self.meta + ), + ) + + def to_dict(self, include_meta: bool = False, skip_empty: bool = True) -> Dict[str, Any]: # type: ignore[override] + """ + Serialize the instance into a dictionary so that it can be saved in elasticsearch. + + :arg include_meta: if set to ``True`` will include all the metadata + (``_index``, ``_id`` etc). Otherwise just the document's + data is serialized. This is useful when passing multiple instances into + ``elasticsearch.helpers.bulk``. + :arg skip_empty: if set to ``False`` will cause empty values (``None``, + ``[]``, ``{}``) to be left on the document. Those values will be + stripped out otherwise as they make no difference in elasticsearch. + """ + d = super().to_dict(skip_empty=skip_empty) + if not include_meta: + return d + + meta = {"_" + k: self.meta[k] for k in DOC_META_FIELDS if k in self.meta} + + # in case of to_dict include the index unlike save/update/delete + index = self._get_index(required=False) + if index is not None: + meta["_index"] = index + + meta["_source"] = d + return meta diff --git a/elasticsearch/dsl/exceptions.py b/elasticsearch/dsl/exceptions.py new file mode 100644 index 000000000..8aae0ffa8 --- /dev/null +++ b/elasticsearch/dsl/exceptions.py @@ -0,0 +1,32 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +class ElasticsearchDslException(Exception): + pass + + +class UnknownDslObject(ElasticsearchDslException): + pass + + +class ValidationException(ValueError, ElasticsearchDslException): + pass + + +class IllegalOperation(ElasticsearchDslException): + pass diff --git a/elasticsearch/dsl/faceted_search.py b/elasticsearch/dsl/faceted_search.py new file mode 100644 index 000000000..96941b08c --- /dev/null +++ b/elasticsearch/dsl/faceted_search.py @@ -0,0 +1,28 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from ._async.faceted_search import AsyncFacetedSearch # noqa: F401 +from ._sync.faceted_search import FacetedSearch # noqa: F401 +from .faceted_search_base import ( # noqa: F401 + DateHistogramFacet, + Facet, + FacetedResponse, + HistogramFacet, + NestedFacet, + RangeFacet, + TermsFacet, +) diff --git a/elasticsearch/dsl/faceted_search_base.py b/elasticsearch/dsl/faceted_search_base.py new file mode 100644 index 000000000..5caa041bf --- /dev/null +++ b/elasticsearch/dsl/faceted_search_base.py @@ -0,0 +1,489 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from datetime import datetime, timedelta +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Generic, + List, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) + +from typing_extensions import Self + +from .aggs import A, Agg +from .query import MatchAll, Nested, Query, Range, Terms +from .response import Response +from .utils import _R, AttrDict + +if TYPE_CHECKING: + from .document_base import DocumentBase + from .response.aggs import BucketData + from .search_base import SearchBase + +FilterValueType = Union[str, datetime, Sequence[str]] + +__all__ = [ + "FacetedSearchBase", + "HistogramFacet", + "TermsFacet", + "DateHistogramFacet", + "RangeFacet", + "NestedFacet", +] + + +class Facet(Generic[_R]): + """ + A facet on faceted search. Wraps and aggregation and provides functionality + to create a filter for selected values and return a list of facet values + from the result of the aggregation. + """ + + agg_type: str = "" + + def __init__( + self, metric: Optional[Agg[_R]] = None, metric_sort: str = "desc", **kwargs: Any + ): + self.filter_values = () + self._params = kwargs + self._metric = metric + if metric and metric_sort: + self._params["order"] = {"metric": metric_sort} + + def get_aggregation(self) -> Agg[_R]: + """ + Return the aggregation object. + """ + agg: Agg[_R] = A(self.agg_type, **self._params) + if self._metric: + agg.metric("metric", self._metric) + return agg + + def add_filter(self, filter_values: List[FilterValueType]) -> Optional[Query]: + """ + Construct a filter. 
+ """ + if not filter_values: + return None + + f = self.get_value_filter(filter_values[0]) + for v in filter_values[1:]: + f |= self.get_value_filter(v) + return f + + def get_value_filter(self, filter_value: FilterValueType) -> Query: # type: ignore[empty-body] + """ + Construct a filter for an individual value + """ + pass + + def is_filtered(self, key: str, filter_values: List[FilterValueType]) -> bool: + """ + Is a filter active on the given key. + """ + return key in filter_values + + def get_value(self, bucket: "BucketData[_R]") -> Any: + """ + return a value representing a bucket. Its key as default. + """ + return bucket["key"] + + def get_metric(self, bucket: "BucketData[_R]") -> int: + """ + Return a metric, by default doc_count for a bucket. + """ + if self._metric: + return cast(int, bucket["metric"]["value"]) + return cast(int, bucket["doc_count"]) + + def get_values( + self, data: "BucketData[_R]", filter_values: List[FilterValueType] + ) -> List[Tuple[Any, int, bool]]: + """ + Turn the raw bucket data into a list of tuples containing the key, + number of documents and a flag indicating whether this value has been + selected or not. + """ + out = [] + for bucket in data.buckets: + b = cast("BucketData[_R]", bucket) + key = self.get_value(b) + out.append((key, self.get_metric(b), self.is_filtered(key, filter_values))) + return out + + +class TermsFacet(Facet[_R]): + agg_type = "terms" + + def add_filter(self, filter_values: List[FilterValueType]) -> Optional[Query]: + """Create a terms filter instead of bool containing term filters.""" + if filter_values: + return Terms(self._params["field"], filter_values, _expand__to_dot=False) + return None + + +class RangeFacet(Facet[_R]): + agg_type = "range" + + def _range_to_dict( + self, range: Tuple[Any, Tuple[Optional[int], Optional[int]]] + ) -> Dict[str, Any]: + key, _range = range + out: Dict[str, Any] = {"key": key} + if _range[0] is not None: + out["from"] = _range[0] + if _range[1] is not None: + out["to"] = _range[1] + return out + + def __init__( + self, + ranges: Sequence[Tuple[Any, Tuple[Optional[int], Optional[int]]]], + **kwargs: Any, + ): + super().__init__(**kwargs) + self._params["ranges"] = list(map(self._range_to_dict, ranges)) + self._params["keyed"] = False + self._ranges = dict(ranges) + + def get_value_filter(self, filter_value: FilterValueType) -> Query: + f, t = self._ranges[filter_value] + limits: Dict[str, Any] = {} + if f is not None: + limits["gte"] = f + if t is not None: + limits["lt"] = t + + return Range(self._params["field"], limits, _expand__to_dot=False) + + +class HistogramFacet(Facet[_R]): + agg_type = "histogram" + + def get_value_filter(self, filter_value: FilterValueType) -> Range: + return Range( + self._params["field"], + { + "gte": filter_value, + "lt": filter_value + self._params["interval"], + }, + _expand__to_dot=False, + ) + + +def _date_interval_year(d: datetime) -> datetime: + return d.replace( + year=d.year + 1, day=(28 if d.month == 2 and d.day == 29 else d.day) + ) + + +def _date_interval_month(d: datetime) -> datetime: + return (d + timedelta(days=32)).replace(day=1) + + +def _date_interval_week(d: datetime) -> datetime: + return d + timedelta(days=7) + + +def _date_interval_day(d: datetime) -> datetime: + return d + timedelta(days=1) + + +def _date_interval_hour(d: datetime) -> datetime: + return d + timedelta(hours=1) + + +class DateHistogramFacet(Facet[_R]): + agg_type = "date_histogram" + + DATE_INTERVALS = { + "year": _date_interval_year, + "1Y": _date_interval_year, + 
"month": _date_interval_month, + "1M": _date_interval_month, + "week": _date_interval_week, + "1w": _date_interval_week, + "day": _date_interval_day, + "1d": _date_interval_day, + "hour": _date_interval_hour, + "1h": _date_interval_hour, + } + + def __init__(self, **kwargs: Any): + kwargs.setdefault("min_doc_count", 0) + super().__init__(**kwargs) + + def get_value(self, bucket: "BucketData[_R]") -> Any: + if not isinstance(bucket["key"], datetime): + # Elasticsearch returns key=None instead of 0 for date 1970-01-01, + # so we need to set key to 0 to avoid TypeError exception + if bucket["key"] is None: + bucket["key"] = 0 + # Preserve milliseconds in the datetime + return datetime.utcfromtimestamp(int(cast(int, bucket["key"])) / 1000.0) + else: + return bucket["key"] + + def get_value_filter(self, filter_value: Any) -> Range: + for interval_type in ("calendar_interval", "fixed_interval"): + if interval_type in self._params: + break + else: + interval_type = "interval" + + return Range( + self._params["field"], + { + "gte": filter_value, + "lt": self.DATE_INTERVALS[self._params[interval_type]](filter_value), + }, + _expand__to_dot=False, + ) + + +class NestedFacet(Facet[_R]): + agg_type = "nested" + + def __init__(self, path: str, nested_facet: Facet[_R]): + self._path = path + self._inner = nested_facet + super().__init__(path=path, aggs={"inner": nested_facet.get_aggregation()}) + + def get_values( + self, data: "BucketData[_R]", filter_values: List[FilterValueType] + ) -> List[Tuple[Any, int, bool]]: + return self._inner.get_values(data.inner, filter_values) + + def add_filter(self, filter_values: List[FilterValueType]) -> Optional[Query]: + inner_q = self._inner.add_filter(filter_values) + if inner_q: + return Nested(path=self._path, query=inner_q) + return None + + +class FacetedResponse(Response[_R]): + if TYPE_CHECKING: + _faceted_search: "FacetedSearchBase[_R]" + _facets: Dict[str, List[Tuple[Any, int, bool]]] + + @property + def query_string(self) -> Optional[Union[str, Query]]: + return self._faceted_search._query + + @property + def facets(self) -> Dict[str, List[Tuple[Any, int, bool]]]: + if not hasattr(self, "_facets"): + super(AttrDict, self).__setattr__("_facets", AttrDict({})) + for name, facet in self._faceted_search.facets.items(): + self._facets[name] = facet.get_values( + getattr(getattr(self.aggregations, "_filter_" + name), name), + self._faceted_search.filter_values.get(name, []), + ) + return self._facets + + +class FacetedSearchBase(Generic[_R]): + """ + Abstraction for creating faceted navigation searches that takes care of + composing the queries, aggregations and filters as needed as well as + presenting the results in an easy-to-consume fashion:: + + class BlogSearch(FacetedSearch): + index = 'blogs' + doc_types = [Blog, Post] + fields = ['title^5', 'category', 'description', 'body'] + + facets = { + 'type': TermsFacet(field='_type'), + 'category': TermsFacet(field='category'), + 'weekly_posts': DateHistogramFacet(field='published_from', interval='week') + } + + def search(self): + ' Override search to add your own filters ' + s = super(BlogSearch, self).search() + return s.filter('term', published=True) + + # when using: + blog_search = BlogSearch("web framework", filters={"category": "python"}) + + # supports pagination + blog_search[10:20] + + response = blog_search.execute() + + # easy access to aggregation results: + for category, hit_count, is_selected in response.facets.category: + print( + "Category %s has %d hits%s." 
% ( + category, + hit_count, + ' and is chosen' if is_selected else '' + ) + ) + + """ + + index: Optional[str] = None + doc_types: Optional[List[Union[str, Type["DocumentBase"]]]] = None + fields: Sequence[str] = [] + facets: Dict[str, Facet[_R]] = {} + using = "default" + + if TYPE_CHECKING: + + def search(self) -> "SearchBase[_R]": ... + + def __init__( + self, + query: Optional[Union[str, Query]] = None, + filters: Dict[str, FilterValueType] = {}, + sort: Sequence[str] = [], + ): + """ + :arg query: the text to search for + :arg filters: facet values to filter + :arg sort: sort information to be passed to :class:`~elasticsearch.dsl.Search` + """ + self._query = query + self._filters: Dict[str, Query] = {} + self._sort = sort + self.filter_values: Dict[str, List[FilterValueType]] = {} + for name, value in filters.items(): + self.add_filter(name, value) + + self._s = self.build_search() + + def __getitem__(self, k: Union[int, slice]) -> Self: + self._s = self._s[k] + return self + + def add_filter( + self, name: str, filter_values: Union[FilterValueType, List[FilterValueType]] + ) -> None: + """ + Add a filter for a facet. + """ + # normalize the value into a list + if not isinstance(filter_values, (tuple, list)): + if filter_values is None: + return + filter_values = [ + filter_values, + ] + + # remember the filter values for use in FacetedResponse + self.filter_values[name] = filter_values # type: ignore[assignment] + + # get the filter from the facet + f = self.facets[name].add_filter(filter_values) # type: ignore[arg-type] + if f is None: + return + + self._filters[name] = f + + def query( + self, search: "SearchBase[_R]", query: Union[str, Query] + ) -> "SearchBase[_R]": + """ + Add query part to ``search``. + + Override this if you wish to customize the query used. + """ + if query: + if self.fields: + return search.query("multi_match", fields=self.fields, query=query) + else: + return search.query("multi_match", query=query) + return search + + def aggregate(self, search: "SearchBase[_R]") -> None: + """ + Add aggregations representing the facets selected, including potential + filters. + """ + for f, facet in self.facets.items(): + agg = facet.get_aggregation() + agg_filter: Query = MatchAll() + for field, filter in self._filters.items(): + if f == field: + continue + agg_filter &= filter + search.aggs.bucket("_filter_" + f, "filter", filter=agg_filter).bucket( + f, agg + ) + + def filter(self, search: "SearchBase[_R]") -> "SearchBase[_R]": + """ + Add a ``post_filter`` to the search request narrowing the results based + on the facet filters. + """ + if not self._filters: + return search + + post_filter: Query = MatchAll() + for f in self._filters.values(): + post_filter &= f + return search.post_filter(post_filter) + + def highlight(self, search: "SearchBase[_R]") -> "SearchBase[_R]": + """ + Add highlighting for all the fields + """ + return search.highlight( + *(f if "^" not in f else f.split("^", 1)[0] for f in self.fields) + ) + + def sort(self, search: "SearchBase[_R]") -> "SearchBase[_R]": + """ + Add sorting information to the request. + """ + if self._sort: + search = search.sort(*self._sort) + return search + + def params(self, **kwargs: Any) -> None: + """ + Specify query params to be used when executing the search. All the + keyword arguments will override the current values. See + https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.search + for all available parameters. 
+ """ + self._s = self._s.params(**kwargs) + + def build_search(self) -> "SearchBase[_R]": + """ + Construct the ``Search`` object. + """ + s = self.search() + if self._query is not None: + s = self.query(s, self._query) + s = self.filter(s) + if self.fields: + s = self.highlight(s) + s = self.sort(s) + self.aggregate(s) + return s diff --git a/elasticsearch/dsl/field.py b/elasticsearch/dsl/field.py new file mode 100644 index 000000000..8f9dd2ff7 --- /dev/null +++ b/elasticsearch/dsl/field.py @@ -0,0 +1,587 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import base64 +import collections.abc +import ipaddress +from copy import deepcopy +from datetime import date, datetime +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Iterable, + Iterator, + Optional, + Tuple, + Type, + Union, + cast, +) + +from dateutil import parser, tz + +from .exceptions import ValidationException +from .query import Q +from .utils import AttrDict, AttrList, DslBase +from .wrappers import Range + +if TYPE_CHECKING: + from datetime import tzinfo + from ipaddress import IPv4Address, IPv6Address + + from _operator import _SupportsComparison + + from .document import InnerDoc + from .mapping_base import MappingBase + from .query import Query + +unicode = str + + +def construct_field( + name_or_field: Union[ + str, + "Field", + Dict[str, Any], + ], + **params: Any, +) -> "Field": + # {"type": "text", "analyzer": "snowball"} + if isinstance(name_or_field, collections.abc.Mapping): + if params: + raise ValueError( + "construct_field() cannot accept parameters when passing in a dict." + ) + params = deepcopy(name_or_field) + if "type" not in params: + # inner object can be implicitly defined + if "properties" in params: + name = "object" + else: + raise ValueError('construct_field() needs to have a "type" key.') + else: + name = params.pop("type") + return Field.get_dsl_class(name)(**params) + + # Text() + if isinstance(name_or_field, Field): + if params: + raise ValueError( + "construct_field() cannot accept parameters " + "when passing in a construct_field object." 
+ ) + return name_or_field + + # "text", analyzer="snowball" + return Field.get_dsl_class(name_or_field)(**params) + + +class Field(DslBase): + _type_name = "field" + _type_shortcut = staticmethod(construct_field) + # all fields can be multifields + _param_defs = {"fields": {"type": "field", "hash": True}} + name = "" + _coerce = False + + def __init__( + self, multi: bool = False, required: bool = False, *args: Any, **kwargs: Any + ): + """ + :arg bool multi: specifies whether field can contain array of values + :arg bool required: specifies whether field is required + """ + self._multi = multi + self._required = required + super().__init__(*args, **kwargs) + + def __getitem__(self, subfield: str) -> "Field": + return cast(Field, self._params.get("fields", {})[subfield]) + + def _serialize(self, data: Any) -> Any: + return data + + def _deserialize(self, data: Any) -> Any: + return data + + def _empty(self) -> Optional[Any]: + return None + + def empty(self) -> Optional[Any]: + if self._multi: + return AttrList([]) + return self._empty() + + def serialize(self, data: Any) -> Any: + if isinstance(data, (list, AttrList, tuple)): + return list(map(self._serialize, cast(Iterable[Any], data))) + return self._serialize(data) + + def deserialize(self, data: Any) -> Any: + if isinstance(data, (list, AttrList, tuple)): + data = [ + None if d is None else self._deserialize(d) + for d in cast(Iterable[Any], data) + ] + return data + if data is None: + return None + return self._deserialize(data) + + def clean(self, data: Any) -> Any: + if data is not None: + data = self.deserialize(data) + if data in (None, [], {}) and self._required: + raise ValidationException("Value required for this field.") + return data + + def to_dict(self) -> Dict[str, Any]: + d = super().to_dict() + name, value = cast(Tuple[str, Dict[str, Any]], d.popitem()) + value["type"] = name + return value + + +class CustomField(Field): + name = "custom" + _coerce = True + + def to_dict(self) -> Dict[str, Any]: + if isinstance(self.builtin_type, Field): + return self.builtin_type.to_dict() + + d = super().to_dict() + d["type"] = self.builtin_type + return d + + +class Object(Field): + name = "object" + _coerce = True + + def __init__( + self, + doc_class: Optional[Type["InnerDoc"]] = None, + dynamic: Optional[Union[bool, str]] = None, + properties: Optional[Dict[str, Any]] = None, + **kwargs: Any, + ): + """ + :arg document.InnerDoc doc_class: base doc class that handles mapping. + If no `doc_class` is provided, new instance of `InnerDoc` will be created, + populated with `properties` and used. Can not be provided together with `properties` + :arg dynamic: whether new properties may be created dynamically. + Valid values are `True`, `False`, `'strict'`. + Can not be provided together with `doc_class`. + See https://www.elastic.co/guide/en/elasticsearch/reference/current/dynamic.html + for more details + :arg dict properties: used to construct underlying mapping if no `doc_class` is provided. + Can not be provided together with `doc_class` + """ + if doc_class and (properties or dynamic is not None): + raise ValidationException( + "doc_class and properties/dynamic should not be provided together" + ) + if doc_class: + self._doc_class: Type["InnerDoc"] = doc_class + else: + # FIXME import + from .document import InnerDoc + + # no InnerDoc subclass, creating one instead... 
+ self._doc_class = type("InnerDoc", (InnerDoc,), {}) + for name, field in (properties or {}).items(): + self._doc_class._doc_type.mapping.field(name, field) + if dynamic is not None: + self._doc_class._doc_type.mapping.meta("dynamic", dynamic) + + self._mapping: "MappingBase" = deepcopy(self._doc_class._doc_type.mapping) + super().__init__(**kwargs) + + def __getitem__(self, name: str) -> Field: + return self._mapping[name] + + def __contains__(self, name: str) -> bool: + return name in self._mapping + + def _empty(self) -> "InnerDoc": + return self._wrap({}) + + def _wrap(self, data: Dict[str, Any]) -> "InnerDoc": + return self._doc_class.from_es(data, data_only=True) + + def empty(self) -> Union["InnerDoc", AttrList[Any]]: + if self._multi: + return AttrList[Any]([], self._wrap) + return self._empty() + + def to_dict(self) -> Dict[str, Any]: + d = self._mapping.to_dict() + d.update(super().to_dict()) + return d + + def _collect_fields(self) -> Iterator[Field]: + return self._mapping.properties._collect_fields() + + def _deserialize(self, data: Any) -> "InnerDoc": + # don't wrap already wrapped data + if isinstance(data, self._doc_class): + return data + + if isinstance(data, AttrDict): + data = data._d_ + + return self._wrap(data) + + def _serialize( + self, data: Optional[Union[Dict[str, Any], "InnerDoc"]] + ) -> Optional[Dict[str, Any]]: + if data is None: + return None + + # somebody assigned raw dict to the field, we should tolerate that + if isinstance(data, collections.abc.Mapping): + return data + + return data.to_dict() + + def clean(self, data: Any) -> Any: + data = super().clean(data) + if data is None: + return None + if isinstance(data, (list, AttrList)): + for d in cast(Iterator["InnerDoc"], data): + d.full_clean() + else: + data.full_clean() + return data + + def update(self, other: Any, update_only: bool = False) -> None: + if not isinstance(other, Object): + # not an inner/nested object, no merge possible + return + + self._mapping.update(other._mapping, update_only) + + +class Nested(Object): + name = "nested" + + def __init__(self, *args: Any, **kwargs: Any): + kwargs.setdefault("multi", True) + super().__init__(*args, **kwargs) + + +class Date(Field): + name = "date" + _coerce = True + + def __init__( + self, + default_timezone: Optional[Union[str, "tzinfo"]] = None, + *args: Any, + **kwargs: Any, + ): + """ + :arg default_timezone: timezone that will be automatically used for tz-naive values + May be instance of `datetime.tzinfo` or string containing TZ offset + """ + if isinstance(default_timezone, str): + self._default_timezone = tz.gettz(default_timezone) + else: + self._default_timezone = default_timezone + super().__init__(*args, **kwargs) + + def _deserialize(self, data: Any) -> Union[datetime, date]: + if isinstance(data, str): + try: + data = parser.parse(data) + except Exception as e: + raise ValidationException( + f"Could not parse date from the value ({data!r})", e + ) + # we treat the yyyy-MM-dd format as a special case + if hasattr(self, "format") and self.format == "yyyy-MM-dd": + data = data.date() + + if isinstance(data, datetime): + if self._default_timezone and data.tzinfo is None: + data = data.replace(tzinfo=self._default_timezone) + return data + if isinstance(data, date): + return data + if isinstance(data, int): + # Divide by a float to preserve milliseconds on the datetime. 
+ return datetime.utcfromtimestamp(data / 1000.0) + + raise ValidationException(f"Could not parse date from the value ({data!r})") + + +class Text(Field): + _param_defs = { + "fields": {"type": "field", "hash": True}, + "analyzer": {"type": "analyzer"}, + "search_analyzer": {"type": "analyzer"}, + "search_quote_analyzer": {"type": "analyzer"}, + } + name = "text" + + +class SearchAsYouType(Field): + _param_defs = { + "analyzer": {"type": "analyzer"}, + "search_analyzer": {"type": "analyzer"}, + "search_quote_analyzer": {"type": "analyzer"}, + } + name = "search_as_you_type" + + +class Keyword(Field): + _param_defs = { + "fields": {"type": "field", "hash": True}, + "search_analyzer": {"type": "analyzer"}, + "normalizer": {"type": "normalizer"}, + } + name = "keyword" + + +class ConstantKeyword(Keyword): + name = "constant_keyword" + + +class Boolean(Field): + name = "boolean" + _coerce = True + + def _deserialize(self, data: Any) -> bool: + if data == "false": + return False + return bool(data) + + def clean(self, data: Any) -> Optional[bool]: + if data is not None: + data = self.deserialize(data) + if data is None and self._required: + raise ValidationException("Value required for this field.") + return data # type: ignore[no-any-return] + + +class Float(Field): + name = "float" + _coerce = True + + def _deserialize(self, data: Any) -> float: + return float(data) + + +class DenseVector(Field): + name = "dense_vector" + _coerce = True + + def __init__(self, **kwargs: Any): + self._element_type = kwargs.get("element_type", "float") + if self._element_type in ["float", "byte"]: + kwargs["multi"] = True + super().__init__(**kwargs) + + def _deserialize(self, data: Any) -> Any: + if self._element_type == "float": + return float(data) + elif self._element_type == "byte": + return int(data) + return data + + +class SparseVector(Field): + name = "sparse_vector" + + +class HalfFloat(Float): + name = "half_float" + + +class ScaledFloat(Float): + name = "scaled_float" + + def __init__(self, scaling_factor: int, *args: Any, **kwargs: Any): + super().__init__(scaling_factor=scaling_factor, *args, **kwargs) + + +class Double(Float): + name = "double" + + +class RankFeature(Float): + name = "rank_feature" + + +class RankFeatures(Field): + name = "rank_features" + + +class Integer(Field): + name = "integer" + _coerce = True + + def _deserialize(self, data: Any) -> int: + return int(data) + + +class Byte(Integer): + name = "byte" + + +class Short(Integer): + name = "short" + + +class Long(Integer): + name = "long" + + +class Ip(Field): + name = "ip" + _coerce = True + + def _deserialize(self, data: Any) -> Union["IPv4Address", "IPv6Address"]: + # the ipaddress library for pypy only accepts unicode. + return ipaddress.ip_address(unicode(data)) + + def _serialize(self, data: Any) -> Optional[str]: + if data is None: + return None + return str(data) + + +class Binary(Field): + name = "binary" + _coerce = True + + def clean(self, data: str) -> str: + # Binary fields are opaque, so there's not much cleaning + # that can be done. 
+ return data + + def _deserialize(self, data: Any) -> bytes: + return base64.b64decode(data) + + def _serialize(self, data: Any) -> Optional[str]: + if data is None: + return None + return base64.b64encode(data).decode() + + +class Point(Field): + name = "point" + + +class Shape(Field): + name = "shape" + + +class GeoPoint(Field): + name = "geo_point" + + +class GeoShape(Field): + name = "geo_shape" + + +class Completion(Field): + _param_defs = { + "analyzer": {"type": "analyzer"}, + "search_analyzer": {"type": "analyzer"}, + } + name = "completion" + + +class Percolator(Field): + name = "percolator" + _coerce = True + + def _deserialize(self, data: Any) -> "Query": + return Q(data) # type: ignore[no-any-return] + + def _serialize(self, data: Any) -> Optional[Dict[str, Any]]: + if data is None: + return None + return data.to_dict() # type: ignore[no-any-return] + + +class RangeField(Field): + _coerce = True + _core_field: Optional[Field] = None + + def _deserialize(self, data: Any) -> Range["_SupportsComparison"]: + if isinstance(data, Range): + return data + data = {k: self._core_field.deserialize(v) for k, v in data.items()} # type: ignore[union-attr] + return Range(data) + + def _serialize(self, data: Any) -> Optional[Dict[str, Any]]: + if data is None: + return None + if not isinstance(data, collections.abc.Mapping): + data = data.to_dict() + return {k: self._core_field.serialize(v) for k, v in data.items()} # type: ignore[union-attr] + + +class IntegerRange(RangeField): + name = "integer_range" + _core_field = Integer() + + +class FloatRange(RangeField): + name = "float_range" + _core_field = Float() + + +class LongRange(RangeField): + name = "long_range" + _core_field = Long() + + +class DoubleRange(RangeField): + name = "double_range" + _core_field = Double() + + +class DateRange(RangeField): + name = "date_range" + _core_field = Date() + + +class IpRange(Field): + # not a RangeField since ip_range supports CIDR ranges + name = "ip_range" + + +class Join(Field): + name = "join" + + +class TokenCount(Field): + name = "token_count" + + +class Murmur3(Field): + name = "murmur3" + + +class SemanticText(Field): + name = "semantic_text" diff --git a/elasticsearch/dsl/function.py b/elasticsearch/dsl/function.py new file mode 100644 index 000000000..9744e6f8b --- /dev/null +++ b/elasticsearch/dsl/function.py @@ -0,0 +1,180 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import collections.abc +from copy import deepcopy +from typing import ( + Any, + ClassVar, + Dict, + Literal, + MutableMapping, + Optional, + Union, + overload, +) + +from elastic_transport.client_utils import DEFAULT, DefaultType + +from .utils import AttrDict, DslBase + + +@overload +def SF(name_or_sf: MutableMapping[str, Any]) -> "ScoreFunction": ... 
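[Editorial note, not part of the patch] For reference, the three equivalent input forms accepted by construct_field() in field.py above (a minimal sketch; the "snowball" analyzer value is only an example)::

    from elasticsearch.dsl.field import Text, construct_field

    f1 = construct_field({"type": "text", "analyzer": "snowball"})  # raw dict with a "type" key
    f2 = construct_field(Text(analyzer="snowball"))                 # Field instance, passed through as-is
    f3 = construct_field("text", analyzer="snowball")               # type name plus keyword parameters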
+ + +@overload +def SF(name_or_sf: "ScoreFunction") -> "ScoreFunction": ... + + +@overload +def SF(name_or_sf: str, **params: Any) -> "ScoreFunction": ... + + +def SF( + name_or_sf: Union[str, "ScoreFunction", MutableMapping[str, Any]], + **params: Any, +) -> "ScoreFunction": + # {"script_score": {"script": "_score"}, "filter": {}} + if isinstance(name_or_sf, collections.abc.MutableMapping): + if params: + raise ValueError("SF() cannot accept parameters when passing in a dict.") + + kwargs: Dict[str, Any] = {} + sf = deepcopy(name_or_sf) + for k in ScoreFunction._param_defs: + if k in name_or_sf: + kwargs[k] = sf.pop(k) + + # not sf, so just filter+weight, which used to be boost factor + sf_params = params + if not sf: + name = "boost_factor" + # {'FUNCTION': {...}} + elif len(sf) == 1: + name, sf_params = sf.popitem() + else: + raise ValueError(f"SF() got an unexpected fields in the dictionary: {sf!r}") + + # boost factor special case, see elasticsearch #6343 + if not isinstance(sf_params, collections.abc.Mapping): + sf_params = {"value": sf_params} + + # mix known params (from _param_defs) and from inside the function + kwargs.update(sf_params) + return ScoreFunction.get_dsl_class(name)(**kwargs) + + # ScriptScore(script="_score", filter=Q()) + if isinstance(name_or_sf, ScoreFunction): + if params: + raise ValueError( + "SF() cannot accept parameters when passing in a ScoreFunction object." + ) + return name_or_sf + + # "script_score", script="_score", filter=Q() + return ScoreFunction.get_dsl_class(name_or_sf)(**params) + + +class ScoreFunction(DslBase): + _type_name = "score_function" + _type_shortcut = staticmethod(SF) + _param_defs = { + "query": {"type": "query"}, + "filter": {"type": "query"}, + "weight": {}, + } + name: ClassVar[Optional[str]] = None + + def to_dict(self) -> Dict[str, Any]: + d = super().to_dict() + # filter and query dicts should be at the same level as us + for k in self._param_defs: + if self.name is not None: + val = d[self.name] + if isinstance(val, dict) and k in val: + d[k] = val.pop(k) + return d + + +class ScriptScore(ScoreFunction): + name = "script_score" + + +class BoostFactor(ScoreFunction): + name = "boost_factor" + + def to_dict(self) -> Dict[str, Any]: + d = super().to_dict() + if self.name is not None: + val = d[self.name] + if isinstance(val, dict): + if "value" in val: + d[self.name] = val.pop("value") + else: + del d[self.name] + return d + + +class RandomScore(ScoreFunction): + name = "random_score" + + +class FieldValueFactorScore(ScoreFunction): + name = "field_value_factor" + + +class FieldValueFactor(FieldValueFactorScore): # alias of the above + pass + + +class Linear(ScoreFunction): + name = "linear" + + +class Gauss(ScoreFunction): + name = "gauss" + + +class Exp(ScoreFunction): + name = "exp" + + +class DecayFunction(AttrDict[Any]): + def __init__( + self, + *, + decay: Union[float, "DefaultType"] = DEFAULT, + offset: Any = DEFAULT, + scale: Any = DEFAULT, + origin: Any = DEFAULT, + multi_value_mode: Union[ + Literal["min", "max", "avg", "sum"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if decay != DEFAULT: + kwargs["decay"] = decay + if offset != DEFAULT: + kwargs["offset"] = offset + if scale != DEFAULT: + kwargs["scale"] = scale + if origin != DEFAULT: + kwargs["origin"] = origin + if multi_value_mode != DEFAULT: + kwargs["multi_value_mode"] = multi_value_mode + super().__init__(kwargs) diff --git a/elasticsearch/dsl/index.py b/elasticsearch/dsl/index.py new file mode 100644 index 000000000..368e58d42 --- /dev/null +++ 
b/elasticsearch/dsl/index.py @@ -0,0 +1,23 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from ._async.index import ( # noqa: F401 + AsyncComposableIndexTemplate, + AsyncIndex, + AsyncIndexTemplate, +) +from ._sync.index import ComposableIndexTemplate, Index, IndexTemplate # noqa: F401 diff --git a/elasticsearch/dsl/index_base.py b/elasticsearch/dsl/index_base.py new file mode 100644 index 000000000..71ff50339 --- /dev/null +++ b/elasticsearch/dsl/index_base.py @@ -0,0 +1,178 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple + +from typing_extensions import Self + +from . 
import analysis +from .utils import AnyUsingType, merge + +if TYPE_CHECKING: + from .document_base import DocumentMeta + from .field import Field + from .mapping_base import MappingBase + + +class IndexBase: + def __init__(self, name: str, mapping_class: type, using: AnyUsingType = "default"): + """ + :arg name: name of the index + :arg using: connection alias to use, defaults to ``'default'`` + """ + self._name = name + self._doc_types: List["DocumentMeta"] = [] + self._using = using + self._settings: Dict[str, Any] = {} + self._aliases: Dict[str, Any] = {} + self._analysis: Dict[str, Any] = {} + self._mapping_class = mapping_class + self._mapping: Optional["MappingBase"] = None + + def resolve_nested( + self, field_path: str + ) -> Tuple[List[str], Optional["MappingBase"]]: + for doc in self._doc_types: + nested, field = doc._doc_type.mapping.resolve_nested(field_path) + if field is not None: + return nested, field + if self._mapping: + return self._mapping.resolve_nested(field_path) + return [], None + + def resolve_field(self, field_path: str) -> Optional["Field"]: + for doc in self._doc_types: + field = doc._doc_type.mapping.resolve_field(field_path) + if field is not None: + return field + if self._mapping: + return self._mapping.resolve_field(field_path) + return None + + def get_or_create_mapping(self) -> "MappingBase": + if self._mapping is None: + self._mapping = self._mapping_class() + return self._mapping + + def mapping(self, mapping: "MappingBase") -> None: + """ + Associate a mapping (an instance of + :class:`~elasticsearch.dsl.Mapping`) with this index. + This means that, when this index is created, it will contain the + mappings for the document type defined by those mappings. + """ + self.get_or_create_mapping().update(mapping) + + def document(self, document: "DocumentMeta") -> "DocumentMeta": + """ + Associate a :class:`~elasticsearch.dsl.Document` subclass with an index. + This means that, when this index is created, it will contain the + mappings for the ``Document``. If the ``Document`` class doesn't have a + default index yet (by defining ``class Index``), this instance will be + used. Can be used as a decorator:: + + i = Index('blog') + + @i.document + class Post(Document): + title = Text() + + # create the index, including Post mappings + i.create() + + # .search() will now return a Search object that will return + # properly deserialized Post instances + s = i.search() + """ + self._doc_types.append(document) + + # If the document index does not have any name, that means the user + # did not set any index already to the document. + # So set this index as document index + if document._index._name is None: + document._index = self + + return document + + def settings(self, **kwargs: Any) -> Self: + """ + Add settings to the index:: + + i = Index('i') + i.settings(number_of_shards=1, number_of_replicas=0) + + Multiple calls to ``settings`` will merge the keys, later overriding + the earlier. + """ + self._settings.update(kwargs) + return self + + def aliases(self, **kwargs: Any) -> Self: + """ + Add aliases to the index definition:: + + i = Index('blog-v2') + i.aliases(blog={}, published={'filter': Q('term', published=True)}) + """ + self._aliases.update(kwargs) + return self + + def analyzer(self, *args: Any, **kwargs: Any) -> None: + """ + Explicitly add an analyzer to an index. Note that all custom analyzers + defined in mappings will also be created. This is useful for search analyzers. 
+ + Example:: + + from elasticsearch.dsl import analyzer, tokenizer + + my_analyzer = analyzer('my_analyzer', + tokenizer=tokenizer('trigram', 'nGram', min_gram=3, max_gram=3), + filter=['lowercase'] + ) + + i = Index('blog') + i.analyzer(my_analyzer) + + """ + analyzer = analysis.analyzer(*args, **kwargs) + d = analyzer.get_analysis_definition() + # empty custom analyzer, probably already defined out of our control + if not d: + return + + # merge the definition + merge(self._analysis, d, True) + + def to_dict(self) -> Dict[str, Any]: + out = {} + if self._settings: + out["settings"] = self._settings + if self._aliases: + out["aliases"] = self._aliases + mappings = self._mapping.to_dict() if self._mapping else {} + analysis = self._mapping._collect_analysis() if self._mapping else {} + for d in self._doc_types: + mapping = d._doc_type.mapping + merge(mappings, mapping.to_dict(), True) + merge(analysis, mapping._collect_analysis(), True) + if mappings: + out["mappings"] = mappings + if analysis or self._analysis: + merge(analysis, self._analysis) + out.setdefault("settings", {})["analysis"] = analysis + return out diff --git a/elasticsearch/dsl/mapping.py b/elasticsearch/dsl/mapping.py new file mode 100644 index 000000000..e39dd0490 --- /dev/null +++ b/elasticsearch/dsl/mapping.py @@ -0,0 +1,19 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from ._async.mapping import AsyncMapping # noqa: F401 +from ._sync.mapping import Mapping # noqa: F401 diff --git a/elasticsearch/dsl/mapping_base.py b/elasticsearch/dsl/mapping_base.py new file mode 100644 index 000000000..cb8110fd1 --- /dev/null +++ b/elasticsearch/dsl/mapping_base.py @@ -0,0 +1,219 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
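A minimal end-to-end sketch of the IndexBase helpers defined above, using the synchronous Index re-exported by this package; it assumes a reachable local cluster and the usual package-level re-exports (Document, Text, connections), and the index/field names are illustrative only::

    from elasticsearch.dsl import Document, Index, Text, connections

    connections.create_connection(hosts=["http://localhost:9200"])  # assumed local cluster

    blog = Index("blog-v1")
    blog.settings(number_of_shards=1, number_of_replicas=0)
    blog.aliases(blog={})
    blog.analyzer("folding", tokenizer="standard", filter=["lowercase", "asciifolding"])

    @blog.document
    class Post(Document):
        title = Text()

    print(blog.to_dict())  # settings, aliases, analysis and the Post mapping combined
    blog.create()          # create() is provided by the concrete synchronous Index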
+ +import collections.abc +from itertools import chain +from typing import Any, Dict, Iterator, List, Optional, Tuple, cast + +from typing_extensions import Self + +from .field import Field, Nested, Text, construct_field +from .utils import DslBase + +META_FIELDS = frozenset( + ( + "dynamic", + "transform", + "dynamic_date_formats", + "date_detection", + "numeric_detection", + "dynamic_templates", + "enabled", + ) +) + + +class Properties(DslBase): + name = "properties" + _param_defs = {"properties": {"type": "field", "hash": True}} + + properties: Dict[str, Field] + + def __init__(self) -> None: + super().__init__() + + def __repr__(self) -> str: + return "Properties()" + + def __getitem__(self, name: str) -> Field: + return self.properties[name] + + def __contains__(self, name: str) -> bool: + return name in self.properties + + def to_dict(self) -> Dict[str, Any]: + return cast(Dict[str, Field], super().to_dict()["properties"]) + + def field(self, name: str, *args: Any, **kwargs: Any) -> Self: + self.properties[name] = construct_field(*args, **kwargs) + return self + + def _collect_fields(self) -> Iterator[Field]: + """Iterate over all Field objects within, including multi fields.""" + fields = cast(Dict[str, Field], self.properties.to_dict()) # type: ignore[attr-defined] + for f in fields.values(): + yield f + # multi fields + if hasattr(f, "fields"): + yield from f.fields.to_dict().values() + # nested and inner objects + if hasattr(f, "_collect_fields"): + yield from f._collect_fields() + + def update(self, other_object: Any) -> None: + if not hasattr(other_object, "properties"): + # not an inner/nested object, no merge possible + return + + our, other = self.properties, other_object.properties + for name in other: + if name in our: + if hasattr(our[name], "update"): + our[name].update(other[name]) + continue + our[name] = other[name] + + +class MappingBase: + def __init__(self) -> None: + self.properties = Properties() + self._meta: Dict[str, Any] = {} + + def __repr__(self) -> str: + return "Mapping()" + + def _clone(self) -> Self: + m = self.__class__() + m.properties._params = self.properties._params.copy() + return m + + def resolve_nested( + self, field_path: str + ) -> Tuple[List[str], Optional["MappingBase"]]: + field = self + nested = [] + parts = field_path.split(".") + for i, step in enumerate(parts): + try: + field = field[step] # type: ignore[assignment] + except KeyError: + return [], None + if isinstance(field, Nested): + nested.append(".".join(parts[: i + 1])) + return nested, field + + def resolve_field(self, field_path: str) -> Optional[Field]: + field = self + for step in field_path.split("."): + try: + field = field[step] # type: ignore[assignment] + except KeyError: + return None + return cast(Field, field) + + def _collect_analysis(self) -> Dict[str, Any]: + analysis: Dict[str, Any] = {} + fields = [] + if "_all" in self._meta: + fields.append(Text(**self._meta["_all"])) + + for f in chain(fields, self.properties._collect_fields()): + for analyzer_name in ( + "analyzer", + "normalizer", + "search_analyzer", + "search_quote_analyzer", + ): + if not hasattr(f, analyzer_name): + continue + analyzer = getattr(f, analyzer_name) + d = analyzer.get_analysis_definition() + # empty custom analyzer, probably already defined out of our control + if not d: + continue + + # merge the definition + # TODO: conflict detection/resolution + for key in d: + analysis.setdefault(key, {}).update(d[key]) + + return analysis + + def _update_from_dict(self, raw: Dict[str, Any]) -> None: + 
for name, definition in raw.get("properties", {}).items(): + self.field(name, definition) + + # metadata like _all etc + for name, value in raw.items(): + if name != "properties": + if isinstance(value, collections.abc.Mapping): + self.meta(name, **value) + else: + self.meta(name, value) + + def update(self, mapping: "MappingBase", update_only: bool = False) -> None: + for name in mapping: + if update_only and name in self: + # nested and inner objects, merge recursively + if hasattr(self[name], "update"): + # FIXME only merge subfields, not the settings + self[name].update(mapping[name], update_only) + continue + self.field(name, mapping[name]) + + if update_only: + for name in mapping._meta: + if name not in self._meta: + self._meta[name] = mapping._meta[name] + else: + self._meta.update(mapping._meta) + + def __contains__(self, name: str) -> bool: + return name in self.properties.properties + + def __getitem__(self, name: str) -> Field: + return self.properties.properties[name] + + def __iter__(self) -> Iterator[str]: + return iter(self.properties.properties) + + def field(self, *args: Any, **kwargs: Any) -> Self: + self.properties.field(*args, **kwargs) + return self + + def meta(self, name: str, params: Any = None, **kwargs: Any) -> Self: + if not name.startswith("_") and name not in META_FIELDS: + name = "_" + name + + if params and kwargs: + raise ValueError("Meta configs cannot have both value and a dictionary.") + + self._meta[name] = kwargs if params is None else params + return self + + def to_dict(self) -> Dict[str, Any]: + meta = self._meta + + # hard coded serialization of analyzers in _all + if "_all" in meta: + meta = meta.copy() + _all = meta["_all"] = meta["_all"].copy() + for f in ("analyzer", "search_analyzer", "search_quote_analyzer"): + if hasattr(_all.get(f, None), "to_dict"): + _all[f] = _all[f].to_dict() + meta.update(self.properties.to_dict()) + return meta diff --git a/elasticsearch/dsl/query.py b/elasticsearch/dsl/query.py new file mode 100644 index 000000000..b5808959c --- /dev/null +++ b/elasticsearch/dsl/query.py @@ -0,0 +1,2794 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import collections.abc +from copy import deepcopy +from itertools import chain +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ClassVar, + Dict, + List, + Literal, + Mapping, + MutableMapping, + Optional, + Protocol, + Sequence, + TypeVar, + Union, + cast, + overload, +) + +from elastic_transport.client_utils import DEFAULT + +# 'SF' looks unused but the test suite assumes it's available +# from this module so others are liable to do so as well. +from .function import SF # noqa: F401 +from .function import ScoreFunction +from .utils import DslBase + +if TYPE_CHECKING: + from elastic_transport.client_utils import DefaultType + + from . 
import types, wrappers + from .document_base import InstrumentedField + +_T = TypeVar("_T") +_M = TypeVar("_M", bound=Mapping[str, Any]) + + +class QProxiedProtocol(Protocol[_T]): + _proxied: _T + + +@overload +def Q(name_or_query: MutableMapping[str, _M]) -> "Query": ... + + +@overload +def Q(name_or_query: "Query") -> "Query": ... + + +@overload +def Q(name_or_query: QProxiedProtocol[_T]) -> _T: ... + + +@overload +def Q(name_or_query: str = "match_all", **params: Any) -> "Query": ... + + +def Q( + name_or_query: Union[ + str, + "Query", + QProxiedProtocol[_T], + MutableMapping[str, _M], + ] = "match_all", + **params: Any, +) -> Union["Query", _T]: + # {"match": {"title": "python"}} + if isinstance(name_or_query, collections.abc.MutableMapping): + if params: + raise ValueError("Q() cannot accept parameters when passing in a dict.") + if len(name_or_query) != 1: + raise ValueError( + 'Q() can only accept dict with a single query ({"match": {...}}). ' + "Instead it got (%r)" % name_or_query + ) + name, q_params = deepcopy(name_or_query).popitem() + return Query.get_dsl_class(name)(_expand__to_dot=False, **q_params) + + # MatchAll() + if isinstance(name_or_query, Query): + if params: + raise ValueError( + "Q() cannot accept parameters when passing in a Query object." + ) + return name_or_query + + # s.query = Q('filtered', query=s.query) + if hasattr(name_or_query, "_proxied"): + return cast(QProxiedProtocol[_T], name_or_query)._proxied + + # "match", title="python" + return Query.get_dsl_class(name_or_query)(**params) + + +class Query(DslBase): + _type_name = "query" + _type_shortcut = staticmethod(Q) + name: ClassVar[Optional[str]] = None + + # Add type annotations for methods not defined in every subclass + __ror__: ClassVar[Callable[["Query", "Query"], "Query"]] + __radd__: ClassVar[Callable[["Query", "Query"], "Query"]] + __rand__: ClassVar[Callable[["Query", "Query"], "Query"]] + + def __add__(self, other: "Query") -> "Query": + # make sure we give queries that know how to combine themselves + # preference + if hasattr(other, "__radd__"): + return other.__radd__(self) + return Bool(must=[self, other]) + + def __invert__(self) -> "Query": + return Bool(must_not=[self]) + + def __or__(self, other: "Query") -> "Query": + # make sure we give queries that know how to combine themselves + # preference + if hasattr(other, "__ror__"): + return other.__ror__(self) + return Bool(should=[self, other]) + + def __and__(self, other: "Query") -> "Query": + # make sure we give queries that know how to combine themselves + # preference + if hasattr(other, "__rand__"): + return other.__rand__(self) + return Bool(must=[self, other]) + + +class Bool(Query): + """ + matches documents matching boolean combinations of other queries. + + :arg filter: The clause (query) must appear in matching documents. + However, unlike `must`, the score of the query will be ignored. + :arg minimum_should_match: Specifies the number or percentage of + `should` clauses returned documents must match. + :arg must: The clause (query) must appear in matching documents and + will contribute to the score. + :arg must_not: The clause (query) must not appear in the matching + documents. Because scoring is ignored, a score of `0` is returned + for all documents. + :arg should: The clause (query) should appear in the matching + document. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. 
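A short sketch of the Q shortcut defined above, covering the name-plus-params form, the single-key dict form, and operator combination::

    from elasticsearch.dsl.query import Q

    q1 = Q("match", title="python")            # name + params
    q2 = Q({"match": {"title": "python"}})     # single-key dict form
    assert q1 == q2                            # both build the same Match query

    q3 = Q("match", title="python") & Q("term", published=True)
    print(q3.to_dict())  # {'bool': {'must': [{'match': ...}, {'term': ...}]}}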
A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "bool" + _param_defs = { + "filter": {"type": "query", "multi": True}, + "must": {"type": "query", "multi": True}, + "must_not": {"type": "query", "multi": True}, + "should": {"type": "query", "multi": True}, + } + + def __init__( + self, + *, + filter: Union[Query, Sequence[Query], "DefaultType"] = DEFAULT, + minimum_should_match: Union[int, str, "DefaultType"] = DEFAULT, + must: Union[Query, Sequence[Query], "DefaultType"] = DEFAULT, + must_not: Union[Query, Sequence[Query], "DefaultType"] = DEFAULT, + should: Union[Query, Sequence[Query], "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + filter=filter, + minimum_should_match=minimum_should_match, + must=must, + must_not=must_not, + should=should, + boost=boost, + _name=_name, + **kwargs, + ) + + def __add__(self, other: Query) -> "Bool": + q = self._clone() + if isinstance(other, Bool): + q.must += other.must + q.should += other.should + q.must_not += other.must_not + q.filter += other.filter + else: + q.must.append(other) + return q + + __radd__ = __add__ + + def __or__(self, other: Query) -> Query: + for q in (self, other): + if isinstance(q, Bool) and not any( + (q.must, q.must_not, q.filter, getattr(q, "minimum_should_match", None)) + ): + other = self if q is other else other + q = q._clone() + if isinstance(other, Bool) and not any( + ( + other.must, + other.must_not, + other.filter, + getattr(other, "minimum_should_match", None), + ) + ): + q.should.extend(other.should) + else: + q.should.append(other) + return q + + return Bool(should=[self, other]) + + __ror__ = __or__ + + @property + def _min_should_match(self) -> int: + return getattr( + self, + "minimum_should_match", + 0 if not self.should or (self.must or self.filter) else 1, + ) + + def __invert__(self) -> Query: + # Because an empty Bool query is treated like + # MatchAll the inverse should be MatchNone + if not any(chain(self.must, self.filter, self.should, self.must_not)): + return MatchNone() + + negations: List[Query] = [] + for q in chain(self.must, self.filter): + negations.append(~q) + + for q in self.must_not: + negations.append(q) + + if self.should and self._min_should_match: + negations.append(Bool(must_not=self.should[:])) + + if len(negations) == 1: + return negations[0] + return Bool(should=negations) + + def __and__(self, other: Query) -> Query: + q = self._clone() + if isinstance(other, Bool): + q.must += other.must + q.must_not += other.must_not + q.filter += other.filter + q.should = [] + + # reset minimum_should_match as it will get calculated below + if "minimum_should_match" in q._params: + del q._params["minimum_should_match"] + + for qx in (self, other): + min_should_match = qx._min_should_match + # TODO: percentages or negative numbers will fail here + # for now we report an error + if not isinstance(min_should_match, int) or min_should_match < 0: + raise ValueError( + "Can only combine queries with positive integer values for minimum_should_match" + ) + # all subqueries are required + if len(qx.should) <= min_should_match: + q.must.extend(qx.should) + # not all of them are required, use it and remember min_should_match + elif not q.should: + q.minimum_should_match = min_should_match + q.should = qx.should + # all queries are optional, just extend should + 
elif q._min_should_match == 0 and min_should_match == 0: + q.should.extend(qx.should) + # not all are required, add a should list to the must with proper min_should_match + else: + q.must.append( + Bool(should=qx.should, minimum_should_match=min_should_match) + ) + else: + if not (q.must or q.filter) and q.should: + q._params.setdefault("minimum_should_match", 1) + q.must.append(other) + return q + + __rand__ = __and__ + + +class Boosting(Query): + """ + Returns documents matching a `positive` query while reducing the + relevance score of documents that also match a `negative` query. + + :arg negative_boost: (required) Floating point number between 0 and + 1.0 used to decrease the relevance scores of documents matching + the `negative` query. + :arg negative: (required) Query used to decrease the relevance score + of matching documents. + :arg positive: (required) Any returned documents must match this + query. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "boosting" + _param_defs = { + "negative": {"type": "query"}, + "positive": {"type": "query"}, + } + + def __init__( + self, + *, + negative_boost: Union[float, "DefaultType"] = DEFAULT, + negative: Union[Query, "DefaultType"] = DEFAULT, + positive: Union[Query, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + negative_boost=negative_boost, + negative=negative, + positive=positive, + boost=boost, + _name=_name, + **kwargs, + ) + + +class Common(Query): + """ + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + """ + + name = "common" + + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union[ + "types.CommonTermsQuery", Dict[str, Any], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) + + +class CombinedFields(Query): + """ + The `combined_fields` query supports searching multiple text fields as + if their contents had been indexed into one combined field. + + :arg fields: (required) List of fields to search. Field wildcard + patterns are allowed. Only `text` fields are supported, and they + must all have the same search `analyzer`. + :arg query: (required) Text to search for in the provided `fields`. + The `combined_fields` query analyzes the provided text before + performing a search. + :arg auto_generate_synonyms_phrase_query: If true, match phrase + queries are automatically created for multi-term synonyms. + Defaults to `True` if omitted. + :arg operator: Boolean logic used to interpret text in the query + value. Defaults to `or` if omitted. + :arg minimum_should_match: Minimum number of clauses that must match + for a document to be returned. + :arg zero_terms_query: Indicates whether no documents are returned if + the analyzer removes all tokens, such as when using a `stop` + filter. Defaults to `none` if omitted. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. 
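The operator overloads above make Bool composition explicit; a small sketch of the three basic combinations::

    from elasticsearch.dsl.query import Q

    both = Q("term", tag="es") & Q("term", tag="py")    # Bool(must=[...])
    either = Q("term", tag="es") | Q("term", tag="py")  # Bool(should=[...])
    negated = ~Q("term", tag="es")                      # Bool(must_not=[...])
    for q in (both, either, negated):
        print(q.to_dict())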
A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "combined_fields" + + def __init__( + self, + *, + fields: Union[ + Sequence[Union[str, "InstrumentedField"]], "DefaultType" + ] = DEFAULT, + query: Union[str, "DefaultType"] = DEFAULT, + auto_generate_synonyms_phrase_query: Union[bool, "DefaultType"] = DEFAULT, + operator: Union[Literal["or", "and"], "DefaultType"] = DEFAULT, + minimum_should_match: Union[int, str, "DefaultType"] = DEFAULT, + zero_terms_query: Union[Literal["none", "all"], "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + fields=fields, + query=query, + auto_generate_synonyms_phrase_query=auto_generate_synonyms_phrase_query, + operator=operator, + minimum_should_match=minimum_should_match, + zero_terms_query=zero_terms_query, + boost=boost, + _name=_name, + **kwargs, + ) + + +class ConstantScore(Query): + """ + Wraps a filter query and returns every matching document with a + relevance score equal to the `boost` parameter value. + + :arg filter: (required) Filter query you wish to run. Any returned + documents must match this query. Filter queries do not calculate + relevance scores. To speed up performance, Elasticsearch + automatically caches frequently used filter queries. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "constant_score" + _param_defs = { + "filter": {"type": "query"}, + } + + def __init__( + self, + *, + filter: Union[Query, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(filter=filter, boost=boost, _name=_name, **kwargs) + + +class DisMax(Query): + """ + Returns documents matching one or more wrapped queries, called query + clauses or clauses. If a returned document matches multiple query + clauses, the `dis_max` query assigns the document the highest + relevance score from any matching clause, plus a tie breaking + increment for any additional matching subqueries. + + :arg queries: (required) One or more query clauses. Returned documents + must match one or more of these queries. If a document matches + multiple queries, Elasticsearch uses the highest relevance score. + :arg tie_breaker: Floating point number between 0 and 1.0 used to + increase the relevance scores of documents matching multiple query + clauses. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ + + name = "dis_max" + _param_defs = { + "queries": {"type": "query", "multi": True}, + } + + def __init__( + self, + *, + queries: Union[Sequence[Query], "DefaultType"] = DEFAULT, + tie_breaker: Union[float, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + queries=queries, tie_breaker=tie_breaker, boost=boost, _name=_name, **kwargs + ) + + +class DistanceFeature(Query): + """ + Boosts the relevance score of documents closer to a provided origin + date or point. For example, you can use this query to give more weight + to documents closer to a certain date or location. + + :arg origin: (required) Date or point of origin used to calculate + distances. If the `field` value is a `date` or `date_nanos` field, + the `origin` value must be a date. Date Math, such as `now-1h`, is + supported. If the field value is a `geo_point` field, the `origin` + value must be a geopoint. + :arg pivot: (required) Distance from the `origin` at which relevance + scores receive half of the `boost` value. If the `field` value is + a `date` or `date_nanos` field, the `pivot` value must be a time + unit, such as `1h` or `10d`. If the `field` value is a `geo_point` + field, the `pivot` value must be a distance unit, such as `1km` or + `12m`. + :arg field: (required) Name of the field used to calculate distances. + This field must meet the following criteria: be a `date`, + `date_nanos` or `geo_point` field; have an `index` mapping + parameter value of `true`, which is the default; have an + `doc_values` mapping parameter value of `true`, which is the + default. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "distance_feature" + + def __init__( + self, + *, + origin: Any = DEFAULT, + pivot: Any = DEFAULT, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + origin=origin, pivot=pivot, field=field, boost=boost, _name=_name, **kwargs + ) + + +class Exists(Query): + """ + Returns documents that contain an indexed value for a field. + + :arg field: (required) Name of the field you wish to search. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "exists" + + def __init__( + self, + *, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(field=field, boost=boost, _name=_name, **kwargs) + + +class FunctionScore(Query): + """ + The `function_score` enables you to modify the score of documents that + are retrieved by a query. + + :arg boost_mode: Defines how he newly computed score is combined with + the score of the query Defaults to `multiply` if omitted. 
+ :arg functions: One or more functions that compute a new score for + each document returned by the query. + :arg max_boost: Restricts the new score to not exceed the provided + limit. + :arg min_score: Excludes documents that do not meet the provided score + threshold. + :arg query: A query that determines the documents for which a new + score is computed. + :arg score_mode: Specifies how the computed scores are combined + Defaults to `multiply` if omitted. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "function_score" + _param_defs = { + "functions": {"type": "score_function", "multi": True}, + "query": {"type": "query"}, + "filter": {"type": "query"}, + } + + def __init__( + self, + *, + boost_mode: Union[ + Literal["multiply", "replace", "sum", "avg", "max", "min"], "DefaultType" + ] = DEFAULT, + functions: Union[Sequence[ScoreFunction], "DefaultType"] = DEFAULT, + max_boost: Union[float, "DefaultType"] = DEFAULT, + min_score: Union[float, "DefaultType"] = DEFAULT, + query: Union[Query, "DefaultType"] = DEFAULT, + score_mode: Union[ + Literal["multiply", "sum", "avg", "first", "max", "min"], "DefaultType" + ] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if functions is DEFAULT: + functions = [] + for name in ScoreFunction._classes: + if name in kwargs: + functions.append({name: kwargs.pop(name)}) # type: ignore[arg-type] + super().__init__( + boost_mode=boost_mode, + functions=functions, + max_boost=max_boost, + min_score=min_score, + query=query, + score_mode=score_mode, + boost=boost, + _name=_name, + **kwargs, + ) + + +class Fuzzy(Query): + """ + Returns documents that contain terms similar to the search term, as + measured by a Levenshtein edit distance. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + """ + + name = "fuzzy" + + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union["types.FuzzyQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) + + +class GeoBoundingBox(Query): + """ + Matches geo_point and geo_shape values that intersect a bounding box. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + :arg type: + :arg validation_method: Set to `IGNORE_MALFORMED` to accept geo points + with invalid latitude or longitude. Set to `COERCE` to also try to + infer correct latitude or longitude. Defaults to `'strict'` if + omitted. + :arg ignore_unmapped: Set to `true` to ignore an unmapped field and + not match any documents for this query. Set to `false` to throw an + exception if the field is not mapped. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
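A sketch of a function_score query built with the SF shortcut re-exported at the top of this module; the field name "likes" is illustrative::

    from elasticsearch.dsl.query import Q, SF

    fs = Q(
        "function_score",
        query=Q("match", title="python"),
        functions=[SF("field_value_factor", field="likes", modifier="log1p")],
        boost_mode="sum",
    )
    print(fs.to_dict())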
+ :arg _name: + """ + + name = "geo_bounding_box" + + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union[ + "types.CoordsGeoBounds", + "types.TopLeftBottomRightGeoBounds", + "types.TopRightBottomLeftGeoBounds", + "types.WktGeoBounds", + Dict[str, Any], + "DefaultType", + ] = DEFAULT, + *, + type: Union[Literal["memory", "indexed"], "DefaultType"] = DEFAULT, + validation_method: Union[ + Literal["coerce", "ignore_malformed", "strict"], "DefaultType" + ] = DEFAULT, + ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__( + type=type, + validation_method=validation_method, + ignore_unmapped=ignore_unmapped, + boost=boost, + _name=_name, + **kwargs, + ) + + +class GeoDistance(Query): + """ + Matches `geo_point` and `geo_shape` values within a given distance of + a geopoint. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + :arg distance: (required) The radius of the circle centred on the + specified location. Points which fall into this circle are + considered to be matches. + :arg distance_type: How to compute the distance. Set to `plane` for a + faster calculation that's inaccurate on long distances and close + to the poles. Defaults to `'arc'` if omitted. + :arg validation_method: Set to `IGNORE_MALFORMED` to accept geo points + with invalid latitude or longitude. Set to `COERCE` to also try to + infer correct latitude or longitude. Defaults to `'strict'` if + omitted. + :arg ignore_unmapped: Set to `true` to ignore an unmapped field and + not match any documents for this query. Set to `false` to throw an + exception if the field is not mapped. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "geo_distance" + + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union[ + "types.LatLonGeoLocation", + "types.GeoHashLocation", + Sequence[float], + str, + Dict[str, Any], + "DefaultType", + ] = DEFAULT, + *, + distance: Union[str, "DefaultType"] = DEFAULT, + distance_type: Union[Literal["arc", "plane"], "DefaultType"] = DEFAULT, + validation_method: Union[ + Literal["coerce", "ignore_malformed", "strict"], "DefaultType" + ] = DEFAULT, + ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__( + distance=distance, + distance_type=distance_type, + validation_method=validation_method, + ignore_unmapped=ignore_unmapped, + boost=boost, + _name=_name, + **kwargs, + ) + + +class GeoPolygon(Query): + """ + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + :arg validation_method: Defaults to `'strict'` if omitted. + :arg ignore_unmapped: + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. 
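A sketch of the positional field/value form used by the geo queries above, assuming a geo_point field named "pin.location"::

    from elasticsearch.dsl.query import GeoDistance

    near_nyc = GeoDistance(
        "pin.location",                 # _field
        {"lat": 40.73, "lon": -73.93},  # _value
        distance="2km",
    )
    print(near_nyc.to_dict())
    # {'geo_distance': {'distance': '2km', 'pin.location': {'lat': 40.73, 'lon': -73.93}}}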
A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "geo_polygon" + + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union[ + "types.GeoPolygonPoints", Dict[str, Any], "DefaultType" + ] = DEFAULT, + *, + validation_method: Union[ + Literal["coerce", "ignore_malformed", "strict"], "DefaultType" + ] = DEFAULT, + ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__( + validation_method=validation_method, + ignore_unmapped=ignore_unmapped, + boost=boost, + _name=_name, + **kwargs, + ) + + +class GeoShape(Query): + """ + Filter documents indexed using either the `geo_shape` or the + `geo_point` type. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + :arg ignore_unmapped: Set to `true` to ignore an unmapped field and + not match any documents for this query. Set to `false` to throw an + exception if the field is not mapped. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "geo_shape" + + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union[ + "types.GeoShapeFieldQuery", Dict[str, Any], "DefaultType" + ] = DEFAULT, + *, + ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__( + ignore_unmapped=ignore_unmapped, boost=boost, _name=_name, **kwargs + ) + + +class HasChild(Query): + """ + Returns parent documents whose joined child documents match a provided + query. + + :arg query: (required) Query you wish to run on child documents of the + `type` field. If a child document matches the search, the query + returns the parent document. + :arg type: (required) Name of the child relationship mapped for the + `join` field. + :arg ignore_unmapped: Indicates whether to ignore an unmapped `type` + and not return any documents instead of an error. + :arg inner_hits: If defined, each search hit will contain inner hits. + :arg max_children: Maximum number of child documents that match the + query allowed for a returned parent document. If the parent + document exceeds this limit, it is excluded from the search + results. + :arg min_children: Minimum number of child documents that match the + query required to match the query for a returned parent document. + If the parent document does not meet this limit, it is excluded + from the search results. + :arg score_mode: Indicates how scores for matching child documents + affect the root parent document’s relevance score. Defaults to + `'none'` if omitted. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. 
A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "has_child" + _param_defs = { + "query": {"type": "query"}, + } + + def __init__( + self, + *, + query: Union[Query, "DefaultType"] = DEFAULT, + type: Union[str, "DefaultType"] = DEFAULT, + ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT, + inner_hits: Union["types.InnerHits", Dict[str, Any], "DefaultType"] = DEFAULT, + max_children: Union[int, "DefaultType"] = DEFAULT, + min_children: Union[int, "DefaultType"] = DEFAULT, + score_mode: Union[ + Literal["none", "avg", "sum", "max", "min"], "DefaultType" + ] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + query=query, + type=type, + ignore_unmapped=ignore_unmapped, + inner_hits=inner_hits, + max_children=max_children, + min_children=min_children, + score_mode=score_mode, + boost=boost, + _name=_name, + **kwargs, + ) + + +class HasParent(Query): + """ + Returns child documents whose joined parent document matches a + provided query. + + :arg parent_type: (required) Name of the parent relationship mapped + for the `join` field. + :arg query: (required) Query you wish to run on parent documents of + the `parent_type` field. If a parent document matches the search, + the query returns its child documents. + :arg ignore_unmapped: Indicates whether to ignore an unmapped + `parent_type` and not return any documents instead of an error. + You can use this parameter to query multiple indices that may not + contain the `parent_type`. + :arg inner_hits: If defined, each search hit will contain inner hits. + :arg score: Indicates whether the relevance score of a matching parent + document is aggregated into its child documents. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "has_parent" + _param_defs = { + "query": {"type": "query"}, + } + + def __init__( + self, + *, + parent_type: Union[str, "DefaultType"] = DEFAULT, + query: Union[Query, "DefaultType"] = DEFAULT, + ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT, + inner_hits: Union["types.InnerHits", Dict[str, Any], "DefaultType"] = DEFAULT, + score: Union[bool, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + parent_type=parent_type, + query=query, + ignore_unmapped=ignore_unmapped, + inner_hits=inner_hits, + score=score, + boost=boost, + _name=_name, + **kwargs, + ) + + +class Ids(Query): + """ + Returns documents based on their IDs. This query uses document IDs + stored in the `_id` field. + + :arg values: An array of document IDs. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
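A sketch of the join queries above, assuming a join field with a hypothetical question/comment relation::

    from elasticsearch.dsl.query import HasChild, HasParent, Q

    questions_with_helpful_comments = HasChild(
        type="comment", query=Q("match", body="helpful"), score_mode="max"
    )
    comments_on_python_questions = HasParent(
        parent_type="question", query=Q("term", tag="python")
    )
    print(questions_with_helpful_comments.to_dict())
    print(comments_on_python_questions.to_dict())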
+ :arg _name: + """ + + name = "ids" + + def __init__( + self, + *, + values: Union[str, Sequence[str], "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(values=values, boost=boost, _name=_name, **kwargs) + + +class Intervals(Query): + """ + Returns documents based on the order and proximity of matching terms. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + """ + + name = "intervals" + + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union["types.IntervalsQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) + + +class Knn(Query): + """ + Finds the k nearest vectors to a query vector, as measured by a + similarity metric. knn query finds nearest vectors through approximate + search on indexed dense_vectors. + + :arg field: (required) The name of the vector field to search against + :arg query_vector: The query vector + :arg query_vector_builder: The query vector builder. You must provide + a query_vector_builder or query_vector, but not both. + :arg num_candidates: The number of nearest neighbor candidates to + consider per shard + :arg k: The final number of nearest neighbors to return as top hits + :arg filter: Filters for the kNN search query + :arg similarity: The minimum similarity for a vector to be considered + a match + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "knn" + _param_defs = { + "filter": {"type": "query", "multi": True}, + } + + def __init__( + self, + *, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + query_vector: Union[Sequence[float], "DefaultType"] = DEFAULT, + query_vector_builder: Union[ + "types.QueryVectorBuilder", Dict[str, Any], "DefaultType" + ] = DEFAULT, + num_candidates: Union[int, "DefaultType"] = DEFAULT, + k: Union[int, "DefaultType"] = DEFAULT, + filter: Union[Query, Sequence[Query], "DefaultType"] = DEFAULT, + similarity: Union[float, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + field=field, + query_vector=query_vector, + query_vector_builder=query_vector_builder, + num_candidates=num_candidates, + k=k, + filter=filter, + similarity=similarity, + boost=boost, + _name=_name, + **kwargs, + ) + + +class Match(Query): + """ + Returns documents that match a provided text, number, date or boolean + value. The provided text is analyzed before matching. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + """ + + name = "match" + + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union["types.MatchQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) + + +class MatchAll(Query): + """ + Matches all documents, giving them all a `_score` of 1.0. 
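A sketch of the Knn query above, assuming a dense_vector field named "title_vector"; the vector values are placeholders::

    from elasticsearch.dsl.query import Knn, Q

    knn = Knn(
        field="title_vector",
        query_vector=[0.1, 0.2, 0.3],
        k=10,
        num_candidates=100,
        filter=Q("term", category="docs"),
    )
    print(knn.to_dict())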
+ + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "match_all" + + def __init__( + self, + *, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(boost=boost, _name=_name, **kwargs) + + def __add__(self, other: "Query") -> "Query": + return other._clone() + + __and__ = __rand__ = __radd__ = __add__ + + def __or__(self, other: "Query") -> "MatchAll": + return self + + __ror__ = __or__ + + def __invert__(self) -> "MatchNone": + return MatchNone() + + +EMPTY_QUERY = MatchAll() + + +class MatchBoolPrefix(Query): + """ + Analyzes its input and constructs a `bool` query from the terms. Each + term except the last is used in a `term` query. The last term is used + in a prefix query. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + """ + + name = "match_bool_prefix" + + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union[ + "types.MatchBoolPrefixQuery", Dict[str, Any], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) + + +class MatchNone(Query): + """ + Matches no documents. + + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "match_none" + + def __init__( + self, + *, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(boost=boost, _name=_name, **kwargs) + + def __add__(self, other: "Query") -> "MatchNone": + return self + + __and__ = __rand__ = __radd__ = __add__ + + def __or__(self, other: "Query") -> "Query": + return other._clone() + + __ror__ = __or__ + + def __invert__(self) -> MatchAll: + return MatchAll() + + +class MatchPhrase(Query): + """ + Analyzes the text and creates a phrase query out of the analyzed text. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + """ + + name = "match_phrase" + + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union[ + "types.MatchPhraseQuery", Dict[str, Any], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) + + +class MatchPhrasePrefix(Query): + """ + Returns documents that contain the words of a provided text, in the + same order as provided. The last term of the provided text is treated + as a prefix, matching any words that begin with that term. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. 
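The single-field queries above all follow the same _field/_value convention; a short sketch using Match and MatchPhrase::

    from elasticsearch.dsl.query import Match, MatchPhrase

    m1 = Match("title", {"query": "fast search", "operator": "and"})  # positional field/value
    m2 = Match(title="fast search")                                   # keyword form also accepted
    p = MatchPhrase("title", "quick brown fox")
    for q in (m1, m2, p):
        print(q.to_dict())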
+ """ + + name = "match_phrase_prefix" + + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union[ + "types.MatchPhrasePrefixQuery", Dict[str, Any], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) + + +class MoreLikeThis(Query): + """ + Returns documents that are "like" a given set of documents. + + :arg like: (required) Specifies free form text and/or a single or + multiple documents for which you want to find similar documents. + :arg analyzer: The analyzer that is used to analyze the free form + text. Defaults to the analyzer associated with the first field in + fields. + :arg boost_terms: Each term in the formed query could be further + boosted by their tf-idf score. This sets the boost factor to use + when using this feature. Defaults to deactivated (0). + :arg fail_on_unsupported_field: Controls whether the query should fail + (throw an exception) if any of the specified fields are not of the + supported types (`text` or `keyword`). Defaults to `True` if + omitted. + :arg fields: A list of fields to fetch and analyze the text from. + Defaults to the `index.query.default_field` index setting, which + has a default value of `*`. + :arg include: Specifies whether the input documents should also be + included in the search results returned. + :arg max_doc_freq: The maximum document frequency above which the + terms are ignored from the input document. + :arg max_query_terms: The maximum number of query terms that can be + selected. Defaults to `25` if omitted. + :arg max_word_length: The maximum word length above which the terms + are ignored. Defaults to unbounded (`0`). + :arg min_doc_freq: The minimum document frequency below which the + terms are ignored from the input document. Defaults to `5` if + omitted. + :arg minimum_should_match: After the disjunctive query has been + formed, this parameter controls the number of terms that must + match. + :arg min_term_freq: The minimum term frequency below which the terms + are ignored from the input document. Defaults to `2` if omitted. + :arg min_word_length: The minimum word length below which the terms + are ignored. + :arg routing: + :arg stop_words: An array of stop words. Any word in this set is + ignored. + :arg unlike: Used in combination with `like` to exclude documents that + match a set of terms. + :arg version: + :arg version_type: Defaults to `'internal'` if omitted. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ + + name = "more_like_this" + + def __init__( + self, + *, + like: Union[ + Union[str, "types.LikeDocument"], + Sequence[Union[str, "types.LikeDocument"]], + Dict[str, Any], + "DefaultType", + ] = DEFAULT, + analyzer: Union[str, "DefaultType"] = DEFAULT, + boost_terms: Union[float, "DefaultType"] = DEFAULT, + fail_on_unsupported_field: Union[bool, "DefaultType"] = DEFAULT, + fields: Union[ + Sequence[Union[str, "InstrumentedField"]], "DefaultType" + ] = DEFAULT, + include: Union[bool, "DefaultType"] = DEFAULT, + max_doc_freq: Union[int, "DefaultType"] = DEFAULT, + max_query_terms: Union[int, "DefaultType"] = DEFAULT, + max_word_length: Union[int, "DefaultType"] = DEFAULT, + min_doc_freq: Union[int, "DefaultType"] = DEFAULT, + minimum_should_match: Union[int, str, "DefaultType"] = DEFAULT, + min_term_freq: Union[int, "DefaultType"] = DEFAULT, + min_word_length: Union[int, "DefaultType"] = DEFAULT, + routing: Union[str, "DefaultType"] = DEFAULT, + stop_words: Union[str, Sequence[str], "DefaultType"] = DEFAULT, + unlike: Union[ + Union[str, "types.LikeDocument"], + Sequence[Union[str, "types.LikeDocument"]], + Dict[str, Any], + "DefaultType", + ] = DEFAULT, + version: Union[int, "DefaultType"] = DEFAULT, + version_type: Union[ + Literal["internal", "external", "external_gte", "force"], "DefaultType" + ] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + like=like, + analyzer=analyzer, + boost_terms=boost_terms, + fail_on_unsupported_field=fail_on_unsupported_field, + fields=fields, + include=include, + max_doc_freq=max_doc_freq, + max_query_terms=max_query_terms, + max_word_length=max_word_length, + min_doc_freq=min_doc_freq, + minimum_should_match=minimum_should_match, + min_term_freq=min_term_freq, + min_word_length=min_word_length, + routing=routing, + stop_words=stop_words, + unlike=unlike, + version=version, + version_type=version_type, + boost=boost, + _name=_name, + **kwargs, + ) + + +class MultiMatch(Query): + """ + Enables you to search for a provided text, number, date or boolean + value across multiple fields. The provided text is analyzed before + matching. + + :arg query: (required) Text, number, boolean value or date you wish to + find in the provided field. + :arg analyzer: Analyzer used to convert the text in the query value + into tokens. + :arg auto_generate_synonyms_phrase_query: If `true`, match phrase + queries are automatically created for multi-term synonyms. + Defaults to `True` if omitted. + :arg cutoff_frequency: + :arg fields: The fields to be queried. Defaults to the + `index.query.default_field` index settings, which in turn defaults + to `*`. + :arg fuzziness: Maximum edit distance allowed for matching. + :arg fuzzy_rewrite: Method used to rewrite the query. + :arg fuzzy_transpositions: If `true`, edits for fuzzy matching include + transpositions of two adjacent characters (for example, `ab` to + `ba`). Can be applied to the term subqueries constructed for all + terms but the final term. Defaults to `True` if omitted. + :arg lenient: If `true`, format-based errors, such as providing a text + query value for a numeric field, are ignored. + :arg max_expansions: Maximum number of terms to which the query will + expand. Defaults to `50` if omitted. + :arg minimum_should_match: Minimum number of clauses that must match + for a document to be returned. + :arg operator: Boolean logic used to interpret text in the query + value. 
Defaults to `'or'` if omitted. + :arg prefix_length: Number of beginning characters left unchanged for + fuzzy matching. + :arg slop: Maximum number of positions allowed between matching + tokens. + :arg tie_breaker: Determines how scores for each per-term blended + query and scores across groups are combined. + :arg type: How `the` multi_match query is executed internally. + Defaults to `'best_fields'` if omitted. + :arg zero_terms_query: Indicates whether no documents are returned if + the `analyzer` removes all tokens, such as when using a `stop` + filter. Defaults to `'none'` if omitted. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "multi_match" + + def __init__( + self, + *, + query: Union[str, "DefaultType"] = DEFAULT, + analyzer: Union[str, "DefaultType"] = DEFAULT, + auto_generate_synonyms_phrase_query: Union[bool, "DefaultType"] = DEFAULT, + cutoff_frequency: Union[float, "DefaultType"] = DEFAULT, + fields: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + fuzziness: Union[str, int, "DefaultType"] = DEFAULT, + fuzzy_rewrite: Union[str, "DefaultType"] = DEFAULT, + fuzzy_transpositions: Union[bool, "DefaultType"] = DEFAULT, + lenient: Union[bool, "DefaultType"] = DEFAULT, + max_expansions: Union[int, "DefaultType"] = DEFAULT, + minimum_should_match: Union[int, str, "DefaultType"] = DEFAULT, + operator: Union[Literal["and", "or"], "DefaultType"] = DEFAULT, + prefix_length: Union[int, "DefaultType"] = DEFAULT, + slop: Union[int, "DefaultType"] = DEFAULT, + tie_breaker: Union[float, "DefaultType"] = DEFAULT, + type: Union[ + Literal[ + "best_fields", + "most_fields", + "cross_fields", + "phrase", + "phrase_prefix", + "bool_prefix", + ], + "DefaultType", + ] = DEFAULT, + zero_terms_query: Union[Literal["all", "none"], "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + query=query, + analyzer=analyzer, + auto_generate_synonyms_phrase_query=auto_generate_synonyms_phrase_query, + cutoff_frequency=cutoff_frequency, + fields=fields, + fuzziness=fuzziness, + fuzzy_rewrite=fuzzy_rewrite, + fuzzy_transpositions=fuzzy_transpositions, + lenient=lenient, + max_expansions=max_expansions, + minimum_should_match=minimum_should_match, + operator=operator, + prefix_length=prefix_length, + slop=slop, + tie_breaker=tie_breaker, + type=type, + zero_terms_query=zero_terms_query, + boost=boost, + _name=_name, + **kwargs, + ) + + +class Nested(Query): + """ + Wraps another query to search nested fields. If an object matches the + search, the nested query returns the root parent document. + + :arg path: (required) Path to the nested object you wish to search. + :arg query: (required) Query you wish to run on nested objects in the + path. + :arg ignore_unmapped: Indicates whether to ignore an unmapped path and + not return any documents instead of an error. + :arg inner_hits: If defined, each search hit will contain inner hits. + :arg score_mode: How scores for matching child objects affect the root + parent document’s relevance score. Defaults to `'avg'` if omitted. 
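A sketch of MultiMatch across several fields with per-field boosting; the field names are illustrative::

    from elasticsearch.dsl.query import MultiMatch

    mm = MultiMatch(
        query="python asyncio",
        fields=["title^3", "body"],
        type="best_fields",
        tie_breaker=0.3,
    )
    print(mm.to_dict())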
+ :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "nested" + _param_defs = { + "query": {"type": "query"}, + } + + def __init__( + self, + *, + path: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + query: Union[Query, "DefaultType"] = DEFAULT, + ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT, + inner_hits: Union["types.InnerHits", Dict[str, Any], "DefaultType"] = DEFAULT, + score_mode: Union[ + Literal["none", "avg", "sum", "max", "min"], "DefaultType" + ] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + path=path, + query=query, + ignore_unmapped=ignore_unmapped, + inner_hits=inner_hits, + score_mode=score_mode, + boost=boost, + _name=_name, + **kwargs, + ) + + +class ParentId(Query): + """ + Returns child documents joined to a specific parent document. + + :arg id: ID of the parent document. + :arg ignore_unmapped: Indicates whether to ignore an unmapped `type` + and not return any documents instead of an error. + :arg type: Name of the child relationship mapped for the `join` field. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "parent_id" + + def __init__( + self, + *, + id: Union[str, "DefaultType"] = DEFAULT, + ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT, + type: Union[str, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + id=id, + ignore_unmapped=ignore_unmapped, + type=type, + boost=boost, + _name=_name, + **kwargs, + ) + + +class Percolate(Query): + """ + Matches queries stored in an index. + + :arg field: (required) Field that holds the indexed queries. The field + must use the `percolator` mapping type. + :arg document: The source of the document being percolated. + :arg documents: An array of sources of the documents being percolated. + :arg id: The ID of a stored document to percolate. + :arg index: The index of a stored document to percolate. + :arg name: The suffix used for the `_percolator_document_slot` field + when multiple `percolate` queries are specified. + :arg preference: Preference used to fetch document to percolate. + :arg routing: Routing used to fetch document to percolate. + :arg version: The expected version of a stored document to percolate. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
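A percolate query is typically run against an index whose stored queries live in a ``percolator``-mapped field; a minimal sketch (the field name and document are illustrative assumptions)::

    from elasticsearch.dsl.query import Percolate

    # find stored queries that would match this in-memory document
    q = Percolate(
        field="query",
        document={"message": "A new bonsai tree in the office"},
    )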
+ :arg _name: + """ + + name = "percolate" + + def __init__( + self, + *, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + document: Any = DEFAULT, + documents: Union[Sequence[Any], "DefaultType"] = DEFAULT, + id: Union[str, "DefaultType"] = DEFAULT, + index: Union[str, "DefaultType"] = DEFAULT, + name: Union[str, "DefaultType"] = DEFAULT, + preference: Union[str, "DefaultType"] = DEFAULT, + routing: Union[str, "DefaultType"] = DEFAULT, + version: Union[int, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + field=field, + document=document, + documents=documents, + id=id, + index=index, + name=name, + preference=preference, + routing=routing, + version=version, + boost=boost, + _name=_name, + **kwargs, + ) + + +class Pinned(Query): + """ + Promotes selected documents to rank higher than those matching a given + query. + + :arg organic: (required) Any choice of query used to rank documents + which will be ranked below the "pinned" documents. + :arg ids: Document IDs listed in the order they are to appear in + results. Required if `docs` is not specified. + :arg docs: Documents listed in the order they are to appear in + results. Required if `ids` is not specified. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "pinned" + _param_defs = { + "organic": {"type": "query"}, + } + + def __init__( + self, + *, + organic: Union[Query, "DefaultType"] = DEFAULT, + ids: Union[Sequence[str], "DefaultType"] = DEFAULT, + docs: Union[ + Sequence["types.PinnedDoc"], Sequence[Dict[str, Any]], "DefaultType" + ] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + organic=organic, ids=ids, docs=docs, boost=boost, _name=_name, **kwargs + ) + + +class Prefix(Query): + """ + Returns documents that contain a specific prefix in a provided field. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + """ + + name = "prefix" + + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union["types.PrefixQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) + + +class QueryString(Query): + """ + Returns documents based on a provided query string, using a parser + with a strict syntax. + + :arg query: (required) Query string you wish to parse and use for + search. + :arg allow_leading_wildcard: If `true`, the wildcard characters `*` + and `?` are allowed as the first character of the query string. + Defaults to `True` if omitted. + :arg analyzer: Analyzer used to convert text in the query string into + tokens. + :arg analyze_wildcard: If `true`, the query attempts to analyze + wildcard terms in the query string. + :arg auto_generate_synonyms_phrase_query: If `true`, match phrase + queries are automatically created for multi-term synonyms. + Defaults to `True` if omitted. + :arg default_field: Default field to search if no field is provided in + the query string. Supports wildcards (`*`). 
Defaults to the + `index.query.default_field` index setting, which has a default + value of `*`. + :arg default_operator: Default boolean logic used to interpret text in + the query string if no operators are specified. Defaults to `'or'` + if omitted. + :arg enable_position_increments: If `true`, enable position increments + in queries constructed from a `query_string` search. Defaults to + `True` if omitted. + :arg escape: + :arg fields: Array of fields to search. Supports wildcards (`*`). + :arg fuzziness: Maximum edit distance allowed for fuzzy matching. + :arg fuzzy_max_expansions: Maximum number of terms to which the query + expands for fuzzy matching. Defaults to `50` if omitted. + :arg fuzzy_prefix_length: Number of beginning characters left + unchanged for fuzzy matching. + :arg fuzzy_rewrite: Method used to rewrite the query. + :arg fuzzy_transpositions: If `true`, edits for fuzzy matching include + transpositions of two adjacent characters (for example, `ab` to + `ba`). Defaults to `True` if omitted. + :arg lenient: If `true`, format-based errors, such as providing a text + value for a numeric field, are ignored. + :arg max_determinized_states: Maximum number of automaton states + required for the query. Defaults to `10000` if omitted. + :arg minimum_should_match: Minimum number of clauses that must match + for a document to be returned. + :arg phrase_slop: Maximum number of positions allowed between matching + tokens for phrases. + :arg quote_analyzer: Analyzer used to convert quoted text in the query + string into tokens. For quoted text, this parameter overrides the + analyzer specified in the `analyzer` parameter. + :arg quote_field_suffix: Suffix appended to quoted text in the query + string. You can use this suffix to use a different analysis method + for exact matches. + :arg rewrite: Method used to rewrite the query. + :arg tie_breaker: How to combine the queries generated from the + individual search terms in the resulting `dis_max` query. + :arg time_zone: Coordinated Universal Time (UTC) offset or IANA time + zone used to convert date values in the query string to UTC. + :arg type: Determines how the query matches and scores documents. + Defaults to `'best_fields'` if omitted. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
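A rough usage sketch (the query text and field names are made up for illustration)::

    from elasticsearch.dsl.query import QueryString

    q = QueryString(
        query="(new york city) OR (big apple)",
        default_field="content",
        default_operator="and",
    )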
+ :arg _name: + """ + + name = "query_string" + + def __init__( + self, + *, + query: Union[str, "DefaultType"] = DEFAULT, + allow_leading_wildcard: Union[bool, "DefaultType"] = DEFAULT, + analyzer: Union[str, "DefaultType"] = DEFAULT, + analyze_wildcard: Union[bool, "DefaultType"] = DEFAULT, + auto_generate_synonyms_phrase_query: Union[bool, "DefaultType"] = DEFAULT, + default_field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + default_operator: Union[Literal["and", "or"], "DefaultType"] = DEFAULT, + enable_position_increments: Union[bool, "DefaultType"] = DEFAULT, + escape: Union[bool, "DefaultType"] = DEFAULT, + fields: Union[ + Sequence[Union[str, "InstrumentedField"]], "DefaultType" + ] = DEFAULT, + fuzziness: Union[str, int, "DefaultType"] = DEFAULT, + fuzzy_max_expansions: Union[int, "DefaultType"] = DEFAULT, + fuzzy_prefix_length: Union[int, "DefaultType"] = DEFAULT, + fuzzy_rewrite: Union[str, "DefaultType"] = DEFAULT, + fuzzy_transpositions: Union[bool, "DefaultType"] = DEFAULT, + lenient: Union[bool, "DefaultType"] = DEFAULT, + max_determinized_states: Union[int, "DefaultType"] = DEFAULT, + minimum_should_match: Union[int, str, "DefaultType"] = DEFAULT, + phrase_slop: Union[float, "DefaultType"] = DEFAULT, + quote_analyzer: Union[str, "DefaultType"] = DEFAULT, + quote_field_suffix: Union[str, "DefaultType"] = DEFAULT, + rewrite: Union[str, "DefaultType"] = DEFAULT, + tie_breaker: Union[float, "DefaultType"] = DEFAULT, + time_zone: Union[str, "DefaultType"] = DEFAULT, + type: Union[ + Literal[ + "best_fields", + "most_fields", + "cross_fields", + "phrase", + "phrase_prefix", + "bool_prefix", + ], + "DefaultType", + ] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + query=query, + allow_leading_wildcard=allow_leading_wildcard, + analyzer=analyzer, + analyze_wildcard=analyze_wildcard, + auto_generate_synonyms_phrase_query=auto_generate_synonyms_phrase_query, + default_field=default_field, + default_operator=default_operator, + enable_position_increments=enable_position_increments, + escape=escape, + fields=fields, + fuzziness=fuzziness, + fuzzy_max_expansions=fuzzy_max_expansions, + fuzzy_prefix_length=fuzzy_prefix_length, + fuzzy_rewrite=fuzzy_rewrite, + fuzzy_transpositions=fuzzy_transpositions, + lenient=lenient, + max_determinized_states=max_determinized_states, + minimum_should_match=minimum_should_match, + phrase_slop=phrase_slop, + quote_analyzer=quote_analyzer, + quote_field_suffix=quote_field_suffix, + rewrite=rewrite, + tie_breaker=tie_breaker, + time_zone=time_zone, + type=type, + boost=boost, + _name=_name, + **kwargs, + ) + + +class Range(Query): + """ + Returns documents that contain terms within a provided range. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + """ + + name = "range" + + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union["wrappers.Range[Any]", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) + + +class RankFeature(Query): + """ + Boosts the relevance score of documents based on the numeric value of + a `rank_feature` or `rank_features` field. + + :arg field: (required) `rank_feature` or `rank_features` field used to + boost relevance scores. 
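Normally only one of the scoring functions listed below (``saturation``, ``log``, ``linear``, ``sigmoid``) is supplied; for example, assuming a ``pagerank`` field mapped as ``rank_feature``::

    from elasticsearch.dsl.query import RankFeature

    # boost relevance by the document's pagerank, saturating around a pivot of 8
    q = RankFeature(field="pagerank", saturation={"pivot": 8})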
+ :arg saturation: Saturation function used to boost relevance scores + based on the value of the rank feature `field`. + :arg log: Logarithmic function used to boost relevance scores based on + the value of the rank feature `field`. + :arg linear: Linear function used to boost relevance scores based on + the value of the rank feature `field`. + :arg sigmoid: Sigmoid function used to boost relevance scores based on + the value of the rank feature `field`. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "rank_feature" + + def __init__( + self, + *, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + saturation: Union[ + "types.RankFeatureFunctionSaturation", Dict[str, Any], "DefaultType" + ] = DEFAULT, + log: Union[ + "types.RankFeatureFunctionLogarithm", Dict[str, Any], "DefaultType" + ] = DEFAULT, + linear: Union[ + "types.RankFeatureFunctionLinear", Dict[str, Any], "DefaultType" + ] = DEFAULT, + sigmoid: Union[ + "types.RankFeatureFunctionSigmoid", Dict[str, Any], "DefaultType" + ] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + field=field, + saturation=saturation, + log=log, + linear=linear, + sigmoid=sigmoid, + boost=boost, + _name=_name, + **kwargs, + ) + + +class Regexp(Query): + """ + Returns documents that contain terms matching a regular expression. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + """ + + name = "regexp" + + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union["types.RegexpQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) + + +class Rule(Query): + """ + :arg organic: (required) + :arg ruleset_ids: (required) + :arg match_criteria: (required) + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "rule" + _param_defs = { + "organic": {"type": "query"}, + } + + def __init__( + self, + *, + organic: Union[Query, "DefaultType"] = DEFAULT, + ruleset_ids: Union[Sequence[str], "DefaultType"] = DEFAULT, + match_criteria: Any = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + organic=organic, + ruleset_ids=ruleset_ids, + match_criteria=match_criteria, + boost=boost, + _name=_name, + **kwargs, + ) + + +class Script(Query): + """ + Filters documents based on a provided script. The script query is + typically used in a filter context. + + :arg script: (required) Contains a script to run as a query. This + script must return a boolean value, `true` or `false`. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. 
A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "script" + + def __init__( + self, + *, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(script=script, boost=boost, _name=_name, **kwargs) + + +class ScriptScore(Query): + """ + Uses a script to provide a custom score for returned documents. + + :arg query: (required) Query used to return documents. + :arg script: (required) Script used to compute the score of documents + returned by the query. Important: final relevance scores from the + `script_score` query cannot be negative. + :arg min_score: Documents with a score lower than this floating point + number are excluded from the search results. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "script_score" + _param_defs = { + "query": {"type": "query"}, + } + + def __init__( + self, + *, + query: Union[Query, "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + min_score: Union[float, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + query=query, + script=script, + min_score=min_score, + boost=boost, + _name=_name, + **kwargs, + ) + + +class Semantic(Query): + """ + A semantic query to semantic_text field types + + :arg field: (required) The field to query, which must be a + semantic_text field type + :arg query: (required) The query text + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "semantic" + + def __init__( + self, + *, + field: Union[str, "DefaultType"] = DEFAULT, + query: Union[str, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(field=field, query=query, boost=boost, _name=_name, **kwargs) + + +class Shape(Query): + """ + Queries documents that contain fields indexed using the `shape` type. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + :arg ignore_unmapped: When set to `true` the query ignores an unmapped + field and will not match any documents. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ + + name = "shape" + + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union["types.ShapeFieldQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + *, + ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__( + ignore_unmapped=ignore_unmapped, boost=boost, _name=_name, **kwargs + ) + + +class SimpleQueryString(Query): + """ + Returns documents based on a provided query string, using a parser + with a limited but fault-tolerant syntax. + + :arg query: (required) Query string in the simple query string syntax + you wish to parse and use for search. + :arg analyzer: Analyzer used to convert text in the query string into + tokens. + :arg analyze_wildcard: If `true`, the query attempts to analyze + wildcard terms in the query string. + :arg auto_generate_synonyms_phrase_query: If `true`, the parser + creates a match_phrase query for each multi-position token. + Defaults to `True` if omitted. + :arg default_operator: Default boolean logic used to interpret text in + the query string if no operators are specified. Defaults to `'or'` + if omitted. + :arg fields: Array of fields you wish to search. Accepts wildcard + expressions. You also can boost relevance scores for matches to + particular fields using a caret (`^`) notation. Defaults to the + `index.query.default_field index` setting, which has a default + value of `*`. + :arg flags: List of enabled operators for the simple query string + syntax. Defaults to `ALL` if omitted. + :arg fuzzy_max_expansions: Maximum number of terms to which the query + expands for fuzzy matching. Defaults to `50` if omitted. + :arg fuzzy_prefix_length: Number of beginning characters left + unchanged for fuzzy matching. + :arg fuzzy_transpositions: If `true`, edits for fuzzy matching include + transpositions of two adjacent characters (for example, `ab` to + `ba`). + :arg lenient: If `true`, format-based errors, such as providing a text + value for a numeric field, are ignored. + :arg minimum_should_match: Minimum number of clauses that must match + for a document to be returned. + :arg quote_field_suffix: Suffix appended to quoted text in the query + string. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
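A sketch of typical usage (the fields and query text are illustrative only)::

    from elasticsearch.dsl.query import SimpleQueryString

    q = SimpleQueryString(
        query='"fried eggs" +(eggplant | potato) -frittata',
        fields=["title^5", "body"],
        default_operator="and",
    )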
+ :arg _name: + """ + + name = "simple_query_string" + + def __init__( + self, + *, + query: Union[str, "DefaultType"] = DEFAULT, + analyzer: Union[str, "DefaultType"] = DEFAULT, + analyze_wildcard: Union[bool, "DefaultType"] = DEFAULT, + auto_generate_synonyms_phrase_query: Union[bool, "DefaultType"] = DEFAULT, + default_operator: Union[Literal["and", "or"], "DefaultType"] = DEFAULT, + fields: Union[ + Sequence[Union[str, "InstrumentedField"]], "DefaultType" + ] = DEFAULT, + flags: Union[ + "types.PipeSeparatedFlags", Dict[str, Any], "DefaultType" + ] = DEFAULT, + fuzzy_max_expansions: Union[int, "DefaultType"] = DEFAULT, + fuzzy_prefix_length: Union[int, "DefaultType"] = DEFAULT, + fuzzy_transpositions: Union[bool, "DefaultType"] = DEFAULT, + lenient: Union[bool, "DefaultType"] = DEFAULT, + minimum_should_match: Union[int, str, "DefaultType"] = DEFAULT, + quote_field_suffix: Union[str, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + query=query, + analyzer=analyzer, + analyze_wildcard=analyze_wildcard, + auto_generate_synonyms_phrase_query=auto_generate_synonyms_phrase_query, + default_operator=default_operator, + fields=fields, + flags=flags, + fuzzy_max_expansions=fuzzy_max_expansions, + fuzzy_prefix_length=fuzzy_prefix_length, + fuzzy_transpositions=fuzzy_transpositions, + lenient=lenient, + minimum_should_match=minimum_should_match, + quote_field_suffix=quote_field_suffix, + boost=boost, + _name=_name, + **kwargs, + ) + + +class SpanContaining(Query): + """ + Returns matches which enclose another span query. + + :arg big: (required) Can be any span query. Matching spans from `big` + that contain matches from `little` are returned. + :arg little: (required) Can be any span query. Matching spans from + `big` that contain matches from `little` are returned. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "span_containing" + + def __init__( + self, + *, + big: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + little: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(big=big, little=little, boost=boost, _name=_name, **kwargs) + + +class SpanFieldMasking(Query): + """ + Wrapper to allow span queries to participate in composite single-field + span queries by _lying_ about their search field. + + :arg field: (required) + :arg query: (required) + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ + + name = "span_field_masking" + + def __init__( + self, + *, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + query: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(field=field, query=query, boost=boost, _name=_name, **kwargs) + + +class SpanFirst(Query): + """ + Matches spans near the beginning of a field. + + :arg end: (required) Controls the maximum end position permitted in a + match. + :arg match: (required) Can be any other span type query. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "span_first" + + def __init__( + self, + *, + end: Union[int, "DefaultType"] = DEFAULT, + match: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(end=end, match=match, boost=boost, _name=_name, **kwargs) + + +class SpanMulti(Query): + """ + Allows you to wrap a multi term query (one of `wildcard`, `fuzzy`, + `prefix`, `range`, or `regexp` query) as a `span` query, so it can be + nested. + + :arg match: (required) Should be a multi term query (one of + `wildcard`, `fuzzy`, `prefix`, `range`, or `regexp` query). + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "span_multi" + _param_defs = { + "match": {"type": "query"}, + } + + def __init__( + self, + *, + match: Union[Query, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(match=match, boost=boost, _name=_name, **kwargs) + + +class SpanNear(Query): + """ + Matches spans which are near one another. You can specify `slop`, the + maximum number of intervening unmatched positions, as well as whether + matches are required to be in-order. + + :arg clauses: (required) Array of one or more other span type queries. + :arg in_order: Controls whether matches are required to be in-order. + :arg slop: Controls the maximum number of intervening unmatched + positions permitted. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
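Span clauses can be supplied as plain dictionaries; an illustrative sketch (field name and terms are assumptions)::

    from elasticsearch.dsl.query import SpanNear

    # "quick" and "brown" within two positions of each other, in any order
    q = SpanNear(
        clauses=[
            {"span_term": {"text": "quick"}},
            {"span_term": {"text": "brown"}},
        ],
        slop=2,
        in_order=False,
    )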
+ :arg _name: + """ + + name = "span_near" + + def __init__( + self, + *, + clauses: Union[ + Sequence["types.SpanQuery"], Sequence[Dict[str, Any]], "DefaultType" + ] = DEFAULT, + in_order: Union[bool, "DefaultType"] = DEFAULT, + slop: Union[int, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + clauses=clauses, + in_order=in_order, + slop=slop, + boost=boost, + _name=_name, + **kwargs, + ) + + +class SpanNot(Query): + """ + Removes matches which overlap with another span query or which are + within x tokens before (controlled by the parameter `pre`) or y tokens + after (controlled by the parameter `post`) another span query. + + :arg exclude: (required) Span query whose matches must not overlap + those returned. + :arg include: (required) Span query whose matches are filtered. + :arg dist: The number of tokens from within the include span that + can’t have overlap with the exclude span. Equivalent to setting + both `pre` and `post`. + :arg post: The number of tokens after the include span that can’t have + overlap with the exclude span. + :arg pre: The number of tokens before the include span that can’t have + overlap with the exclude span. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "span_not" + + def __init__( + self, + *, + exclude: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + include: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + dist: Union[int, "DefaultType"] = DEFAULT, + post: Union[int, "DefaultType"] = DEFAULT, + pre: Union[int, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + exclude=exclude, + include=include, + dist=dist, + post=post, + pre=pre, + boost=boost, + _name=_name, + **kwargs, + ) + + +class SpanOr(Query): + """ + Matches the union of its span clauses. + + :arg clauses: (required) Array of one or more other span type queries. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "span_or" + + def __init__( + self, + *, + clauses: Union[ + Sequence["types.SpanQuery"], Sequence[Dict[str, Any]], "DefaultType" + ] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(clauses=clauses, boost=boost, _name=_name, **kwargs) + + +class SpanTerm(Query): + """ + Matches spans containing a term. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. 
+ """ + + name = "span_term" + + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union["types.SpanTermQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) + + +class SpanWithin(Query): + """ + Returns matches which are enclosed inside another span query. + + :arg big: (required) Can be any span query. Matching spans from + `little` that are enclosed within `big` are returned. + :arg little: (required) Can be any span query. Matching spans from + `little` that are enclosed within `big` are returned. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "span_within" + + def __init__( + self, + *, + big: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + little: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(big=big, little=little, boost=boost, _name=_name, **kwargs) + + +class SparseVector(Query): + """ + Using input query vectors or a natural language processing model to + convert a query into a list of token-weight pairs, queries against a + sparse vector field. + + :arg field: (required) The name of the field that contains the token- + weight pairs to be searched against. This field must be a mapped + sparse_vector field. + :arg query_vector: Dictionary of precomputed sparse vectors and their + associated weights. Only one of inference_id or query_vector may + be supplied in a request. + :arg inference_id: The inference ID to use to convert the query text + into token-weight pairs. It must be the same inference ID that was + used to create the tokens from the input text. Only one of + inference_id and query_vector is allowed. If inference_id is + specified, query must also be specified. Only one of inference_id + or query_vector may be supplied in a request. + :arg query: The query text you want to use for search. If inference_id + is specified, query must also be specified. + :arg prune: Whether to perform pruning, omitting the non-significant + tokens from the query to improve query performance. If prune is + true but the pruning_config is not specified, pruning will occur + but default values will be used. Default: false + :arg pruning_config: Optional pruning configuration. If enabled, this + will omit non-significant tokens from the query in order to + improve query performance. This is only used if prune is set to + true. If prune is set to true but pruning_config is not specified, + default values will be used. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
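A minimal sketch of the inference-based form (the field name and inference endpoint ID below are placeholders)::

    from elasticsearch.dsl.query import SparseVector

    q = SparseVector(
        field="ml.tokens",
        inference_id="my-elser-endpoint",
        query="How is the weather in Jamaica?",
    )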
+ :arg _name: + """ + + name = "sparse_vector" + + def __init__( + self, + *, + field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + query_vector: Union[Mapping[str, float], "DefaultType"] = DEFAULT, + inference_id: Union[str, "DefaultType"] = DEFAULT, + query: Union[str, "DefaultType"] = DEFAULT, + prune: Union[bool, "DefaultType"] = DEFAULT, + pruning_config: Union[ + "types.TokenPruningConfig", Dict[str, Any], "DefaultType" + ] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__( + field=field, + query_vector=query_vector, + inference_id=inference_id, + query=query, + prune=prune, + pruning_config=pruning_config, + boost=boost, + _name=_name, + **kwargs, + ) + + +class Term(Query): + """ + Returns documents that contain an exact term in a provided field. To + return a document, the query term must exactly match the queried + field's value, including whitespace and capitalization. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + """ + + name = "term" + + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union["types.TermQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) + + +class Terms(Query): + """ + Returns documents that contain one or more exact terms in a provided + field. To return a document, one or more terms must exactly match a + field value, including whitespace and capitalization. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "terms" + + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union[ + Sequence[Union[int, float, str, bool, None, Any]], + "types.TermsLookup", + Dict[str, Any], + "DefaultType", + ] = DEFAULT, + *, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(boost=boost, _name=_name, **kwargs) + + def _setattr(self, name: str, value: Any) -> None: + # here we convert any iterables that are not strings to lists + if hasattr(value, "__iter__") and not isinstance(value, (str, list, dict)): + value = list(value) + super()._setattr(name, value) + + +class TermsSet(Query): + """ + Returns documents that contain a minimum number of exact terms in a + provided field. To return a document, a required number of terms must + exactly match the field values, including whitespace and + capitalization. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. 
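For example, requiring a per-document minimum number of matching terms (all names below are illustrative)::

    from elasticsearch.dsl.query import TermsSet

    q = TermsSet(
        _field="programming_languages",
        _value={
            "terms": ["c++", "java", "php"],
            "minimum_should_match_field": "required_matches",
        },
    )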
+ """ + + name = "terms_set" + + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union["types.TermsSetQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) + + +class TextExpansion(Query): + """ + Uses a natural language processing model to convert the query text + into a list of token-weight pairs which are then used in a query + against a sparse vector or rank features field. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + """ + + name = "text_expansion" + + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union[ + "types.TextExpansionQuery", Dict[str, Any], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) + + +class WeightedTokens(Query): + """ + Supports returning text_expansion query results by sending in + precomputed tokens with the query. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + """ + + name = "weighted_tokens" + + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union[ + "types.WeightedTokensQuery", Dict[str, Any], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) + + +class Wildcard(Query): + """ + Returns documents that contain terms matching a wildcard pattern. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + """ + + name = "wildcard" + + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union["types.WildcardQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) + + +class Wrapper(Query): + """ + A query that accepts any other query as base64 encoded string. + + :arg query: (required) A base64 encoded query. The binary data format + can be any of JSON, YAML, CBOR or SMILE encodings + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + name = "wrapper" + + def __init__( + self, + *, + query: Union[str, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(query=query, boost=boost, _name=_name, **kwargs) + + +class Type(Query): + """ + :arg value: (required) + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
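The ``wrapper`` query defined above expects its payload as a base64-encoded string; a small self-contained sketch::

    import base64
    import json

    from elasticsearch.dsl.query import Wrapper

    inner = json.dumps({"term": {"user.id": "kimchy"}}).encode("utf-8")
    q = Wrapper(query=base64.b64encode(inner).decode("ascii"))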
+ :arg _name: + """ + + name = "type" + + def __init__( + self, + *, + value: Union[str, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + _name: Union[str, "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + super().__init__(value=value, boost=boost, _name=_name, **kwargs) diff --git a/elasticsearch/dsl/response/__init__.py b/elasticsearch/dsl/response/__init__.py new file mode 100644 index 000000000..f6f3d551d --- /dev/null +++ b/elasticsearch/dsl/response/__init__.py @@ -0,0 +1,354 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Generic, + Iterator, + List, + Mapping, + Optional, + Sequence, + Tuple, + Union, + cast, +) + +from ..utils import _R, AttrDict, AttrList, _wrap +from .hit import Hit, HitMeta + +if TYPE_CHECKING: + from .. import types + from ..aggs import Agg + from ..faceted_search_base import FacetedSearchBase + from ..search_base import Request, SearchBase + from ..update_by_query_base import UpdateByQueryBase + +__all__ = [ + "Response", + "AggResponse", + "UpdateByQueryResponse", + "Hit", + "HitMeta", + "AggregateResponseType", +] + + +class Response(AttrDict[Any], Generic[_R]): + """An Elasticsearch search response. 
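A typical way such a response is obtained and consumed (illustrative; the index and field names are assumptions, and a default connection is assumed to be configured)::

    from elasticsearch.dsl.search import Search

    s = Search(index="blog-posts").query("match", title="python")
    response = s.execute()

    if response.success():
        for hit in response:  # iterates over response.hits
            print(hit.meta.score, hit.title)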
+ + :arg took: (required) + :arg timed_out: (required) + :arg _shards: (required) + :arg hits: search results + :arg aggregations: aggregation results + :arg _clusters: + :arg fields: + :arg max_score: + :arg num_reduce_phases: + :arg profile: + :arg pit_id: + :arg _scroll_id: + :arg suggest: + :arg terminated_early: + """ + + _search: "SearchBase[_R]" + _faceted_search: "FacetedSearchBase[_R]" + _doc_class: Optional[_R] + _hits: List[_R] + + took: int + timed_out: bool + _shards: "types.ShardStatistics" + _clusters: "types.ClusterStatistics" + fields: Mapping[str, Any] + max_score: float + num_reduce_phases: int + profile: "types.Profile" + pit_id: str + _scroll_id: str + suggest: Mapping[ + str, + Sequence[ + Union["types.CompletionSuggest", "types.PhraseSuggest", "types.TermSuggest"] + ], + ] + terminated_early: bool + + def __init__( + self, + search: "Request[_R]", + response: Dict[str, Any], + doc_class: Optional[_R] = None, + ): + super(AttrDict, self).__setattr__("_search", search) + super(AttrDict, self).__setattr__("_doc_class", doc_class) + super().__init__(response) + + def __iter__(self) -> Iterator[_R]: # type: ignore[override] + return iter(self.hits) + + def __getitem__(self, key: Union[slice, int, str]) -> Any: + if isinstance(key, (slice, int)): + # for slicing etc + return self.hits[key] + return super().__getitem__(key) + + def __nonzero__(self) -> bool: + return bool(self.hits) + + __bool__ = __nonzero__ + + def __repr__(self) -> str: + return "" % (self.hits or self.aggregations) + + def __len__(self) -> int: + return len(self.hits) + + def __getstate__(self) -> Tuple[Dict[str, Any], "Request[_R]", Optional[_R]]: # type: ignore[override] + return self._d_, self._search, self._doc_class + + def __setstate__( + self, state: Tuple[Dict[str, Any], "Request[_R]", Optional[_R]] # type: ignore[override] + ) -> None: + super(AttrDict, self).__setattr__("_d_", state[0]) + super(AttrDict, self).__setattr__("_search", state[1]) + super(AttrDict, self).__setattr__("_doc_class", state[2]) + + def success(self) -> bool: + return self._shards.total == self._shards.successful and not self.timed_out + + @property + def hits(self) -> List[_R]: + if not hasattr(self, "_hits"): + h = cast(AttrDict[Any], self._d_["hits"]) + + try: + hits = AttrList(list(map(self._search._get_result, h["hits"]))) + except AttributeError as e: + # avoid raising AttributeError since it will be hidden by the property + raise TypeError("Could not parse hits.", e) + + # avoid assigning _hits into self._d_ + super(AttrDict, self).__setattr__("_hits", hits) + for k in h: + setattr(self._hits, k, _wrap(h[k])) + return self._hits + + @property + def aggregations(self) -> "AggResponse[_R]": + return self.aggs + + @property + def aggs(self) -> "AggResponse[_R]": + if not hasattr(self, "_aggs"): + aggs = AggResponse[_R]( + cast("Agg[_R]", self._search.aggs), + self._search, + cast(Dict[str, Any], self._d_.get("aggregations", {})), + ) + + # avoid assigning _aggs into self._d_ + super(AttrDict, self).__setattr__("_aggs", aggs) + return cast("AggResponse[_R]", self._aggs) + + def search_after(self) -> "SearchBase[_R]": + """ + Return a ``Search`` instance that retrieves the next page of results. + + This method provides an easy way to paginate a long list of results using + the ``search_after`` option. 
For example:: + + page_size = 20 + s = Search()[:page_size].sort("date") + + while True: + # get a page of results + r = await s.execute() + + # do something with this page of results + + # exit the loop if we reached the end + if len(r.hits) < page_size: + break + + # get a search object with the next page of results + s = r.search_after() + + Note that the ``search_after`` option requires the search to have an + explicit ``sort`` order. + """ + if len(self.hits) == 0: + raise ValueError("Cannot use search_after when there are no search results") + if not hasattr(self.hits[-1].meta, "sort"): # type: ignore[attr-defined] + raise ValueError("Cannot use search_after when results are not sorted") + return self._search.extra(search_after=self.hits[-1].meta.sort) # type: ignore[attr-defined] + + +AggregateResponseType = Union[ + "types.CardinalityAggregate", + "types.HdrPercentilesAggregate", + "types.HdrPercentileRanksAggregate", + "types.TDigestPercentilesAggregate", + "types.TDigestPercentileRanksAggregate", + "types.PercentilesBucketAggregate", + "types.MedianAbsoluteDeviationAggregate", + "types.MinAggregate", + "types.MaxAggregate", + "types.SumAggregate", + "types.AvgAggregate", + "types.WeightedAvgAggregate", + "types.ValueCountAggregate", + "types.SimpleValueAggregate", + "types.DerivativeAggregate", + "types.BucketMetricValueAggregate", + "types.StatsAggregate", + "types.StatsBucketAggregate", + "types.ExtendedStatsAggregate", + "types.ExtendedStatsBucketAggregate", + "types.GeoBoundsAggregate", + "types.GeoCentroidAggregate", + "types.HistogramAggregate", + "types.DateHistogramAggregate", + "types.AutoDateHistogramAggregate", + "types.VariableWidthHistogramAggregate", + "types.StringTermsAggregate", + "types.LongTermsAggregate", + "types.DoubleTermsAggregate", + "types.UnmappedTermsAggregate", + "types.LongRareTermsAggregate", + "types.StringRareTermsAggregate", + "types.UnmappedRareTermsAggregate", + "types.MultiTermsAggregate", + "types.MissingAggregate", + "types.NestedAggregate", + "types.ReverseNestedAggregate", + "types.GlobalAggregate", + "types.FilterAggregate", + "types.ChildrenAggregate", + "types.ParentAggregate", + "types.SamplerAggregate", + "types.UnmappedSamplerAggregate", + "types.GeoHashGridAggregate", + "types.GeoTileGridAggregate", + "types.GeoHexGridAggregate", + "types.RangeAggregate", + "types.DateRangeAggregate", + "types.GeoDistanceAggregate", + "types.IpRangeAggregate", + "types.IpPrefixAggregate", + "types.FiltersAggregate", + "types.AdjacencyMatrixAggregate", + "types.SignificantLongTermsAggregate", + "types.SignificantStringTermsAggregate", + "types.UnmappedSignificantTermsAggregate", + "types.CompositeAggregate", + "types.FrequentItemSetsAggregate", + "types.TimeSeriesAggregate", + "types.ScriptedMetricAggregate", + "types.TopHitsAggregate", + "types.InferenceAggregate", + "types.StringStatsAggregate", + "types.BoxPlotAggregate", + "types.TopMetricsAggregate", + "types.TTestAggregate", + "types.RateAggregate", + "types.CumulativeCardinalityAggregate", + "types.MatrixStatsAggregate", + "types.GeoLineAggregate", +] + + +class AggResponse(AttrDict[Any], Generic[_R]): + """An Elasticsearch aggregation response.""" + + _meta: Dict[str, Any] + + def __init__(self, aggs: "Agg[_R]", search: "Request[_R]", data: Dict[str, Any]): + super(AttrDict, self).__setattr__("_meta", {"search": search, "aggs": aggs}) + super().__init__(data) + + def __getitem__(self, attr_name: str) -> AggregateResponseType: + if attr_name in self._meta["aggs"]: + # don't do 
self._meta['aggs'][attr_name] to avoid copying + agg = self._meta["aggs"].aggs[attr_name] + return cast( + AggregateResponseType, + agg.result(self._meta["search"], self._d_[attr_name]), + ) + return super().__getitem__(attr_name) # type: ignore[no-any-return] + + def __iter__(self) -> Iterator[AggregateResponseType]: # type: ignore[override] + for name in self._meta["aggs"]: + yield self[name] + + +class UpdateByQueryResponse(AttrDict[Any], Generic[_R]): + """An Elasticsearch update by query response. + + :arg batches: + :arg failures: + :arg noops: + :arg deleted: + :arg requests_per_second: + :arg retries: + :arg task: + :arg timed_out: + :arg took: + :arg total: + :arg updated: + :arg version_conflicts: + :arg throttled: + :arg throttled_millis: + :arg throttled_until: + :arg throttled_until_millis: + """ + + _search: "UpdateByQueryBase[_R]" + + batches: int + failures: Sequence["types.BulkIndexByScrollFailure"] + noops: int + deleted: int + requests_per_second: float + retries: "types.Retries" + task: Union[str, int] + timed_out: bool + took: Any + total: int + updated: int + version_conflicts: int + throttled: Any + throttled_millis: Any + throttled_until: Any + throttled_until_millis: Any + + def __init__( + self, + search: "Request[_R]", + response: Dict[str, Any], + doc_class: Optional[_R] = None, + ): + super(AttrDict, self).__setattr__("_search", search) + super(AttrDict, self).__setattr__("_doc_class", doc_class) + super().__init__(response) + + def success(self) -> bool: + return not self.timed_out and not self.failures diff --git a/elasticsearch/dsl/response/aggs.py b/elasticsearch/dsl/response/aggs.py new file mode 100644 index 000000000..8994fa761 --- /dev/null +++ b/elasticsearch/dsl/response/aggs.py @@ -0,0 +1,100 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Union, cast + +from ..utils import _R, AttrDict, AttrList +from . 
import AggResponse, Response + +if TYPE_CHECKING: + from ..aggs import Agg + from ..field import Field + from ..search_base import SearchBase + + +class Bucket(AggResponse[_R]): + def __init__( + self, + aggs: "Agg[_R]", + search: "SearchBase[_R]", + data: Dict[str, Any], + field: Optional["Field"] = None, + ): + super().__init__(aggs, search, data) + + +class FieldBucket(Bucket[_R]): + def __init__( + self, + aggs: "Agg[_R]", + search: "SearchBase[_R]", + data: Dict[str, Any], + field: Optional["Field"] = None, + ): + if field: + data["key"] = field.deserialize(data["key"]) + super().__init__(aggs, search, data, field) + + +class BucketData(AggResponse[_R]): + _bucket_class = Bucket + _buckets: Union[AttrDict[Any], AttrList[Any]] + + def _wrap_bucket(self, data: Dict[str, Any]) -> Bucket[_R]: + return self._bucket_class( + self._meta["aggs"], + self._meta["search"], + data, + field=self._meta.get("field"), + ) + + def __iter__(self) -> Iterator["Agg"]: # type: ignore[override] + return iter(self.buckets) # type: ignore[arg-type] + + def __len__(self) -> int: + return len(self.buckets) + + def __getitem__(self, key: Any) -> Any: + if isinstance(key, (int, slice)): + return cast(AttrList[Any], self.buckets)[key] + return super().__getitem__(key) + + @property + def buckets(self) -> Union[AttrDict[Any], AttrList[Any]]: + if not hasattr(self, "_buckets"): + field = getattr(self._meta["aggs"], "field", None) + if field: + self._meta["field"] = self._meta["search"]._resolve_field(field) + bs = cast(Union[Dict[str, Any], List[Any]], self._d_["buckets"]) + if isinstance(bs, list): + ret = AttrList(bs, obj_wrapper=self._wrap_bucket) + else: + ret = AttrDict[Any]({k: self._wrap_bucket(bs[k]) for k in bs}) # type: ignore[assignment] + super(AttrDict, self).__setattr__("_buckets", ret) + return self._buckets + + +class FieldBucketData(BucketData[_R]): + _bucket_class = FieldBucket + + +class TopHitsData(Response[_R]): + def __init__(self, agg: "Agg[_R]", search: "SearchBase[_R]", data: Any): + super(AttrDict, self).__setattr__( + "meta", AttrDict({"agg": agg, "search": search}) + ) + super().__init__(search, data) diff --git a/elasticsearch/dsl/response/hit.py b/elasticsearch/dsl/response/hit.py new file mode 100644 index 000000000..a09d36e9c --- /dev/null +++ b/elasticsearch/dsl/response/hit.py @@ -0,0 +1,53 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +from typing import Any, Dict, List, Tuple, cast + +from ..utils import AttrDict, HitMeta + + +class Hit(AttrDict[Any]): + def __init__(self, document: Dict[str, Any]): + data: Dict[str, Any] = {} + if "_source" in document: + data = cast(Dict[str, Any], document["_source"]) + if "fields" in document: + data.update(cast(Dict[str, Any], document["fields"])) + + super().__init__(data) + # assign meta as attribute and not as key in self._d_ + super(AttrDict, self).__setattr__("meta", HitMeta(document)) + + def __getstate__(self) -> Tuple[Dict[str, Any], HitMeta]: # type: ignore[override] + # add self.meta since it is not in self.__dict__ + return super().__getstate__() + (self.meta,) + + def __setstate__(self, state: Tuple[Dict[str, Any], HitMeta]) -> None: # type: ignore[override] + super(AttrDict, self).__setattr__("meta", state[-1]) + super().__setstate__(state[:-1]) + + def __dir__(self) -> List[str]: + # be sure to expose meta in dir(self) + return super().__dir__() + ["meta"] + + def __repr__(self) -> str: + return "".format( + "/".join( + getattr(self.meta, key) for key in ("index", "id") if key in self.meta + ), + super().__repr__(), + ) diff --git a/elasticsearch/dsl/search.py b/elasticsearch/dsl/search.py new file mode 100644 index 000000000..bf2036ffd --- /dev/null +++ b/elasticsearch/dsl/search.py @@ -0,0 +1,20 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from ._async.search import AsyncEmptySearch, AsyncMultiSearch, AsyncSearch # noqa: F401 +from ._sync.search import EmptySearch, MultiSearch, Search # noqa: F401 +from .search_base import Q # noqa: F401 diff --git a/elasticsearch/dsl/search_base.py b/elasticsearch/dsl/search_base.py new file mode 100644 index 000000000..ad4a56059 --- /dev/null +++ b/elasticsearch/dsl/search_base.py @@ -0,0 +1,1040 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import collections.abc +import copy +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Generic, + Iterator, + List, + Optional, + Protocol, + Tuple, + Type, + Union, + cast, + overload, +) + +from typing_extensions import Self, TypeVar + +from .aggs import A, Agg, AggBase +from .document_base import InstrumentedField +from .exceptions import IllegalOperation +from .query import Bool, Q, Query +from .response import Hit, Response +from .utils import _R, AnyUsingType, AttrDict, DslBase, recursive_to_dict + +if TYPE_CHECKING: + from .field import Field, Object + + +class SupportsClone(Protocol): + def _clone(self) -> Self: ... + + +_S = TypeVar("_S", bound=SupportsClone) + + +class QueryProxy(Generic[_S]): + """ + Simple proxy around DSL objects (queries) that can be called + (to add query/post_filter) and also allows attribute access which is proxied to + the wrapped query. + """ + + def __init__(self, search: _S, attr_name: str): + self._search = search + self._proxied: Optional[Query] = None + self._attr_name = attr_name + + def __nonzero__(self) -> bool: + return self._proxied is not None + + __bool__ = __nonzero__ + + def __call__(self, *args: Any, **kwargs: Any) -> _S: + s = self._search._clone() + + # we cannot use self._proxied since we just cloned self._search and + # need to access the new self on the clone + proxied = getattr(s, self._attr_name) + if proxied._proxied is None: + proxied._proxied = Q(*args, **kwargs) + else: + proxied._proxied &= Q(*args, **kwargs) + + # always return search to be chainable + return s + + def __getattr__(self, attr_name: str) -> Any: + return getattr(self._proxied, attr_name) + + def __setattr__(self, attr_name: str, value: Any) -> None: + if not attr_name.startswith("_"): + if self._proxied is not None: + self._proxied = Q(self._proxied.to_dict()) + setattr(self._proxied, attr_name, value) + super().__setattr__(attr_name, value) + + def __getstate__(self) -> Tuple[_S, Optional[Query], str]: + return self._search, self._proxied, self._attr_name + + def __setstate__(self, state: Tuple[_S, Optional[Query], str]) -> None: + self._search, self._proxied, self._attr_name = state + + +class ProxyDescriptor(Generic[_S]): + """ + Simple descriptor to enable setting of queries and filters as: + + s = Search() + s.query = Q(...) 
+ + """ + + def __init__(self, name: str): + self._attr_name = f"_{name}_proxy" + + def __get__(self, instance: Any, owner: object) -> QueryProxy[_S]: + return cast(QueryProxy[_S], getattr(instance, self._attr_name)) + + def __set__(self, instance: _S, value: Dict[str, Any]) -> None: + proxy: QueryProxy[_S] = getattr(instance, self._attr_name) + proxy._proxied = Q(value) + + +class AggsProxy(AggBase[_R], DslBase): + name = "aggs" + + def __init__(self, search: "SearchBase[_R]"): + self._base = cast("Agg[_R]", self) + self._search = search + self._params = {"aggs": {}} + + def to_dict(self) -> Dict[str, Any]: + return cast(Dict[str, Any], super().to_dict().get("aggs", {})) + + +class Request(Generic[_R]): + def __init__( + self, + using: AnyUsingType = "default", + index: Optional[Union[str, List[str]]] = None, + doc_type: Optional[ + Union[type, str, List[Union[type, str]], Dict[str, Union[type, str]]] + ] = None, + extra: Optional[Dict[str, Any]] = None, + ): + self._using = using + + self._index = None + if isinstance(index, (tuple, list)): + self._index = list(index) + elif index: + self._index = [index] + + self._doc_type: List[Union[type, str]] = [] + self._doc_type_map: Dict[str, Any] = {} + if isinstance(doc_type, (tuple, list)): + self._doc_type.extend(doc_type) + elif isinstance(doc_type, collections.abc.Mapping): + self._doc_type.extend(doc_type.keys()) + self._doc_type_map.update(doc_type) + elif doc_type: + self._doc_type.append(doc_type) + + self._params: Dict[str, Any] = {} + self._extra: Dict[str, Any] = extra or {} + + def __eq__(self, other: Any) -> bool: + return ( + isinstance(other, Request) + and other._params == self._params + and other._index == self._index + and other._doc_type == self._doc_type + and other.to_dict() == self.to_dict() + ) + + def __copy__(self) -> Self: + return self._clone() + + def params(self, **kwargs: Any) -> Self: + """ + Specify query params to be used when executing the search. All the + keyword arguments will override the current values. See + https://elasticsearch-py.readthedocs.io/en/latest/api/elasticsearch.html#elasticsearch.Elasticsearch.search + for all available parameters. + + Example:: + + s = Search() + s = s.params(routing='user-1', preference='local') + """ + s = self._clone() + s._params.update(kwargs) + return s + + def index(self, *index: Union[str, List[str], Tuple[str, ...]]) -> Self: + """ + Set the index for the search. If called empty it will remove all information. 
+ + Example:: + + s = Search() + s = s.index('twitter-2015.01.01', 'twitter-2015.01.02') + s = s.index(['twitter-2015.01.01', 'twitter-2015.01.02']) + """ + # .index() resets + s = self._clone() + if not index: + s._index = None + else: + indexes = [] + for i in index: + if isinstance(i, str): + indexes.append(i) + elif isinstance(i, list): + indexes += i + elif isinstance(i, tuple): + indexes += list(i) + + s._index = (self._index or []) + indexes + + return s + + def _resolve_field(self, path: str) -> Optional["Field"]: + for dt in self._doc_type: + if not hasattr(dt, "_index"): + continue + field = dt._index.resolve_field(path) + if field is not None: + return cast("Field", field) + return None + + def _resolve_nested( + self, hit: AttrDict[Any], parent_class: Optional[type] = None + ) -> Type[_R]: + doc_class = Hit + + nested_path = [] + nesting = hit["_nested"] + while nesting and "field" in nesting: + nested_path.append(nesting["field"]) + nesting = nesting.get("_nested") + nested_path_str = ".".join(nested_path) + + nested_field: Optional["Object"] + if parent_class is not None and hasattr(parent_class, "_index"): + nested_field = cast( + Optional["Object"], parent_class._index.resolve_field(nested_path_str) + ) + else: + nested_field = cast( + Optional["Object"], self._resolve_field(nested_path_str) + ) + + if nested_field is not None: + return cast(Type[_R], nested_field._doc_class) + + return cast(Type[_R], doc_class) + + def _get_result( + self, hit: AttrDict[Any], parent_class: Optional[type] = None + ) -> _R: + doc_class: Any = Hit + dt = hit.get("_type") + + if "_nested" in hit: + doc_class = self._resolve_nested(hit, parent_class) + + elif dt in self._doc_type_map: + doc_class = self._doc_type_map[dt] + + else: + for doc_type in self._doc_type: + if hasattr(doc_type, "_matches") and doc_type._matches(hit): + doc_class = doc_type + break + + for t in hit.get("inner_hits", ()): + hit["inner_hits"][t] = Response[_R]( + self, hit["inner_hits"][t], doc_class=doc_class + ) + + callback = getattr(doc_class, "from_es", doc_class) + return cast(_R, callback(hit)) + + def doc_type( + self, *doc_type: Union[type, str], **kwargs: Callable[[AttrDict[Any]], Any] + ) -> Self: + """ + Set the type to search through. You can supply a single value or + multiple. Values can be strings or subclasses of ``Document``. + + You can also pass in any keyword arguments, mapping a doc_type to a + callback that should be used instead of the Hit class. + + If no doc_type is supplied any information stored on the instance will + be erased. + + Example: + + s = Search().doc_type('product', 'store', User, custom=my_callback) + """ + # .doc_type() resets + s = self._clone() + if not doc_type and not kwargs: + s._doc_type = [] + s._doc_type_map = {} + else: + s._doc_type.extend(doc_type) + s._doc_type.extend(kwargs.keys()) + s._doc_type_map.update(kwargs) + return s + + def using(self, client: AnyUsingType) -> Self: + """ + Associate the search request with an elasticsearch client. A fresh copy + will be returned with current instance remaining unchanged. + + :arg client: an instance of ``elasticsearch.Elasticsearch`` to use or + an alias to look up in ``elasticsearch.dsl.connections`` + + """ + s = self._clone() + s._using = client + return s + + def extra(self, **kwargs: Any) -> Self: + """ + Add extra keys to the request body. Mostly here for backwards + compatibility. 
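# Illustrative usage sketch, not part of the diff: the Request helpers above
# (index(), params(), extra()) are copy-on-write, so configuration can be
# chained without mutating the original object. Index names and the routing
# value are hypothetical; note how extra(from_=...) is translated to the
# reserved "from" key.
from elasticsearch.dsl import Search

base = Search(index="logs-2024.01.01")
s = (
    base.index("logs-2024.01.02", "logs-2024.01.03")  # indices accumulate
    .params(routing="user-1")                         # query-string parameters
    .extra(from_=20, size=10)                         # raw request body keys
)
print(s.to_dict())     # {'from': 20, 'size': 10}
print(base.to_dict())  # unchanged: {}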
+ """ + s = self._clone() + if "from_" in kwargs: + kwargs["from"] = kwargs.pop("from_") + s._extra.update(kwargs) + return s + + def _clone(self) -> Self: + s = self.__class__( + using=self._using, index=self._index, doc_type=self._doc_type + ) + s._doc_type_map = self._doc_type_map.copy() + s._extra = self._extra.copy() + s._params = self._params.copy() + return s + + if TYPE_CHECKING: + + def to_dict(self) -> Dict[str, Any]: ... + + +class SearchBase(Request[_R]): + query = ProxyDescriptor[Self]("query") + post_filter = ProxyDescriptor[Self]("post_filter") + _response: Response[_R] + + def __init__(self, **kwargs: Any): + """ + Search request to elasticsearch. + + :arg using: `Elasticsearch` instance to use + :arg index: limit the search to index + :arg doc_type: only query this type. + + All the parameters supplied (or omitted) at creation type can be later + overridden by methods (`using`, `index` and `doc_type` respectively). + """ + super().__init__(**kwargs) + + self.aggs = AggsProxy[_R](self) + self._sort: List[Union[str, Dict[str, Dict[str, str]]]] = [] + self._knn: List[Dict[str, Any]] = [] + self._rank: Dict[str, Any] = {} + self._collapse: Dict[str, Any] = {} + self._source: Optional[Union[bool, List[str], Dict[str, List[str]]]] = None + self._highlight: Dict[str, Any] = {} + self._highlight_opts: Dict[str, Any] = {} + self._suggest: Dict[str, Any] = {} + self._script_fields: Dict[str, Any] = {} + self._response_class = Response[_R] + + self._query_proxy = QueryProxy(self, "query") + self._post_filter_proxy = QueryProxy(self, "post_filter") + + def filter(self, *args: Any, **kwargs: Any) -> Self: + return self.query(Bool(filter=[Q(*args, **kwargs)])) + + def exclude(self, *args: Any, **kwargs: Any) -> Self: + return self.query(Bool(filter=[~Q(*args, **kwargs)])) + + def __getitem__(self, n: Union[int, slice]) -> Self: + """ + Support slicing the `Search` instance for pagination. + + Slicing equates to the from/size parameters. E.g.:: + + s = Search().query(...)[0:25] + + is equivalent to:: + + s = Search().query(...).extra(from_=0, size=25) + + """ + s = self._clone() + + if isinstance(n, slice): + # If negative slicing, abort. + if n.start and n.start < 0 or n.stop and n.stop < 0: + raise ValueError("Search does not support negative slicing.") + slice_start = n.start + slice_stop = n.stop + else: # This is an index lookup, equivalent to slicing by [n:n+1]. + # If negative index, abort. + if n < 0: + raise ValueError("Search does not support negative indexing.") + slice_start = n + slice_stop = n + 1 + + old_from = s._extra.get("from") + old_to = None + if "size" in s._extra: + old_to = (old_from or 0) + s._extra["size"] + + new_from = old_from + if slice_start is not None: + new_from = (old_from or 0) + slice_start + new_to = old_to + if slice_stop is not None: + new_to = (old_from or 0) + slice_stop + if old_to is not None and old_to < new_to: + new_to = old_to + + if new_from is not None: + s._extra["from"] = new_from + if new_to is not None: + s._extra["size"] = max(0, new_to - (new_from or 0)) + return s + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> Self: + """ + Construct a new `Search` instance from a raw dict containing the search + body. Useful when migrating from raw dictionaries. + + Example:: + + s = Search.from_dict({ + "query": { + "bool": { + "must": [...] 
+ } + }, + "aggs": {...} + }) + s = s.filter('term', published=True) + """ + s = cls() + s.update_from_dict(d) + return s + + def _clone(self) -> Self: + """ + Return a clone of the current search request. Performs a shallow copy + of all the underlying objects. Used internally by most state modifying + APIs. + """ + s = super()._clone() + + s._response_class = self._response_class + s._knn = [knn.copy() for knn in self._knn] + s._rank = self._rank.copy() + s._collapse = self._collapse.copy() + s._sort = self._sort[:] + s._source = copy.copy(self._source) if self._source is not None else None + s._highlight = self._highlight.copy() + s._highlight_opts = self._highlight_opts.copy() + s._suggest = self._suggest.copy() + s._script_fields = self._script_fields.copy() + for x in ("query", "post_filter"): + getattr(s, x)._proxied = getattr(self, x)._proxied + + # copy top-level bucket definitions + if self.aggs._params.get("aggs"): + s.aggs._params = {"aggs": self.aggs._params["aggs"].copy()} + return s + + def response_class(self, cls: Type[Response[_R]]) -> Self: + """ + Override the default wrapper used for the response. + """ + s = self._clone() + s._response_class = cls + return s + + def update_from_dict(self, d: Dict[str, Any]) -> Self: + """ + Apply options from a serialized body to the current instance. Modifies + the object in-place. Used mostly by ``from_dict``. + """ + d = d.copy() + if "query" in d: + self.query._proxied = Q(d.pop("query")) + if "post_filter" in d: + self.post_filter._proxied = Q(d.pop("post_filter")) + + aggs = d.pop("aggs", d.pop("aggregations", {})) + if aggs: + self.aggs._params = { + "aggs": {name: A(value) for (name, value) in aggs.items()} + } + if "knn" in d: + self._knn = d.pop("knn") + if isinstance(self._knn, dict): + self._knn = [self._knn] + if "rank" in d: + self._rank = d.pop("rank") + if "collapse" in d: + self._collapse = d.pop("collapse") + if "sort" in d: + self._sort = d.pop("sort") + if "_source" in d: + self._source = d.pop("_source") + if "highlight" in d: + high = d.pop("highlight").copy() + self._highlight = high.pop("fields") + self._highlight_opts = high + if "suggest" in d: + self._suggest = d.pop("suggest") + if "text" in self._suggest: + text = self._suggest.pop("text") + for s in self._suggest.values(): + s.setdefault("text", text) + if "script_fields" in d: + self._script_fields = d.pop("script_fields") + self._extra.update(d) + return self + + def script_fields(self, **kwargs: Any) -> Self: + """ + Define script fields to be calculated on hits. See + https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-script-fields.html + for more details. + + Example:: + + s = Search() + s = s.script_fields(times_two="doc['field'].value * 2") + s = s.script_fields( + times_three={ + 'script': { + 'lang': 'painless', + 'source': "doc['field'].value * params.n", + 'params': {'n': 3} + } + } + ) + + """ + s = self._clone() + for name in kwargs: + if isinstance(kwargs[name], str): + kwargs[name] = {"script": kwargs[name]} + s._script_fields.update(kwargs) + return s + + def knn( + self, + field: Union[str, "InstrumentedField"], + k: int, + num_candidates: int, + query_vector: Optional[List[float]] = None, + query_vector_builder: Optional[Dict[str, Any]] = None, + boost: Optional[float] = None, + filter: Optional[Query] = None, + similarity: Optional[float] = None, + inner_hits: Optional[Dict[str, Any]] = None, + ) -> Self: + """ + Add a k-nearest neighbor (kNN) search. 
+ + :arg field: the vector field to search against as a string or document class attribute + :arg k: number of nearest neighbors to return as top hits + :arg num_candidates: number of nearest neighbor candidates to consider per shard + :arg query_vector: the vector to search for + :arg query_vector_builder: A dictionary indicating how to build a query vector + :arg boost: A floating-point boost factor for kNN scores + :arg filter: query to filter the documents that can match + :arg similarity: the minimum similarity required for a document to be considered a match, as a float value + :arg inner_hits: retrieve hits from nested field + + Example:: + + s = Search() + s = s.knn(field='embedding', k=5, num_candidates=10, query_vector=vector, + filter=Q('term', category='blog'))) + """ + s = self._clone() + s._knn.append( + { + "field": str(field), # str() is for InstrumentedField instances + "k": k, + "num_candidates": num_candidates, + } + ) + if query_vector is None and query_vector_builder is None: + raise ValueError("one of query_vector and query_vector_builder is required") + if query_vector is not None and query_vector_builder is not None: + raise ValueError( + "only one of query_vector and query_vector_builder must be given" + ) + if query_vector is not None: + s._knn[-1]["query_vector"] = cast(Any, query_vector) + if query_vector_builder is not None: + s._knn[-1]["query_vector_builder"] = query_vector_builder + if boost is not None: + s._knn[-1]["boost"] = boost + if filter is not None: + if isinstance(filter, Query): + s._knn[-1]["filter"] = filter.to_dict() + else: + s._knn[-1]["filter"] = filter + if similarity is not None: + s._knn[-1]["similarity"] = similarity + if inner_hits is not None: + s._knn[-1]["inner_hits"] = inner_hits + return s + + def rank(self, rrf: Optional[Union[bool, Dict[str, Any]]] = None) -> Self: + """ + Defines a method for combining and ranking results sets from a combination + of searches. Requires a minimum of 2 results sets. + + :arg rrf: Set to ``True`` or an options dictionary to set the rank method to reciprocal rank fusion (RRF). + + Example:: + + s = Search() + s = s.query('match', content='search text') + s = s.knn(field='embedding', k=5, num_candidates=10, query_vector=vector) + s = s.rank(rrf=True) + + Note: This option is in technical preview and may change in the future. The syntax will likely change before GA. + """ + s = self._clone() + s._rank = {} + if rrf is not None and rrf is not False: + s._rank["rrf"] = {} if rrf is True else rrf + return s + + def source( + self, + fields: Optional[ + Union[ + bool, + str, + "InstrumentedField", + List[Union[str, "InstrumentedField"]], + Dict[str, List[Union[str, "InstrumentedField"]]], + ] + ] = None, + **kwargs: Any, + ) -> Self: + """ + Selectively control how the _source field is returned. + + :arg fields: field name, wildcard string, list of field names or wildcards, + or dictionary of includes and excludes + :arg kwargs: ``includes`` or ``excludes`` arguments, when ``fields`` is ``None``. + + When no arguments are given, the entire document will be returned for + each hit. If ``fields`` is a string or list of strings, the field names or field + wildcards given will be included. If ``fields`` is a dictionary with keys of + 'includes' and/or 'excludes' the fields will be either included or excluded + appropriately. + + Calling this multiple times with the same named parameter will override the + previous values with the new ones. 
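# Illustrative usage sketch, not part of the diff: combining a text query with
# the knn() and rank() helpers defined above to request RRF-based hybrid
# ranking. Field names and the query vector are made up; the request is only
# rendered with to_dict(), not executed.
from elasticsearch.dsl import Search

vector = [0.12, -0.34, 0.56]  # hypothetical embedding
s = (
    Search(index="blog-posts")
    .query("match", content="search text")
    .knn(field="embedding", k=5, num_candidates=10, query_vector=vector)
    .rank(rrf=True)
)
print(s.to_dict())
# {'query': {'match': {'content': 'search text'}},
#  'knn': {'field': 'embedding', 'k': 5, 'num_candidates': 10,
#          'query_vector': [0.12, -0.34, 0.56]},
#  'rank': {'rrf': {}}}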
+ + Example:: + + s = Search() + s = s.source(includes=['obj1.*'], excludes=["*.description"]) + + s = Search() + s = s.source(includes=['obj1.*']).source(excludes=["*.description"]) + + """ + s = self._clone() + + if fields and kwargs: + raise ValueError("You cannot specify fields and kwargs at the same time.") + + @overload + def ensure_strings(fields: str) -> str: ... + + @overload + def ensure_strings(fields: "InstrumentedField") -> str: ... + + @overload + def ensure_strings( + fields: List[Union[str, "InstrumentedField"]] + ) -> List[str]: ... + + @overload + def ensure_strings( + fields: Dict[str, List[Union[str, "InstrumentedField"]]] + ) -> Dict[str, List[str]]: ... + + def ensure_strings( + fields: Union[ + str, + "InstrumentedField", + List[Union[str, "InstrumentedField"]], + Dict[str, List[Union[str, "InstrumentedField"]]], + ] + ) -> Union[str, List[str], Dict[str, List[str]]]: + if isinstance(fields, dict): + return {k: ensure_strings(v) for k, v in fields.items()} + elif not isinstance(fields, (str, InstrumentedField)): + # we assume that if `fields` is not a any of [dict, str, + # InstrumentedField] then it is an iterable of strings or + # InstrumentedFields, so we convert them to a plain list of + # strings + return [str(f) for f in fields] + else: + return str(fields) + + if fields is not None: + s._source = fields if isinstance(fields, bool) else ensure_strings(fields) # type: ignore[assignment] + return s + + if kwargs and not isinstance(s._source, dict): + s._source = {} + + if isinstance(s._source, dict): + for key, value in kwargs.items(): + if value is None: + try: + del s._source[key] + except KeyError: + pass + else: + s._source[key] = ensure_strings(value) + + return s + + def sort( + self, *keys: Union[str, "InstrumentedField", Dict[str, Dict[str, str]]] + ) -> Self: + """ + Add sorting information to the search request. If called without + arguments it will remove all sort requirements. Otherwise it will + replace them. Acceptable arguments are:: + + 'some.field' + '-some.other.field' + {'different.field': {'any': 'dict'}} + + so for example:: + + s = Search().sort( + 'category', + '-title', + {"price" : {"order" : "asc", "mode" : "avg"}} + ) + + will sort by ``category``, ``title`` (in descending order) and + ``price`` in ascending order using the ``avg`` mode. + + The API returns a copy of the Search object and can thus be chained. + """ + s = self._clone() + s._sort = [] + for k in keys: + if not isinstance(k, dict): + sort_field = str(k) + if sort_field.startswith("-"): + if sort_field[1:] == "_score": + raise IllegalOperation("Sorting by `-_score` is not allowed.") + s._sort.append({sort_field[1:]: {"order": "desc"}}) + else: + s._sort.append(sort_field) + else: + s._sort.append(k) + return s + + def collapse( + self, + field: Optional[Union[str, "InstrumentedField"]] = None, + inner_hits: Optional[Dict[str, Any]] = None, + max_concurrent_group_searches: Optional[int] = None, + ) -> Self: + """ + Add collapsing information to the search request. + If called without providing ``field``, it will remove all collapse + requirements, otherwise it will replace them with the provided + arguments. + The API returns a copy of the Search object and can thus be chained. 
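# Illustrative usage sketch, not part of the diff: the sort() and collapse()
# helpers above only manipulate the request body; rendering with to_dict()
# shows the resulting "sort" and "collapse" sections. Field names are
# hypothetical.
from elasticsearch.dsl import Search

s = (
    Search(index="blog-posts")
    .sort("category", "-published_at", {"rating": {"order": "desc", "mode": "avg"}})
    .collapse(field="author.keyword", max_concurrent_group_searches=4)
)
print(s.to_dict())
# {'sort': ['category', {'published_at': {'order': 'desc'}},
#           {'rating': {'order': 'desc', 'mode': 'avg'}}],
#  'collapse': {'field': 'author.keyword', 'max_concurrent_group_searches': 4}}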
+ """ + s = self._clone() + s._collapse = {} + + if field is None: + return s + + s._collapse["field"] = str(field) + if inner_hits: + s._collapse["inner_hits"] = inner_hits + if max_concurrent_group_searches: + s._collapse["max_concurrent_group_searches"] = max_concurrent_group_searches + return s + + def highlight_options(self, **kwargs: Any) -> Self: + """ + Update the global highlighting options used for this request. For + example:: + + s = Search() + s = s.highlight_options(order='score') + """ + s = self._clone() + s._highlight_opts.update(kwargs) + return s + + def highlight( + self, *fields: Union[str, "InstrumentedField"], **kwargs: Any + ) -> Self: + """ + Request highlighting of some fields. All keyword arguments passed in will be + used as parameters for all the fields in the ``fields`` parameter. Example:: + + Search().highlight('title', 'body', fragment_size=50) + + will produce the equivalent of:: + + { + "highlight": { + "fields": { + "body": {"fragment_size": 50}, + "title": {"fragment_size": 50} + } + } + } + + If you want to have different options for different fields + you can call ``highlight`` twice:: + + Search().highlight('title', fragment_size=50).highlight('body', fragment_size=100) + + which will produce:: + + { + "highlight": { + "fields": { + "body": {"fragment_size": 100}, + "title": {"fragment_size": 50} + } + } + } + + """ + s = self._clone() + for f in fields: + s._highlight[str(f)] = kwargs + return s + + def suggest( + self, + name: str, + text: Optional[str] = None, + regex: Optional[str] = None, + **kwargs: Any, + ) -> Self: + """ + Add a suggestions request to the search. + + :arg name: name of the suggestion + :arg text: text to suggest on + + All keyword arguments will be added to the suggestions body. For example:: + + s = Search() + s = s.suggest('suggestion-1', 'Elasticsearch', term={'field': 'body'}) + + # regex query for Completion Suggester + s = Search() + s = s.suggest('suggestion-1', regex='py[thon|py]', completion={'field': 'body'}) + """ + if text is None and regex is None: + raise ValueError('You have to pass "text" or "regex" argument.') + if text and regex: + raise ValueError('You can only pass either "text" or "regex" argument.') + if regex and "completion" not in kwargs: + raise ValueError( + '"regex" argument must be passed with "completion" keyword argument.' + ) + + s = self._clone() + if regex: + s._suggest[name] = {"regex": regex} + elif text: + if "completion" in kwargs: + s._suggest[name] = {"prefix": text} + else: + s._suggest[name] = {"text": text} + s._suggest[name].update(kwargs) + return s + + def search_after(self) -> Self: + """ + Return a ``Search`` instance that retrieves the next page of results. + + This method provides an easy way to paginate a long list of results using + the ``search_after`` option. For example:: + + page_size = 20 + s = Search()[:page_size].sort("date") + + while True: + # get a page of results + r = await s.execute() + + # do something with this page of results + + # exit the loop if we reached the end + if len(r.hits) < page_size: + break + + # get a search object with the next page of results + s = s.search_after() + + Note that the ``search_after`` option requires the search to have an + explicit ``sort`` order. 
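# Illustrative usage sketch, not part of the diff: combining
# highlight_options(), highlight() and suggest() from above. The field names,
# suggester name and misspelled suggest text are made up.
from elasticsearch.dsl import Search

s = (
    Search(index="blog-posts")
    .highlight_options(order="score")
    .highlight("title", fragment_size=50)
    .suggest("title_suggestions", "elasticsaerch", term={"field": "title"})
)
print(s.to_dict())
# {'highlight': {'fields': {'title': {'fragment_size': 50}}, 'order': 'score'},
#  'suggest': {'title_suggestions': {'text': 'elasticsaerch',
#                                    'term': {'field': 'title'}}}}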
+ """ + if not hasattr(self, "_response"): + raise ValueError("A search must be executed before using search_after") + return cast(Self, self._response.search_after()) + + def to_dict(self, count: bool = False, **kwargs: Any) -> Dict[str, Any]: + """ + Serialize the search into the dictionary that will be sent over as the + request's body. + + :arg count: a flag to specify if we are interested in a body for count - + no aggregations, no pagination bounds etc. + + All additional keyword arguments will be included into the dictionary. + """ + d = {} + + if self.query: + d["query"] = recursive_to_dict(self.query) + + if self._knn: + if len(self._knn) == 1: + d["knn"] = self._knn[0] + else: + d["knn"] = self._knn + + if self._rank: + d["rank"] = self._rank + + # count request doesn't care for sorting and other things + if not count: + if self.post_filter: + d["post_filter"] = recursive_to_dict(self.post_filter.to_dict()) + + if self.aggs.aggs: + d.update(recursive_to_dict(self.aggs.to_dict())) + + if self._sort: + d["sort"] = self._sort + + if self._collapse: + d["collapse"] = self._collapse + + d.update(recursive_to_dict(self._extra)) + + if self._source not in (None, {}): + d["_source"] = self._source + + if self._highlight: + d["highlight"] = {"fields": self._highlight} + d["highlight"].update(self._highlight_opts) + + if self._suggest: + d["suggest"] = self._suggest + + if self._script_fields: + d["script_fields"] = self._script_fields + + d.update(recursive_to_dict(kwargs)) + return d + + +class MultiSearchBase(Request[_R]): + """ + Combine multiple :class:`~elasticsearch.dsl.Search` objects into a single + request. + """ + + def __init__(self, **kwargs: Any): + super().__init__(**kwargs) + self._searches: List[SearchBase[_R]] = [] + + def __getitem__(self, key: Union[int, slice]) -> Any: + return self._searches[key] + + def __iter__(self) -> Iterator[SearchBase[_R]]: + return iter(self._searches) + + def _clone(self) -> Self: + ms = super()._clone() + ms._searches = self._searches[:] + return ms + + def add(self, search: SearchBase[_R]) -> Self: + """ + Adds a new :class:`~elasticsearch.dsl.Search` object to the request:: + + ms = MultiSearch(index='my-index') + ms = ms.add(Search(doc_type=Category).filter('term', category='python')) + ms = ms.add(Search(doc_type=Blog)) + """ + ms = self._clone() + ms._searches.append(search) + return ms + + def to_dict(self) -> List[Dict[str, Any]]: # type: ignore[override] + out: List[Dict[str, Any]] = [] + for s in self._searches: + meta: Dict[str, Any] = {} + if s._index: + meta["index"] = cast(Any, s._index) + meta.update(s._params) + + out.append(meta) + out.append(s.to_dict()) + + return out diff --git a/elasticsearch/dsl/serializer.py b/elasticsearch/dsl/serializer.py new file mode 100644 index 000000000..3080f1dad --- /dev/null +++ b/elasticsearch/dsl/serializer.py @@ -0,0 +1,34 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
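# Illustrative usage sketch, not part of the diff: MultiSearchBase.to_dict()
# above renders the interleaved header/body pairs expected by the msearch API.
# Index and field names are hypothetical, and the package-level
# MultiSearch/Search re-exports are assumed.
from elasticsearch.dsl import MultiSearch, Search

ms = MultiSearch()
ms = ms.add(Search(index="blog-posts").query("match", title="python"))
ms = ms.add(Search(index="comments").query("term", approved=True))
print(ms.to_dict())
# [{'index': ['blog-posts']}, {'query': {'match': {'title': 'python'}}},
#  {'index': ['comments']}, {'query': {'term': {'approved': True}}}]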
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from typing import Any + +from elasticsearch.serializer import JSONSerializer + +from .utils import AttrList + + +class AttrJSONSerializer(JSONSerializer): + def default(self, data: Any) -> Any: + if isinstance(data, AttrList): + return data._l_ + if hasattr(data, "to_dict"): + return data.to_dict() + return super().default(data) + + +serializer = AttrJSONSerializer() diff --git a/elasticsearch/dsl/types.py b/elasticsearch/dsl/types.py new file mode 100644 index 000000000..ce639c4ed --- /dev/null +++ b/elasticsearch/dsl/types.py @@ -0,0 +1,6273 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from typing import Any, Dict, Literal, Mapping, Sequence, Union + +from elastic_transport.client_utils import DEFAULT, DefaultType + +from . import Query +from .document_base import InstrumentedField +from .utils import AttrDict + +PipeSeparatedFlags = str + + +class AggregationRange(AttrDict[Any]): + """ + :arg from: Start of the range (inclusive). + :arg key: Custom key to return the range with. + :arg to: End of the range (exclusive). + """ + + from_: Union[float, None, DefaultType] + key: Union[str, DefaultType] + to: Union[float, None, DefaultType] + + def __init__( + self, + *, + from_: Union[float, None, DefaultType] = DEFAULT, + key: Union[str, DefaultType] = DEFAULT, + to: Union[float, None, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if from_ is not DEFAULT: + kwargs["from_"] = from_ + if key is not DEFAULT: + kwargs["key"] = key + if to is not DEFAULT: + kwargs["to"] = to + super().__init__(kwargs) + + +class BucketCorrelationFunction(AttrDict[Any]): + """ + :arg count_correlation: (required) The configuration to calculate a + count correlation. This function is designed for determining the + correlation of a term value and a given metric. + """ + + count_correlation: Union[ + "BucketCorrelationFunctionCountCorrelation", Dict[str, Any], DefaultType + ] + + def __init__( + self, + *, + count_correlation: Union[ + "BucketCorrelationFunctionCountCorrelation", Dict[str, Any], DefaultType + ] = DEFAULT, + **kwargs: Any, + ): + if count_correlation is not DEFAULT: + kwargs["count_correlation"] = count_correlation + super().__init__(kwargs) + + +class BucketCorrelationFunctionCountCorrelation(AttrDict[Any]): + """ + :arg indicator: (required) The indicator with which to correlate the + configured `bucket_path` values. 
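# Illustrative usage sketch, not part of the diff: the AttrJSONSerializer above
# lets DSL wrapper objects (AttrList, anything exposing to_dict()) pass straight
# through JSON serialization. A minimal demonstration; the exact output bytes
# and separators depend on the base JSONSerializer.
from elasticsearch.dsl.serializer import serializer
from elasticsearch.dsl.utils import AttrList

payload = {"tags": AttrList(["a", "b", "c"])}
print(serializer.dumps(payload))  # AttrList is unwrapped into a plain JSON array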
+ """ + + indicator: Union[ + "BucketCorrelationFunctionCountCorrelationIndicator", + Dict[str, Any], + DefaultType, + ] + + def __init__( + self, + *, + indicator: Union[ + "BucketCorrelationFunctionCountCorrelationIndicator", + Dict[str, Any], + DefaultType, + ] = DEFAULT, + **kwargs: Any, + ): + if indicator is not DEFAULT: + kwargs["indicator"] = indicator + super().__init__(kwargs) + + +class BucketCorrelationFunctionCountCorrelationIndicator(AttrDict[Any]): + """ + :arg doc_count: (required) The total number of documents that + initially created the expectations. It’s required to be greater + than or equal to the sum of all values in the buckets_path as this + is the originating superset of data to which the term values are + correlated. + :arg expectations: (required) An array of numbers with which to + correlate the configured `bucket_path` values. The length of this + value must always equal the number of buckets returned by the + `bucket_path`. + :arg fractions: An array of fractions to use when averaging and + calculating variance. This should be used if the pre-calculated + data and the buckets_path have known gaps. The length of + fractions, if provided, must equal expectations. + """ + + doc_count: Union[int, DefaultType] + expectations: Union[Sequence[float], DefaultType] + fractions: Union[Sequence[float], DefaultType] + + def __init__( + self, + *, + doc_count: Union[int, DefaultType] = DEFAULT, + expectations: Union[Sequence[float], DefaultType] = DEFAULT, + fractions: Union[Sequence[float], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if doc_count is not DEFAULT: + kwargs["doc_count"] = doc_count + if expectations is not DEFAULT: + kwargs["expectations"] = expectations + if fractions is not DEFAULT: + kwargs["fractions"] = fractions + super().__init__(kwargs) + + +class ChiSquareHeuristic(AttrDict[Any]): + """ + :arg background_is_superset: (required) Set to `false` if you defined + a custom background filter that represents a different set of + documents that you want to compare to. + :arg include_negatives: (required) Set to `false` to filter out the + terms that appear less often in the subset than in documents + outside the subset. + """ + + background_is_superset: Union[bool, DefaultType] + include_negatives: Union[bool, DefaultType] + + def __init__( + self, + *, + background_is_superset: Union[bool, DefaultType] = DEFAULT, + include_negatives: Union[bool, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if background_is_superset is not DEFAULT: + kwargs["background_is_superset"] = background_is_superset + if include_negatives is not DEFAULT: + kwargs["include_negatives"] = include_negatives + super().__init__(kwargs) + + +class ClassificationInferenceOptions(AttrDict[Any]): + """ + :arg num_top_classes: Specifies the number of top class predictions to + return. Defaults to 0. + :arg num_top_feature_importance_values: Specifies the maximum number + of feature importance values per document. + :arg prediction_field_type: Specifies the type of the predicted field + to write. Acceptable values are: string, number, boolean. When + boolean is provided 1.0 is transformed to true and 0.0 to false. + :arg results_field: The field that is added to incoming documents to + contain the inference prediction. Defaults to predicted_value. + :arg top_classes_results_field: Specifies the field to which the top + classes are written. Defaults to top_classes. 
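# Illustrative usage sketch, not part of the diff: the classes in types.py
# follow a common pattern, where every argument defaults to the DEFAULT sentinel
# and only explicitly passed values are stored, so partially specified options
# serialize cleanly. Shown here with ChiSquareHeuristic from above.
from elasticsearch.dsl.types import ChiSquareHeuristic

full = ChiSquareHeuristic(background_is_superset=False, include_negatives=True)
print(full.to_dict())
# {'background_is_superset': False, 'include_negatives': True}

partial = ChiSquareHeuristic(include_negatives=True)
print(partial.to_dict())  # {'include_negatives': True} -- omitted args stay out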
+ """ + + num_top_classes: Union[int, DefaultType] + num_top_feature_importance_values: Union[int, DefaultType] + prediction_field_type: Union[str, DefaultType] + results_field: Union[str, DefaultType] + top_classes_results_field: Union[str, DefaultType] + + def __init__( + self, + *, + num_top_classes: Union[int, DefaultType] = DEFAULT, + num_top_feature_importance_values: Union[int, DefaultType] = DEFAULT, + prediction_field_type: Union[str, DefaultType] = DEFAULT, + results_field: Union[str, DefaultType] = DEFAULT, + top_classes_results_field: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if num_top_classes is not DEFAULT: + kwargs["num_top_classes"] = num_top_classes + if num_top_feature_importance_values is not DEFAULT: + kwargs["num_top_feature_importance_values"] = ( + num_top_feature_importance_values + ) + if prediction_field_type is not DEFAULT: + kwargs["prediction_field_type"] = prediction_field_type + if results_field is not DEFAULT: + kwargs["results_field"] = results_field + if top_classes_results_field is not DEFAULT: + kwargs["top_classes_results_field"] = top_classes_results_field + super().__init__(kwargs) + + +class CommonTermsQuery(AttrDict[Any]): + """ + :arg query: (required) + :arg analyzer: + :arg cutoff_frequency: + :arg high_freq_operator: + :arg low_freq_operator: + :arg minimum_should_match: + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + query: Union[str, DefaultType] + analyzer: Union[str, DefaultType] + cutoff_frequency: Union[float, DefaultType] + high_freq_operator: Union[Literal["and", "or"], DefaultType] + low_freq_operator: Union[Literal["and", "or"], DefaultType] + minimum_should_match: Union[int, str, DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + query: Union[str, DefaultType] = DEFAULT, + analyzer: Union[str, DefaultType] = DEFAULT, + cutoff_frequency: Union[float, DefaultType] = DEFAULT, + high_freq_operator: Union[Literal["and", "or"], DefaultType] = DEFAULT, + low_freq_operator: Union[Literal["and", "or"], DefaultType] = DEFAULT, + minimum_should_match: Union[int, str, DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if query is not DEFAULT: + kwargs["query"] = query + if analyzer is not DEFAULT: + kwargs["analyzer"] = analyzer + if cutoff_frequency is not DEFAULT: + kwargs["cutoff_frequency"] = cutoff_frequency + if high_freq_operator is not DEFAULT: + kwargs["high_freq_operator"] = high_freq_operator + if low_freq_operator is not DEFAULT: + kwargs["low_freq_operator"] = low_freq_operator + if minimum_should_match is not DEFAULT: + kwargs["minimum_should_match"] = minimum_should_match + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(kwargs) + + +class CoordsGeoBounds(AttrDict[Any]): + """ + :arg top: (required) + :arg bottom: (required) + :arg left: (required) + :arg right: (required) + """ + + top: Union[float, DefaultType] + bottom: Union[float, DefaultType] + left: Union[float, DefaultType] + right: Union[float, DefaultType] + + def __init__( + self, + *, + top: Union[float, DefaultType] = DEFAULT, + bottom: Union[float, 
DefaultType] = DEFAULT, + left: Union[float, DefaultType] = DEFAULT, + right: Union[float, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if top is not DEFAULT: + kwargs["top"] = top + if bottom is not DEFAULT: + kwargs["bottom"] = bottom + if left is not DEFAULT: + kwargs["left"] = left + if right is not DEFAULT: + kwargs["right"] = right + super().__init__(kwargs) + + +class CustomCategorizeTextAnalyzer(AttrDict[Any]): + """ + :arg char_filter: + :arg tokenizer: + :arg filter: + """ + + char_filter: Union[Sequence[str], DefaultType] + tokenizer: Union[str, DefaultType] + filter: Union[Sequence[str], DefaultType] + + def __init__( + self, + *, + char_filter: Union[Sequence[str], DefaultType] = DEFAULT, + tokenizer: Union[str, DefaultType] = DEFAULT, + filter: Union[Sequence[str], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if char_filter is not DEFAULT: + kwargs["char_filter"] = char_filter + if tokenizer is not DEFAULT: + kwargs["tokenizer"] = tokenizer + if filter is not DEFAULT: + kwargs["filter"] = filter + super().__init__(kwargs) + + +class DateRangeExpression(AttrDict[Any]): + """ + :arg from: Start of the range (inclusive). + :arg key: Custom key to return the range with. + :arg to: End of the range (exclusive). + """ + + from_: Union[str, float, DefaultType] + key: Union[str, DefaultType] + to: Union[str, float, DefaultType] + + def __init__( + self, + *, + from_: Union[str, float, DefaultType] = DEFAULT, + key: Union[str, DefaultType] = DEFAULT, + to: Union[str, float, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if from_ is not DEFAULT: + kwargs["from_"] = from_ + if key is not DEFAULT: + kwargs["key"] = key + if to is not DEFAULT: + kwargs["to"] = to + super().__init__(kwargs) + + +class EmptyObject(AttrDict[Any]): + """ + For empty Class assignments + """ + + def __init__(self, **kwargs: Any): + super().__init__(kwargs) + + +class EwmaModelSettings(AttrDict[Any]): + """ + :arg alpha: + """ + + alpha: Union[float, DefaultType] + + def __init__(self, *, alpha: Union[float, DefaultType] = DEFAULT, **kwargs: Any): + if alpha is not DEFAULT: + kwargs["alpha"] = alpha + super().__init__(kwargs) + + +class ExtendedBounds(AttrDict[Any]): + """ + :arg max: Maximum value for the bound. + :arg min: Minimum value for the bound. + """ + + max: Any + min: Any + + def __init__(self, *, max: Any = DEFAULT, min: Any = DEFAULT, **kwargs: Any): + if max is not DEFAULT: + kwargs["max"] = max + if min is not DEFAULT: + kwargs["min"] = min + super().__init__(kwargs) + + +class FieldAndFormat(AttrDict[Any]): + """ + A reference to a field with formatting instructions on how to return + the value + + :arg field: (required) Wildcard pattern. The request returns values + for field names matching this pattern. + :arg format: Format in which the values are returned. 
+ :arg include_unmapped: + """ + + field: Union[str, InstrumentedField, DefaultType] + format: Union[str, DefaultType] + include_unmapped: Union[bool, DefaultType] + + def __init__( + self, + *, + field: Union[str, InstrumentedField, DefaultType] = DEFAULT, + format: Union[str, DefaultType] = DEFAULT, + include_unmapped: Union[bool, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if field is not DEFAULT: + kwargs["field"] = str(field) + if format is not DEFAULT: + kwargs["format"] = format + if include_unmapped is not DEFAULT: + kwargs["include_unmapped"] = include_unmapped + super().__init__(kwargs) + + +class FieldCollapse(AttrDict[Any]): + """ + :arg field: (required) The field to collapse the result set on + :arg inner_hits: The number of inner hits and their sort order + :arg max_concurrent_group_searches: The number of concurrent requests + allowed to retrieve the inner_hits per group + :arg collapse: + """ + + field: Union[str, InstrumentedField, DefaultType] + inner_hits: Union[ + "InnerHits", Sequence["InnerHits"], Sequence[Dict[str, Any]], DefaultType + ] + max_concurrent_group_searches: Union[int, DefaultType] + collapse: Union["FieldCollapse", Dict[str, Any], DefaultType] + + def __init__( + self, + *, + field: Union[str, InstrumentedField, DefaultType] = DEFAULT, + inner_hits: Union[ + "InnerHits", Sequence["InnerHits"], Sequence[Dict[str, Any]], DefaultType + ] = DEFAULT, + max_concurrent_group_searches: Union[int, DefaultType] = DEFAULT, + collapse: Union["FieldCollapse", Dict[str, Any], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if field is not DEFAULT: + kwargs["field"] = str(field) + if inner_hits is not DEFAULT: + kwargs["inner_hits"] = inner_hits + if max_concurrent_group_searches is not DEFAULT: + kwargs["max_concurrent_group_searches"] = max_concurrent_group_searches + if collapse is not DEFAULT: + kwargs["collapse"] = collapse + super().__init__(kwargs) + + +class FieldLookup(AttrDict[Any]): + """ + :arg id: (required) `id` of the document. + :arg index: Index from which to retrieve the document. + :arg path: Name of the field. + :arg routing: Custom routing value. 
+ """ + + id: Union[str, DefaultType] + index: Union[str, DefaultType] + path: Union[str, InstrumentedField, DefaultType] + routing: Union[str, DefaultType] + + def __init__( + self, + *, + id: Union[str, DefaultType] = DEFAULT, + index: Union[str, DefaultType] = DEFAULT, + path: Union[str, InstrumentedField, DefaultType] = DEFAULT, + routing: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if id is not DEFAULT: + kwargs["id"] = id + if index is not DEFAULT: + kwargs["index"] = index + if path is not DEFAULT: + kwargs["path"] = str(path) + if routing is not DEFAULT: + kwargs["routing"] = routing + super().__init__(kwargs) + + +class FieldSort(AttrDict[Any]): + """ + :arg missing: + :arg mode: + :arg nested: + :arg order: + :arg unmapped_type: + :arg numeric_type: + :arg format: + """ + + missing: Union[str, int, float, bool, DefaultType] + mode: Union[Literal["min", "max", "sum", "avg", "median"], DefaultType] + nested: Union["NestedSortValue", Dict[str, Any], DefaultType] + order: Union[Literal["asc", "desc"], DefaultType] + unmapped_type: Union[ + Literal[ + "none", + "geo_point", + "geo_shape", + "ip", + "binary", + "keyword", + "text", + "search_as_you_type", + "date", + "date_nanos", + "boolean", + "completion", + "nested", + "object", + "version", + "murmur3", + "token_count", + "percolator", + "integer", + "long", + "short", + "byte", + "float", + "half_float", + "scaled_float", + "double", + "integer_range", + "float_range", + "long_range", + "double_range", + "date_range", + "ip_range", + "alias", + "join", + "rank_feature", + "rank_features", + "flattened", + "shape", + "histogram", + "constant_keyword", + "aggregate_metric_double", + "dense_vector", + "semantic_text", + "sparse_vector", + "match_only_text", + "icu_collation_keyword", + ], + DefaultType, + ] + numeric_type: Union[Literal["long", "double", "date", "date_nanos"], DefaultType] + format: Union[str, DefaultType] + + def __init__( + self, + *, + missing: Union[str, int, float, bool, DefaultType] = DEFAULT, + mode: Union[ + Literal["min", "max", "sum", "avg", "median"], DefaultType + ] = DEFAULT, + nested: Union["NestedSortValue", Dict[str, Any], DefaultType] = DEFAULT, + order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT, + unmapped_type: Union[ + Literal[ + "none", + "geo_point", + "geo_shape", + "ip", + "binary", + "keyword", + "text", + "search_as_you_type", + "date", + "date_nanos", + "boolean", + "completion", + "nested", + "object", + "version", + "murmur3", + "token_count", + "percolator", + "integer", + "long", + "short", + "byte", + "float", + "half_float", + "scaled_float", + "double", + "integer_range", + "float_range", + "long_range", + "double_range", + "date_range", + "ip_range", + "alias", + "join", + "rank_feature", + "rank_features", + "flattened", + "shape", + "histogram", + "constant_keyword", + "aggregate_metric_double", + "dense_vector", + "semantic_text", + "sparse_vector", + "match_only_text", + "icu_collation_keyword", + ], + DefaultType, + ] = DEFAULT, + numeric_type: Union[ + Literal["long", "double", "date", "date_nanos"], DefaultType + ] = DEFAULT, + format: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if missing is not DEFAULT: + kwargs["missing"] = missing + if mode is not DEFAULT: + kwargs["mode"] = mode + if nested is not DEFAULT: + kwargs["nested"] = nested + if order is not DEFAULT: + kwargs["order"] = order + if unmapped_type is not DEFAULT: + kwargs["unmapped_type"] = unmapped_type + if numeric_type is not DEFAULT: + kwargs["numeric_type"] = 
numeric_type + if format is not DEFAULT: + kwargs["format"] = format + super().__init__(kwargs) + + +class FrequentItemSetsField(AttrDict[Any]): + """ + :arg field: (required) + :arg exclude: Values to exclude. Can be regular expression strings or + arrays of strings of exact terms. + :arg include: Values to include. Can be regular expression strings or + arrays of strings of exact terms. + """ + + field: Union[str, InstrumentedField, DefaultType] + exclude: Union[str, Sequence[str], DefaultType] + include: Union[str, Sequence[str], "TermsPartition", Dict[str, Any], DefaultType] + + def __init__( + self, + *, + field: Union[str, InstrumentedField, DefaultType] = DEFAULT, + exclude: Union[str, Sequence[str], DefaultType] = DEFAULT, + include: Union[ + str, Sequence[str], "TermsPartition", Dict[str, Any], DefaultType + ] = DEFAULT, + **kwargs: Any, + ): + if field is not DEFAULT: + kwargs["field"] = str(field) + if exclude is not DEFAULT: + kwargs["exclude"] = exclude + if include is not DEFAULT: + kwargs["include"] = include + super().__init__(kwargs) + + +class FuzzyQuery(AttrDict[Any]): + """ + :arg value: (required) Term you wish to find in the provided field. + :arg max_expansions: Maximum number of variations created. Defaults to + `50` if omitted. + :arg prefix_length: Number of beginning characters left unchanged when + creating expansions. + :arg rewrite: Number of beginning characters left unchanged when + creating expansions. Defaults to `constant_score` if omitted. + :arg transpositions: Indicates whether edits include transpositions of + two adjacent characters (for example `ab` to `ba`). Defaults to + `True` if omitted. + :arg fuzziness: Maximum edit distance allowed for matching. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + value: Union[str, float, bool, DefaultType] + max_expansions: Union[int, DefaultType] + prefix_length: Union[int, DefaultType] + rewrite: Union[str, DefaultType] + transpositions: Union[bool, DefaultType] + fuzziness: Union[str, int, DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + value: Union[str, float, bool, DefaultType] = DEFAULT, + max_expansions: Union[int, DefaultType] = DEFAULT, + prefix_length: Union[int, DefaultType] = DEFAULT, + rewrite: Union[str, DefaultType] = DEFAULT, + transpositions: Union[bool, DefaultType] = DEFAULT, + fuzziness: Union[str, int, DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if value is not DEFAULT: + kwargs["value"] = value + if max_expansions is not DEFAULT: + kwargs["max_expansions"] = max_expansions + if prefix_length is not DEFAULT: + kwargs["prefix_length"] = prefix_length + if rewrite is not DEFAULT: + kwargs["rewrite"] = rewrite + if transpositions is not DEFAULT: + kwargs["transpositions"] = transpositions + if fuzziness is not DEFAULT: + kwargs["fuzziness"] = fuzziness + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(kwargs) + + +class GeoDistanceSort(AttrDict[Any]): + """ + :arg _field: The field to use in this query. + :arg _value: The query value for the field. 
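# Illustrative usage sketch, not part of the diff: FuzzyQuery above models the
# per-field body of a `fuzzy` query; only the options that were supplied end up
# in the serialized query. The field name and misspelled term are made up.
from elasticsearch.dsl import Q
from elasticsearch.dsl.types import FuzzyQuery

opts = FuzzyQuery(value="elasticsaerch", transpositions=True, fuzziness="AUTO")
q = Q("fuzzy", title=opts.to_dict())
print(q.to_dict())
# {'fuzzy': {'title': {'value': 'elasticsaerch', 'transpositions': True,
#                      'fuzziness': 'AUTO'}}}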
+ :arg mode: + :arg distance_type: + :arg ignore_unmapped: + :arg order: + :arg unit: + :arg nested: + """ + + _field: Union[str, "InstrumentedField", "DefaultType"] + _value: Union[ + Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str], + Sequence[Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str]], + Dict[str, Any], + "DefaultType", + ] + mode: Union[Literal["min", "max", "sum", "avg", "median"], DefaultType] + distance_type: Union[Literal["arc", "plane"], DefaultType] + ignore_unmapped: Union[bool, DefaultType] + order: Union[Literal["asc", "desc"], DefaultType] + unit: Union[ + Literal["in", "ft", "yd", "mi", "nmi", "km", "m", "cm", "mm"], DefaultType + ] + nested: Union["NestedSortValue", Dict[str, Any], DefaultType] + + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union[ + Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str], + Sequence[ + Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str] + ], + Dict[str, Any], + "DefaultType", + ] = DEFAULT, + *, + mode: Union[ + Literal["min", "max", "sum", "avg", "median"], DefaultType + ] = DEFAULT, + distance_type: Union[Literal["arc", "plane"], DefaultType] = DEFAULT, + ignore_unmapped: Union[bool, DefaultType] = DEFAULT, + order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT, + unit: Union[ + Literal["in", "ft", "yd", "mi", "nmi", "km", "m", "cm", "mm"], DefaultType + ] = DEFAULT, + nested: Union["NestedSortValue", Dict[str, Any], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + if mode is not DEFAULT: + kwargs["mode"] = mode + if distance_type is not DEFAULT: + kwargs["distance_type"] = distance_type + if ignore_unmapped is not DEFAULT: + kwargs["ignore_unmapped"] = ignore_unmapped + if order is not DEFAULT: + kwargs["order"] = order + if unit is not DEFAULT: + kwargs["unit"] = unit + if nested is not DEFAULT: + kwargs["nested"] = nested + super().__init__(kwargs) + + +class GeoHashLocation(AttrDict[Any]): + """ + :arg geohash: (required) + """ + + geohash: Union[str, DefaultType] + + def __init__(self, *, geohash: Union[str, DefaultType] = DEFAULT, **kwargs: Any): + if geohash is not DEFAULT: + kwargs["geohash"] = geohash + super().__init__(kwargs) + + +class GeoLinePoint(AttrDict[Any]): + """ + :arg field: (required) The name of the geo_point field. + """ + + field: Union[str, InstrumentedField, DefaultType] + + def __init__( + self, + *, + field: Union[str, InstrumentedField, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if field is not DEFAULT: + kwargs["field"] = str(field) + super().__init__(kwargs) + + +class GeoLineSort(AttrDict[Any]): + """ + :arg field: (required) The name of the numeric field to use as the + sort key for ordering the points. 
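# Illustrative usage sketch, not part of the diff: GeoDistanceSort above uses
# the `_field`/`_value` convention, so the geo field name itself becomes a key
# of the sort clause, which is then placed under the `_geo_distance` key of a
# sort entry. The field name and coordinates are made up.
from elasticsearch.dsl import Search
from elasticsearch.dsl.types import GeoDistanceSort

clause = GeoDistanceSort(
    _field="pin.location",
    _value=[-74.0, 40.7],  # [lon, lat] is one of the accepted geo-location forms
    order="asc",
    unit="km",
)
s = Search(index="places").sort({"_geo_distance": clause.to_dict()})
print(s.to_dict()["sort"])
# [{'_geo_distance': {'pin.location': [-74.0, 40.7], 'order': 'asc', 'unit': 'km'}}]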
+ """ + + field: Union[str, InstrumentedField, DefaultType] + + def __init__( + self, + *, + field: Union[str, InstrumentedField, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if field is not DEFAULT: + kwargs["field"] = str(field) + super().__init__(kwargs) + + +class GeoPolygonPoints(AttrDict[Any]): + """ + :arg points: (required) + """ + + points: Union[ + Sequence[Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str]], + Dict[str, Any], + DefaultType, + ] + + def __init__( + self, + *, + points: Union[ + Sequence[ + Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str] + ], + Dict[str, Any], + DefaultType, + ] = DEFAULT, + **kwargs: Any, + ): + if points is not DEFAULT: + kwargs["points"] = points + super().__init__(kwargs) + + +class GeoShapeFieldQuery(AttrDict[Any]): + """ + :arg shape: + :arg indexed_shape: Query using an indexed shape retrieved from the + the specified document and path. + :arg relation: Spatial relation operator used to search a geo field. + Defaults to `intersects` if omitted. + """ + + shape: Any + indexed_shape: Union["FieldLookup", Dict[str, Any], DefaultType] + relation: Union[ + Literal["intersects", "disjoint", "within", "contains"], DefaultType + ] + + def __init__( + self, + *, + shape: Any = DEFAULT, + indexed_shape: Union["FieldLookup", Dict[str, Any], DefaultType] = DEFAULT, + relation: Union[ + Literal["intersects", "disjoint", "within", "contains"], DefaultType + ] = DEFAULT, + **kwargs: Any, + ): + if shape is not DEFAULT: + kwargs["shape"] = shape + if indexed_shape is not DEFAULT: + kwargs["indexed_shape"] = indexed_shape + if relation is not DEFAULT: + kwargs["relation"] = relation + super().__init__(kwargs) + + +class GoogleNormalizedDistanceHeuristic(AttrDict[Any]): + """ + :arg background_is_superset: Set to `false` if you defined a custom + background filter that represents a different set of documents + that you want to compare to. + """ + + background_is_superset: Union[bool, DefaultType] + + def __init__( + self, + *, + background_is_superset: Union[bool, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if background_is_superset is not DEFAULT: + kwargs["background_is_superset"] = background_is_superset + super().__init__(kwargs) + + +class HdrMethod(AttrDict[Any]): + """ + :arg number_of_significant_value_digits: Specifies the resolution of + values for the histogram in number of significant digits. + """ + + number_of_significant_value_digits: Union[int, DefaultType] + + def __init__( + self, + *, + number_of_significant_value_digits: Union[int, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if number_of_significant_value_digits is not DEFAULT: + kwargs["number_of_significant_value_digits"] = ( + number_of_significant_value_digits + ) + super().__init__(kwargs) + + +class Highlight(AttrDict[Any]): + """ + :arg fields: (required) + :arg encoder: + :arg type: + :arg boundary_chars: A string that contains each boundary character. + Defaults to `.,!? \t\n` if omitted. + :arg boundary_max_scan: How far to scan for boundary characters. + Defaults to `20` if omitted. + :arg boundary_scanner: Specifies how to break the highlighted + fragments: chars, sentence, or word. Only valid for the unified + and fvh highlighters. Defaults to `sentence` for the `unified` + highlighter. Defaults to `chars` for the `fvh` highlighter. + :arg boundary_scanner_locale: Controls which locale is used to search + for sentence and word boundaries. 
This parameter takes a form of a + language tag, for example: `"en-US"`, `"fr-FR"`, `"ja-JP"`. + Defaults to `Locale.ROOT` if omitted. + :arg force_source: + :arg fragmenter: Specifies how text should be broken up in highlight + snippets: `simple` or `span`. Only valid for the `plain` + highlighter. Defaults to `span` if omitted. + :arg fragment_size: The size of the highlighted fragment in + characters. Defaults to `100` if omitted. + :arg highlight_filter: + :arg highlight_query: Highlight matches for a query other than the + search query. This is especially useful if you use a rescore query + because those are not taken into account by highlighting by + default. + :arg max_fragment_length: + :arg max_analyzed_offset: If set to a non-negative value, highlighting + stops at this defined maximum limit. The rest of the text is not + processed, thus not highlighted and no error is returned The + `max_analyzed_offset` query setting does not override the + `index.highlight.max_analyzed_offset` setting, which prevails when + it’s set to lower value than the query setting. + :arg no_match_size: The amount of text you want to return from the + beginning of the field if there are no matching fragments to + highlight. + :arg number_of_fragments: The maximum number of fragments to return. + If the number of fragments is set to `0`, no fragments are + returned. Instead, the entire field contents are highlighted and + returned. This can be handy when you need to highlight short texts + such as a title or address, but fragmentation is not required. If + `number_of_fragments` is `0`, `fragment_size` is ignored. Defaults + to `5` if omitted. + :arg options: + :arg order: Sorts highlighted fragments by score when set to `score`. + By default, fragments will be output in the order they appear in + the field (order: `none`). Setting this option to `score` will + output the most relevant fragments first. Each highlighter applies + its own logic to compute relevancy scores. Defaults to `none` if + omitted. + :arg phrase_limit: Controls the number of matching phrases in a + document that are considered. Prevents the `fvh` highlighter from + analyzing too many phrases and consuming too much memory. When + using `matched_fields`, `phrase_limit` phrases per matched field + are considered. Raising the limit increases query time and + consumes more memory. Only supported by the `fvh` highlighter. + Defaults to `256` if omitted. + :arg post_tags: Use in conjunction with `pre_tags` to define the HTML + tags to use for the highlighted text. By default, highlighted text + is wrapped in `` and `` tags. + :arg pre_tags: Use in conjunction with `post_tags` to define the HTML + tags to use for the highlighted text. By default, highlighted text + is wrapped in `` and `` tags. + :arg require_field_match: By default, only fields that contains a + query match are highlighted. Set to `false` to highlight all + fields. Defaults to `True` if omitted. + :arg tags_schema: Set to `styled` to use the built-in tag schema. 
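# Hypothetical sketch of building a highlight section from the options listed
# above. It assumes the same keyword-only constructor pattern used throughout
# this module and an assumed import path; per-field options are passed as dicts.
from elasticsearch.dsl import types  # assumed module path

highlight = types.Highlight(
    fields={"title": {}},   # `fields` is the only required argument
    pre_tags=["<mark>"],    # override the default <em>...</em> wrapping
    post_tags=["</mark>"],
    number_of_fragments=3,
)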
+ """ + + fields: Union[ + Mapping[Union[str, InstrumentedField], "HighlightField"], + Dict[str, Any], + DefaultType, + ] + encoder: Union[Literal["default", "html"], DefaultType] + type: Union[Literal["plain", "fvh", "unified"], DefaultType] + boundary_chars: Union[str, DefaultType] + boundary_max_scan: Union[int, DefaultType] + boundary_scanner: Union[Literal["chars", "sentence", "word"], DefaultType] + boundary_scanner_locale: Union[str, DefaultType] + force_source: Union[bool, DefaultType] + fragmenter: Union[Literal["simple", "span"], DefaultType] + fragment_size: Union[int, DefaultType] + highlight_filter: Union[bool, DefaultType] + highlight_query: Union[Query, DefaultType] + max_fragment_length: Union[int, DefaultType] + max_analyzed_offset: Union[int, DefaultType] + no_match_size: Union[int, DefaultType] + number_of_fragments: Union[int, DefaultType] + options: Union[Mapping[str, Any], DefaultType] + order: Union[Literal["score"], DefaultType] + phrase_limit: Union[int, DefaultType] + post_tags: Union[Sequence[str], DefaultType] + pre_tags: Union[Sequence[str], DefaultType] + require_field_match: Union[bool, DefaultType] + tags_schema: Union[Literal["styled"], DefaultType] + + def __init__( + self, + *, + fields: Union[ + Mapping[Union[str, InstrumentedField], "HighlightField"], + Dict[str, Any], + DefaultType, + ] = DEFAULT, + encoder: Union[Literal["default", "html"], DefaultType] = DEFAULT, + type: Union[Literal["plain", "fvh", "unified"], DefaultType] = DEFAULT, + boundary_chars: Union[str, DefaultType] = DEFAULT, + boundary_max_scan: Union[int, DefaultType] = DEFAULT, + boundary_scanner: Union[ + Literal["chars", "sentence", "word"], DefaultType + ] = DEFAULT, + boundary_scanner_locale: Union[str, DefaultType] = DEFAULT, + force_source: Union[bool, DefaultType] = DEFAULT, + fragmenter: Union[Literal["simple", "span"], DefaultType] = DEFAULT, + fragment_size: Union[int, DefaultType] = DEFAULT, + highlight_filter: Union[bool, DefaultType] = DEFAULT, + highlight_query: Union[Query, DefaultType] = DEFAULT, + max_fragment_length: Union[int, DefaultType] = DEFAULT, + max_analyzed_offset: Union[int, DefaultType] = DEFAULT, + no_match_size: Union[int, DefaultType] = DEFAULT, + number_of_fragments: Union[int, DefaultType] = DEFAULT, + options: Union[Mapping[str, Any], DefaultType] = DEFAULT, + order: Union[Literal["score"], DefaultType] = DEFAULT, + phrase_limit: Union[int, DefaultType] = DEFAULT, + post_tags: Union[Sequence[str], DefaultType] = DEFAULT, + pre_tags: Union[Sequence[str], DefaultType] = DEFAULT, + require_field_match: Union[bool, DefaultType] = DEFAULT, + tags_schema: Union[Literal["styled"], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if fields is not DEFAULT: + kwargs["fields"] = str(fields) + if encoder is not DEFAULT: + kwargs["encoder"] = encoder + if type is not DEFAULT: + kwargs["type"] = type + if boundary_chars is not DEFAULT: + kwargs["boundary_chars"] = boundary_chars + if boundary_max_scan is not DEFAULT: + kwargs["boundary_max_scan"] = boundary_max_scan + if boundary_scanner is not DEFAULT: + kwargs["boundary_scanner"] = boundary_scanner + if boundary_scanner_locale is not DEFAULT: + kwargs["boundary_scanner_locale"] = boundary_scanner_locale + if force_source is not DEFAULT: + kwargs["force_source"] = force_source + if fragmenter is not DEFAULT: + kwargs["fragmenter"] = fragmenter + if fragment_size is not DEFAULT: + kwargs["fragment_size"] = fragment_size + if highlight_filter is not DEFAULT: + kwargs["highlight_filter"] = highlight_filter + if 
highlight_query is not DEFAULT: + kwargs["highlight_query"] = highlight_query + if max_fragment_length is not DEFAULT: + kwargs["max_fragment_length"] = max_fragment_length + if max_analyzed_offset is not DEFAULT: + kwargs["max_analyzed_offset"] = max_analyzed_offset + if no_match_size is not DEFAULT: + kwargs["no_match_size"] = no_match_size + if number_of_fragments is not DEFAULT: + kwargs["number_of_fragments"] = number_of_fragments + if options is not DEFAULT: + kwargs["options"] = options + if order is not DEFAULT: + kwargs["order"] = order + if phrase_limit is not DEFAULT: + kwargs["phrase_limit"] = phrase_limit + if post_tags is not DEFAULT: + kwargs["post_tags"] = post_tags + if pre_tags is not DEFAULT: + kwargs["pre_tags"] = pre_tags + if require_field_match is not DEFAULT: + kwargs["require_field_match"] = require_field_match + if tags_schema is not DEFAULT: + kwargs["tags_schema"] = tags_schema + super().__init__(kwargs) + + +class HighlightField(AttrDict[Any]): + """ + :arg fragment_offset: + :arg matched_fields: + :arg type: + :arg boundary_chars: A string that contains each boundary character. + Defaults to `.,!? \t\n` if omitted. + :arg boundary_max_scan: How far to scan for boundary characters. + Defaults to `20` if omitted. + :arg boundary_scanner: Specifies how to break the highlighted + fragments: chars, sentence, or word. Only valid for the unified + and fvh highlighters. Defaults to `sentence` for the `unified` + highlighter. Defaults to `chars` for the `fvh` highlighter. + :arg boundary_scanner_locale: Controls which locale is used to search + for sentence and word boundaries. This parameter takes a form of a + language tag, for example: `"en-US"`, `"fr-FR"`, `"ja-JP"`. + Defaults to `Locale.ROOT` if omitted. + :arg force_source: + :arg fragmenter: Specifies how text should be broken up in highlight + snippets: `simple` or `span`. Only valid for the `plain` + highlighter. Defaults to `span` if omitted. + :arg fragment_size: The size of the highlighted fragment in + characters. Defaults to `100` if omitted. + :arg highlight_filter: + :arg highlight_query: Highlight matches for a query other than the + search query. This is especially useful if you use a rescore query + because those are not taken into account by highlighting by + default. + :arg max_fragment_length: + :arg max_analyzed_offset: If set to a non-negative value, highlighting + stops at this defined maximum limit. The rest of the text is not + processed, thus not highlighted and no error is returned The + `max_analyzed_offset` query setting does not override the + `index.highlight.max_analyzed_offset` setting, which prevails when + it’s set to lower value than the query setting. + :arg no_match_size: The amount of text you want to return from the + beginning of the field if there are no matching fragments to + highlight. + :arg number_of_fragments: The maximum number of fragments to return. + If the number of fragments is set to `0`, no fragments are + returned. Instead, the entire field contents are highlighted and + returned. This can be handy when you need to highlight short texts + such as a title or address, but fragmentation is not required. If + `number_of_fragments` is `0`, `fragment_size` is ignored. Defaults + to `5` if omitted. + :arg options: + :arg order: Sorts highlighted fragments by score when set to `score`. + By default, fragments will be output in the order they appear in + the field (order: `none`). Setting this option to `score` will + output the most relevant fragments first. 
Each highlighter applies + its own logic to compute relevancy scores. Defaults to `none` if + omitted. + :arg phrase_limit: Controls the number of matching phrases in a + document that are considered. Prevents the `fvh` highlighter from + analyzing too many phrases and consuming too much memory. When + using `matched_fields`, `phrase_limit` phrases per matched field + are considered. Raising the limit increases query time and + consumes more memory. Only supported by the `fvh` highlighter. + Defaults to `256` if omitted. + :arg post_tags: Use in conjunction with `pre_tags` to define the HTML + tags to use for the highlighted text. By default, highlighted text + is wrapped in `` and `` tags. + :arg pre_tags: Use in conjunction with `post_tags` to define the HTML + tags to use for the highlighted text. By default, highlighted text + is wrapped in `` and `` tags. + :arg require_field_match: By default, only fields that contains a + query match are highlighted. Set to `false` to highlight all + fields. Defaults to `True` if omitted. + :arg tags_schema: Set to `styled` to use the built-in tag schema. + """ + + fragment_offset: Union[int, DefaultType] + matched_fields: Union[ + Union[str, InstrumentedField], + Sequence[Union[str, InstrumentedField]], + DefaultType, + ] + type: Union[Literal["plain", "fvh", "unified"], DefaultType] + boundary_chars: Union[str, DefaultType] + boundary_max_scan: Union[int, DefaultType] + boundary_scanner: Union[Literal["chars", "sentence", "word"], DefaultType] + boundary_scanner_locale: Union[str, DefaultType] + force_source: Union[bool, DefaultType] + fragmenter: Union[Literal["simple", "span"], DefaultType] + fragment_size: Union[int, DefaultType] + highlight_filter: Union[bool, DefaultType] + highlight_query: Union[Query, DefaultType] + max_fragment_length: Union[int, DefaultType] + max_analyzed_offset: Union[int, DefaultType] + no_match_size: Union[int, DefaultType] + number_of_fragments: Union[int, DefaultType] + options: Union[Mapping[str, Any], DefaultType] + order: Union[Literal["score"], DefaultType] + phrase_limit: Union[int, DefaultType] + post_tags: Union[Sequence[str], DefaultType] + pre_tags: Union[Sequence[str], DefaultType] + require_field_match: Union[bool, DefaultType] + tags_schema: Union[Literal["styled"], DefaultType] + + def __init__( + self, + *, + fragment_offset: Union[int, DefaultType] = DEFAULT, + matched_fields: Union[ + Union[str, InstrumentedField], + Sequence[Union[str, InstrumentedField]], + DefaultType, + ] = DEFAULT, + type: Union[Literal["plain", "fvh", "unified"], DefaultType] = DEFAULT, + boundary_chars: Union[str, DefaultType] = DEFAULT, + boundary_max_scan: Union[int, DefaultType] = DEFAULT, + boundary_scanner: Union[ + Literal["chars", "sentence", "word"], DefaultType + ] = DEFAULT, + boundary_scanner_locale: Union[str, DefaultType] = DEFAULT, + force_source: Union[bool, DefaultType] = DEFAULT, + fragmenter: Union[Literal["simple", "span"], DefaultType] = DEFAULT, + fragment_size: Union[int, DefaultType] = DEFAULT, + highlight_filter: Union[bool, DefaultType] = DEFAULT, + highlight_query: Union[Query, DefaultType] = DEFAULT, + max_fragment_length: Union[int, DefaultType] = DEFAULT, + max_analyzed_offset: Union[int, DefaultType] = DEFAULT, + no_match_size: Union[int, DefaultType] = DEFAULT, + number_of_fragments: Union[int, DefaultType] = DEFAULT, + options: Union[Mapping[str, Any], DefaultType] = DEFAULT, + order: Union[Literal["score"], DefaultType] = DEFAULT, + phrase_limit: Union[int, DefaultType] = DEFAULT, + post_tags: 
Union[Sequence[str], DefaultType] = DEFAULT, + pre_tags: Union[Sequence[str], DefaultType] = DEFAULT, + require_field_match: Union[bool, DefaultType] = DEFAULT, + tags_schema: Union[Literal["styled"], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if fragment_offset is not DEFAULT: + kwargs["fragment_offset"] = fragment_offset + if matched_fields is not DEFAULT: + kwargs["matched_fields"] = str(matched_fields) + if type is not DEFAULT: + kwargs["type"] = type + if boundary_chars is not DEFAULT: + kwargs["boundary_chars"] = boundary_chars + if boundary_max_scan is not DEFAULT: + kwargs["boundary_max_scan"] = boundary_max_scan + if boundary_scanner is not DEFAULT: + kwargs["boundary_scanner"] = boundary_scanner + if boundary_scanner_locale is not DEFAULT: + kwargs["boundary_scanner_locale"] = boundary_scanner_locale + if force_source is not DEFAULT: + kwargs["force_source"] = force_source + if fragmenter is not DEFAULT: + kwargs["fragmenter"] = fragmenter + if fragment_size is not DEFAULT: + kwargs["fragment_size"] = fragment_size + if highlight_filter is not DEFAULT: + kwargs["highlight_filter"] = highlight_filter + if highlight_query is not DEFAULT: + kwargs["highlight_query"] = highlight_query + if max_fragment_length is not DEFAULT: + kwargs["max_fragment_length"] = max_fragment_length + if max_analyzed_offset is not DEFAULT: + kwargs["max_analyzed_offset"] = max_analyzed_offset + if no_match_size is not DEFAULT: + kwargs["no_match_size"] = no_match_size + if number_of_fragments is not DEFAULT: + kwargs["number_of_fragments"] = number_of_fragments + if options is not DEFAULT: + kwargs["options"] = options + if order is not DEFAULT: + kwargs["order"] = order + if phrase_limit is not DEFAULT: + kwargs["phrase_limit"] = phrase_limit + if post_tags is not DEFAULT: + kwargs["post_tags"] = post_tags + if pre_tags is not DEFAULT: + kwargs["pre_tags"] = pre_tags + if require_field_match is not DEFAULT: + kwargs["require_field_match"] = require_field_match + if tags_schema is not DEFAULT: + kwargs["tags_schema"] = tags_schema + super().__init__(kwargs) + + +class HoltLinearModelSettings(AttrDict[Any]): + """ + :arg alpha: + :arg beta: + """ + + alpha: Union[float, DefaultType] + beta: Union[float, DefaultType] + + def __init__( + self, + *, + alpha: Union[float, DefaultType] = DEFAULT, + beta: Union[float, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if alpha is not DEFAULT: + kwargs["alpha"] = alpha + if beta is not DEFAULT: + kwargs["beta"] = beta + super().__init__(kwargs) + + +class HoltWintersModelSettings(AttrDict[Any]): + """ + :arg alpha: + :arg beta: + :arg gamma: + :arg pad: + :arg period: + :arg type: + """ + + alpha: Union[float, DefaultType] + beta: Union[float, DefaultType] + gamma: Union[float, DefaultType] + pad: Union[bool, DefaultType] + period: Union[int, DefaultType] + type: Union[Literal["add", "mult"], DefaultType] + + def __init__( + self, + *, + alpha: Union[float, DefaultType] = DEFAULT, + beta: Union[float, DefaultType] = DEFAULT, + gamma: Union[float, DefaultType] = DEFAULT, + pad: Union[bool, DefaultType] = DEFAULT, + period: Union[int, DefaultType] = DEFAULT, + type: Union[Literal["add", "mult"], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if alpha is not DEFAULT: + kwargs["alpha"] = alpha + if beta is not DEFAULT: + kwargs["beta"] = beta + if gamma is not DEFAULT: + kwargs["gamma"] = gamma + if pad is not DEFAULT: + kwargs["pad"] = pad + if period is not DEFAULT: + kwargs["period"] = period + if type is not DEFAULT: + kwargs["type"] = type + 
super().__init__(kwargs) + + +class InferenceConfigContainer(AttrDict[Any]): + """ + :arg regression: Regression configuration for inference. + :arg classification: Classification configuration for inference. + """ + + regression: Union["RegressionInferenceOptions", Dict[str, Any], DefaultType] + classification: Union["ClassificationInferenceOptions", Dict[str, Any], DefaultType] + + def __init__( + self, + *, + regression: Union[ + "RegressionInferenceOptions", Dict[str, Any], DefaultType + ] = DEFAULT, + classification: Union[ + "ClassificationInferenceOptions", Dict[str, Any], DefaultType + ] = DEFAULT, + **kwargs: Any, + ): + if regression is not DEFAULT: + kwargs["regression"] = regression + if classification is not DEFAULT: + kwargs["classification"] = classification + super().__init__(kwargs) + + +class InnerHits(AttrDict[Any]): + """ + :arg name: The name for the particular inner hit definition in the + response. Useful when a search request contains multiple inner + hits. + :arg size: The maximum number of hits to return per `inner_hits`. + Defaults to `3` if omitted. + :arg from: Inner hit starting document offset. + :arg collapse: + :arg docvalue_fields: + :arg explain: + :arg highlight: + :arg ignore_unmapped: + :arg script_fields: + :arg seq_no_primary_term: + :arg fields: + :arg sort: How the inner hits should be sorted per `inner_hits`. By + default, inner hits are sorted by score. + :arg _source: + :arg stored_fields: + :arg track_scores: + :arg version: + """ + + name: Union[str, DefaultType] + size: Union[int, DefaultType] + from_: Union[int, DefaultType] + collapse: Union["FieldCollapse", Dict[str, Any], DefaultType] + docvalue_fields: Union[ + Sequence["FieldAndFormat"], Sequence[Dict[str, Any]], DefaultType + ] + explain: Union[bool, DefaultType] + highlight: Union["Highlight", Dict[str, Any], DefaultType] + ignore_unmapped: Union[bool, DefaultType] + script_fields: Union[ + Mapping[Union[str, InstrumentedField], "ScriptField"], + Dict[str, Any], + DefaultType, + ] + seq_no_primary_term: Union[bool, DefaultType] + fields: Union[ + Union[str, InstrumentedField], + Sequence[Union[str, InstrumentedField]], + DefaultType, + ] + sort: Union[ + Union[Union[str, InstrumentedField], "SortOptions"], + Sequence[Union[Union[str, InstrumentedField], "SortOptions"]], + Dict[str, Any], + DefaultType, + ] + _source: Union[bool, "SourceFilter", Dict[str, Any], DefaultType] + stored_fields: Union[ + Union[str, InstrumentedField], + Sequence[Union[str, InstrumentedField]], + DefaultType, + ] + track_scores: Union[bool, DefaultType] + version: Union[bool, DefaultType] + + def __init__( + self, + *, + name: Union[str, DefaultType] = DEFAULT, + size: Union[int, DefaultType] = DEFAULT, + from_: Union[int, DefaultType] = DEFAULT, + collapse: Union["FieldCollapse", Dict[str, Any], DefaultType] = DEFAULT, + docvalue_fields: Union[ + Sequence["FieldAndFormat"], Sequence[Dict[str, Any]], DefaultType + ] = DEFAULT, + explain: Union[bool, DefaultType] = DEFAULT, + highlight: Union["Highlight", Dict[str, Any], DefaultType] = DEFAULT, + ignore_unmapped: Union[bool, DefaultType] = DEFAULT, + script_fields: Union[ + Mapping[Union[str, InstrumentedField], "ScriptField"], + Dict[str, Any], + DefaultType, + ] = DEFAULT, + seq_no_primary_term: Union[bool, DefaultType] = DEFAULT, + fields: Union[ + Union[str, InstrumentedField], + Sequence[Union[str, InstrumentedField]], + DefaultType, + ] = DEFAULT, + sort: Union[ + Union[Union[str, InstrumentedField], "SortOptions"], + Sequence[Union[Union[str, 
InstrumentedField], "SortOptions"]], + Dict[str, Any], + DefaultType, + ] = DEFAULT, + _source: Union[bool, "SourceFilter", Dict[str, Any], DefaultType] = DEFAULT, + stored_fields: Union[ + Union[str, InstrumentedField], + Sequence[Union[str, InstrumentedField]], + DefaultType, + ] = DEFAULT, + track_scores: Union[bool, DefaultType] = DEFAULT, + version: Union[bool, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if name is not DEFAULT: + kwargs["name"] = name + if size is not DEFAULT: + kwargs["size"] = size + if from_ is not DEFAULT: + kwargs["from_"] = from_ + if collapse is not DEFAULT: + kwargs["collapse"] = collapse + if docvalue_fields is not DEFAULT: + kwargs["docvalue_fields"] = docvalue_fields + if explain is not DEFAULT: + kwargs["explain"] = explain + if highlight is not DEFAULT: + kwargs["highlight"] = highlight + if ignore_unmapped is not DEFAULT: + kwargs["ignore_unmapped"] = ignore_unmapped + if script_fields is not DEFAULT: + kwargs["script_fields"] = str(script_fields) + if seq_no_primary_term is not DEFAULT: + kwargs["seq_no_primary_term"] = seq_no_primary_term + if fields is not DEFAULT: + kwargs["fields"] = str(fields) + if sort is not DEFAULT: + kwargs["sort"] = str(sort) + if _source is not DEFAULT: + kwargs["_source"] = _source + if stored_fields is not DEFAULT: + kwargs["stored_fields"] = str(stored_fields) + if track_scores is not DEFAULT: + kwargs["track_scores"] = track_scores + if version is not DEFAULT: + kwargs["version"] = version + super().__init__(kwargs) + + +class IntervalsAllOf(AttrDict[Any]): + """ + :arg intervals: (required) An array of rules to combine. All rules + must produce a match in a document for the overall source to + match. + :arg max_gaps: Maximum number of positions between the matching terms. + Intervals produced by the rules further apart than this are not + considered matches. Defaults to `-1` if omitted. + :arg ordered: If `true`, intervals produced by the rules should appear + in the order in which they are specified. + :arg filter: Rule used to filter returned intervals. + """ + + intervals: Union[ + Sequence["IntervalsContainer"], Sequence[Dict[str, Any]], DefaultType + ] + max_gaps: Union[int, DefaultType] + ordered: Union[bool, DefaultType] + filter: Union["IntervalsFilter", Dict[str, Any], DefaultType] + + def __init__( + self, + *, + intervals: Union[ + Sequence["IntervalsContainer"], Sequence[Dict[str, Any]], DefaultType + ] = DEFAULT, + max_gaps: Union[int, DefaultType] = DEFAULT, + ordered: Union[bool, DefaultType] = DEFAULT, + filter: Union["IntervalsFilter", Dict[str, Any], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if intervals is not DEFAULT: + kwargs["intervals"] = intervals + if max_gaps is not DEFAULT: + kwargs["max_gaps"] = max_gaps + if ordered is not DEFAULT: + kwargs["ordered"] = ordered + if filter is not DEFAULT: + kwargs["filter"] = filter + super().__init__(kwargs) + + +class IntervalsAnyOf(AttrDict[Any]): + """ + :arg intervals: (required) An array of rules to match. + :arg filter: Rule used to filter returned intervals. 
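# Minimal sketch of an inner-hits block, assuming the module path below; only
# constructor arguments documented above are used.
from elasticsearch.dsl import types  # assumed module path

# Return up to three hits per group, labelled so they can be told apart when
# a request defines several inner-hits sections.
inner = types.InnerHits(name="top_comments", size=3)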
+ """ + + intervals: Union[ + Sequence["IntervalsContainer"], Sequence[Dict[str, Any]], DefaultType + ] + filter: Union["IntervalsFilter", Dict[str, Any], DefaultType] + + def __init__( + self, + *, + intervals: Union[ + Sequence["IntervalsContainer"], Sequence[Dict[str, Any]], DefaultType + ] = DEFAULT, + filter: Union["IntervalsFilter", Dict[str, Any], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if intervals is not DEFAULT: + kwargs["intervals"] = intervals + if filter is not DEFAULT: + kwargs["filter"] = filter + super().__init__(kwargs) + + +class IntervalsContainer(AttrDict[Any]): + """ + :arg all_of: Returns matches that span a combination of other rules. + :arg any_of: Returns intervals produced by any of its sub-rules. + :arg fuzzy: Matches analyzed text. + :arg match: Matches analyzed text. + :arg prefix: Matches terms that start with a specified set of + characters. + :arg wildcard: Matches terms using a wildcard pattern. + """ + + all_of: Union["IntervalsAllOf", Dict[str, Any], DefaultType] + any_of: Union["IntervalsAnyOf", Dict[str, Any], DefaultType] + fuzzy: Union["IntervalsFuzzy", Dict[str, Any], DefaultType] + match: Union["IntervalsMatch", Dict[str, Any], DefaultType] + prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType] + wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType] + + def __init__( + self, + *, + all_of: Union["IntervalsAllOf", Dict[str, Any], DefaultType] = DEFAULT, + any_of: Union["IntervalsAnyOf", Dict[str, Any], DefaultType] = DEFAULT, + fuzzy: Union["IntervalsFuzzy", Dict[str, Any], DefaultType] = DEFAULT, + match: Union["IntervalsMatch", Dict[str, Any], DefaultType] = DEFAULT, + prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType] = DEFAULT, + wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if all_of is not DEFAULT: + kwargs["all_of"] = all_of + if any_of is not DEFAULT: + kwargs["any_of"] = any_of + if fuzzy is not DEFAULT: + kwargs["fuzzy"] = fuzzy + if match is not DEFAULT: + kwargs["match"] = match + if prefix is not DEFAULT: + kwargs["prefix"] = prefix + if wildcard is not DEFAULT: + kwargs["wildcard"] = wildcard + super().__init__(kwargs) + + +class IntervalsFilter(AttrDict[Any]): + """ + :arg after: Query used to return intervals that follow an interval + from the `filter` rule. + :arg before: Query used to return intervals that occur before an + interval from the `filter` rule. + :arg contained_by: Query used to return intervals contained by an + interval from the `filter` rule. + :arg containing: Query used to return intervals that contain an + interval from the `filter` rule. + :arg not_contained_by: Query used to return intervals that are **not** + contained by an interval from the `filter` rule. + :arg not_containing: Query used to return intervals that do **not** + contain an interval from the `filter` rule. + :arg not_overlapping: Query used to return intervals that do **not** + overlap with an interval from the `filter` rule. + :arg overlapping: Query used to return intervals that overlap with an + interval from the `filter` rule. + :arg script: Script used to return matching documents. This script + must return a boolean value: `true` or `false`. 
+ """ + + after: Union["IntervalsContainer", Dict[str, Any], DefaultType] + before: Union["IntervalsContainer", Dict[str, Any], DefaultType] + contained_by: Union["IntervalsContainer", Dict[str, Any], DefaultType] + containing: Union["IntervalsContainer", Dict[str, Any], DefaultType] + not_contained_by: Union["IntervalsContainer", Dict[str, Any], DefaultType] + not_containing: Union["IntervalsContainer", Dict[str, Any], DefaultType] + not_overlapping: Union["IntervalsContainer", Dict[str, Any], DefaultType] + overlapping: Union["IntervalsContainer", Dict[str, Any], DefaultType] + script: Union["Script", Dict[str, Any], DefaultType] + + def __init__( + self, + *, + after: Union["IntervalsContainer", Dict[str, Any], DefaultType] = DEFAULT, + before: Union["IntervalsContainer", Dict[str, Any], DefaultType] = DEFAULT, + contained_by: Union[ + "IntervalsContainer", Dict[str, Any], DefaultType + ] = DEFAULT, + containing: Union["IntervalsContainer", Dict[str, Any], DefaultType] = DEFAULT, + not_contained_by: Union[ + "IntervalsContainer", Dict[str, Any], DefaultType + ] = DEFAULT, + not_containing: Union[ + "IntervalsContainer", Dict[str, Any], DefaultType + ] = DEFAULT, + not_overlapping: Union[ + "IntervalsContainer", Dict[str, Any], DefaultType + ] = DEFAULT, + overlapping: Union["IntervalsContainer", Dict[str, Any], DefaultType] = DEFAULT, + script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if after is not DEFAULT: + kwargs["after"] = after + if before is not DEFAULT: + kwargs["before"] = before + if contained_by is not DEFAULT: + kwargs["contained_by"] = contained_by + if containing is not DEFAULT: + kwargs["containing"] = containing + if not_contained_by is not DEFAULT: + kwargs["not_contained_by"] = not_contained_by + if not_containing is not DEFAULT: + kwargs["not_containing"] = not_containing + if not_overlapping is not DEFAULT: + kwargs["not_overlapping"] = not_overlapping + if overlapping is not DEFAULT: + kwargs["overlapping"] = overlapping + if script is not DEFAULT: + kwargs["script"] = script + super().__init__(kwargs) + + +class IntervalsFuzzy(AttrDict[Any]): + """ + :arg term: (required) The term to match. + :arg analyzer: Analyzer used to normalize the term. + :arg fuzziness: Maximum edit distance allowed for matching. Defaults + to `auto` if omitted. + :arg prefix_length: Number of beginning characters left unchanged when + creating expansions. + :arg transpositions: Indicates whether edits include transpositions of + two adjacent characters (for example, `ab` to `ba`). Defaults to + `True` if omitted. + :arg use_field: If specified, match intervals from this field rather + than the top-level field. The `term` is normalized using the + search analyzer from this field, unless `analyzer` is specified + separately. 
+ """ + + term: Union[str, DefaultType] + analyzer: Union[str, DefaultType] + fuzziness: Union[str, int, DefaultType] + prefix_length: Union[int, DefaultType] + transpositions: Union[bool, DefaultType] + use_field: Union[str, InstrumentedField, DefaultType] + + def __init__( + self, + *, + term: Union[str, DefaultType] = DEFAULT, + analyzer: Union[str, DefaultType] = DEFAULT, + fuzziness: Union[str, int, DefaultType] = DEFAULT, + prefix_length: Union[int, DefaultType] = DEFAULT, + transpositions: Union[bool, DefaultType] = DEFAULT, + use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if term is not DEFAULT: + kwargs["term"] = term + if analyzer is not DEFAULT: + kwargs["analyzer"] = analyzer + if fuzziness is not DEFAULT: + kwargs["fuzziness"] = fuzziness + if prefix_length is not DEFAULT: + kwargs["prefix_length"] = prefix_length + if transpositions is not DEFAULT: + kwargs["transpositions"] = transpositions + if use_field is not DEFAULT: + kwargs["use_field"] = str(use_field) + super().__init__(kwargs) + + +class IntervalsMatch(AttrDict[Any]): + """ + :arg query: (required) Text you wish to find in the provided field. + :arg analyzer: Analyzer used to analyze terms in the query. + :arg max_gaps: Maximum number of positions between the matching terms. + Terms further apart than this are not considered matches. Defaults + to `-1` if omitted. + :arg ordered: If `true`, matching terms must appear in their specified + order. + :arg use_field: If specified, match intervals from this field rather + than the top-level field. The `term` is normalized using the + search analyzer from this field, unless `analyzer` is specified + separately. + :arg filter: An optional interval filter. + """ + + query: Union[str, DefaultType] + analyzer: Union[str, DefaultType] + max_gaps: Union[int, DefaultType] + ordered: Union[bool, DefaultType] + use_field: Union[str, InstrumentedField, DefaultType] + filter: Union["IntervalsFilter", Dict[str, Any], DefaultType] + + def __init__( + self, + *, + query: Union[str, DefaultType] = DEFAULT, + analyzer: Union[str, DefaultType] = DEFAULT, + max_gaps: Union[int, DefaultType] = DEFAULT, + ordered: Union[bool, DefaultType] = DEFAULT, + use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT, + filter: Union["IntervalsFilter", Dict[str, Any], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if query is not DEFAULT: + kwargs["query"] = query + if analyzer is not DEFAULT: + kwargs["analyzer"] = analyzer + if max_gaps is not DEFAULT: + kwargs["max_gaps"] = max_gaps + if ordered is not DEFAULT: + kwargs["ordered"] = ordered + if use_field is not DEFAULT: + kwargs["use_field"] = str(use_field) + if filter is not DEFAULT: + kwargs["filter"] = filter + super().__init__(kwargs) + + +class IntervalsPrefix(AttrDict[Any]): + """ + :arg prefix: (required) Beginning characters of terms you wish to find + in the top-level field. + :arg analyzer: Analyzer used to analyze the `prefix`. + :arg use_field: If specified, match intervals from this field rather + than the top-level field. The `prefix` is normalized using the + search analyzer from this field, unless `analyzer` is specified + separately. 
+ """ + + prefix: Union[str, DefaultType] + analyzer: Union[str, DefaultType] + use_field: Union[str, InstrumentedField, DefaultType] + + def __init__( + self, + *, + prefix: Union[str, DefaultType] = DEFAULT, + analyzer: Union[str, DefaultType] = DEFAULT, + use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if prefix is not DEFAULT: + kwargs["prefix"] = prefix + if analyzer is not DEFAULT: + kwargs["analyzer"] = analyzer + if use_field is not DEFAULT: + kwargs["use_field"] = str(use_field) + super().__init__(kwargs) + + +class IntervalsQuery(AttrDict[Any]): + """ + :arg all_of: Returns matches that span a combination of other rules. + :arg any_of: Returns intervals produced by any of its sub-rules. + :arg fuzzy: Matches terms that are similar to the provided term, + within an edit distance defined by `fuzziness`. + :arg match: Matches analyzed text. + :arg prefix: Matches terms that start with a specified set of + characters. + :arg wildcard: Matches terms using a wildcard pattern. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + all_of: Union["IntervalsAllOf", Dict[str, Any], DefaultType] + any_of: Union["IntervalsAnyOf", Dict[str, Any], DefaultType] + fuzzy: Union["IntervalsFuzzy", Dict[str, Any], DefaultType] + match: Union["IntervalsMatch", Dict[str, Any], DefaultType] + prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType] + wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + all_of: Union["IntervalsAllOf", Dict[str, Any], DefaultType] = DEFAULT, + any_of: Union["IntervalsAnyOf", Dict[str, Any], DefaultType] = DEFAULT, + fuzzy: Union["IntervalsFuzzy", Dict[str, Any], DefaultType] = DEFAULT, + match: Union["IntervalsMatch", Dict[str, Any], DefaultType] = DEFAULT, + prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType] = DEFAULT, + wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if all_of is not DEFAULT: + kwargs["all_of"] = all_of + if any_of is not DEFAULT: + kwargs["any_of"] = any_of + if fuzzy is not DEFAULT: + kwargs["fuzzy"] = fuzzy + if match is not DEFAULT: + kwargs["match"] = match + if prefix is not DEFAULT: + kwargs["prefix"] = prefix + if wildcard is not DEFAULT: + kwargs["wildcard"] = wildcard + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(kwargs) + + +class IntervalsWildcard(AttrDict[Any]): + """ + :arg pattern: (required) Wildcard pattern used to find matching terms. + :arg analyzer: Analyzer used to analyze the `pattern`. Defaults to the + top-level field's analyzer. + :arg use_field: If specified, match intervals from this field rather + than the top-level field. The `pattern` is normalized using the + search analyzer from this field, unless `analyzer` is specified + separately. 
+ """ + + pattern: Union[str, DefaultType] + analyzer: Union[str, DefaultType] + use_field: Union[str, InstrumentedField, DefaultType] + + def __init__( + self, + *, + pattern: Union[str, DefaultType] = DEFAULT, + analyzer: Union[str, DefaultType] = DEFAULT, + use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if pattern is not DEFAULT: + kwargs["pattern"] = pattern + if analyzer is not DEFAULT: + kwargs["analyzer"] = analyzer + if use_field is not DEFAULT: + kwargs["use_field"] = str(use_field) + super().__init__(kwargs) + + +class IpRangeAggregationRange(AttrDict[Any]): + """ + :arg from: Start of the range. + :arg mask: IP range defined as a CIDR mask. + :arg to: End of the range. + """ + + from_: Union[str, None, DefaultType] + mask: Union[str, DefaultType] + to: Union[str, None, DefaultType] + + def __init__( + self, + *, + from_: Union[str, None, DefaultType] = DEFAULT, + mask: Union[str, DefaultType] = DEFAULT, + to: Union[str, None, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if from_ is not DEFAULT: + kwargs["from_"] = from_ + if mask is not DEFAULT: + kwargs["mask"] = mask + if to is not DEFAULT: + kwargs["to"] = to + super().__init__(kwargs) + + +class LatLonGeoLocation(AttrDict[Any]): + """ + :arg lat: (required) Latitude + :arg lon: (required) Longitude + """ + + lat: Union[float, DefaultType] + lon: Union[float, DefaultType] + + def __init__( + self, + *, + lat: Union[float, DefaultType] = DEFAULT, + lon: Union[float, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if lat is not DEFAULT: + kwargs["lat"] = lat + if lon is not DEFAULT: + kwargs["lon"] = lon + super().__init__(kwargs) + + +class LikeDocument(AttrDict[Any]): + """ + :arg doc: A document not present in the index. + :arg fields: + :arg _id: ID of a document. + :arg _index: Index of a document. + :arg per_field_analyzer: Overrides the default analyzer. + :arg routing: + :arg version: + :arg version_type: Defaults to `'internal'` if omitted. 
+ """ + + doc: Any + fields: Union[Sequence[Union[str, InstrumentedField]], DefaultType] + _id: Union[str, DefaultType] + _index: Union[str, DefaultType] + per_field_analyzer: Union[Mapping[Union[str, InstrumentedField], str], DefaultType] + routing: Union[str, DefaultType] + version: Union[int, DefaultType] + version_type: Union[ + Literal["internal", "external", "external_gte", "force"], DefaultType + ] + + def __init__( + self, + *, + doc: Any = DEFAULT, + fields: Union[Sequence[Union[str, InstrumentedField]], DefaultType] = DEFAULT, + _id: Union[str, DefaultType] = DEFAULT, + _index: Union[str, DefaultType] = DEFAULT, + per_field_analyzer: Union[ + Mapping[Union[str, InstrumentedField], str], DefaultType + ] = DEFAULT, + routing: Union[str, DefaultType] = DEFAULT, + version: Union[int, DefaultType] = DEFAULT, + version_type: Union[ + Literal["internal", "external", "external_gte", "force"], DefaultType + ] = DEFAULT, + **kwargs: Any, + ): + if doc is not DEFAULT: + kwargs["doc"] = doc + if fields is not DEFAULT: + kwargs["fields"] = str(fields) + if _id is not DEFAULT: + kwargs["_id"] = _id + if _index is not DEFAULT: + kwargs["_index"] = _index + if per_field_analyzer is not DEFAULT: + kwargs["per_field_analyzer"] = str(per_field_analyzer) + if routing is not DEFAULT: + kwargs["routing"] = routing + if version is not DEFAULT: + kwargs["version"] = version + if version_type is not DEFAULT: + kwargs["version_type"] = version_type + super().__init__(kwargs) + + +class MatchBoolPrefixQuery(AttrDict[Any]): + """ + :arg query: (required) Terms you wish to find in the provided field. + The last term is used in a prefix query. + :arg analyzer: Analyzer used to convert the text in the query value + into tokens. + :arg fuzziness: Maximum edit distance allowed for matching. Can be + applied to the term subqueries constructed for all terms but the + final term. + :arg fuzzy_rewrite: Method used to rewrite the query. Can be applied + to the term subqueries constructed for all terms but the final + term. + :arg fuzzy_transpositions: If `true`, edits for fuzzy matching include + transpositions of two adjacent characters (for example, `ab` to + `ba`). Can be applied to the term subqueries constructed for all + terms but the final term. Defaults to `True` if omitted. + :arg max_expansions: Maximum number of terms to which the query will + expand. Can be applied to the term subqueries constructed for all + terms but the final term. Defaults to `50` if omitted. + :arg minimum_should_match: Minimum number of clauses that must match + for a document to be returned. Applied to the constructed bool + query. + :arg operator: Boolean logic used to interpret text in the query + value. Applied to the constructed bool query. Defaults to `'or'` + if omitted. + :arg prefix_length: Number of beginning characters left unchanged for + fuzzy matching. Can be applied to the term subqueries constructed + for all terms but the final term. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ + + query: Union[str, DefaultType] + analyzer: Union[str, DefaultType] + fuzziness: Union[str, int, DefaultType] + fuzzy_rewrite: Union[str, DefaultType] + fuzzy_transpositions: Union[bool, DefaultType] + max_expansions: Union[int, DefaultType] + minimum_should_match: Union[int, str, DefaultType] + operator: Union[Literal["and", "or"], DefaultType] + prefix_length: Union[int, DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + query: Union[str, DefaultType] = DEFAULT, + analyzer: Union[str, DefaultType] = DEFAULT, + fuzziness: Union[str, int, DefaultType] = DEFAULT, + fuzzy_rewrite: Union[str, DefaultType] = DEFAULT, + fuzzy_transpositions: Union[bool, DefaultType] = DEFAULT, + max_expansions: Union[int, DefaultType] = DEFAULT, + minimum_should_match: Union[int, str, DefaultType] = DEFAULT, + operator: Union[Literal["and", "or"], DefaultType] = DEFAULT, + prefix_length: Union[int, DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if query is not DEFAULT: + kwargs["query"] = query + if analyzer is not DEFAULT: + kwargs["analyzer"] = analyzer + if fuzziness is not DEFAULT: + kwargs["fuzziness"] = fuzziness + if fuzzy_rewrite is not DEFAULT: + kwargs["fuzzy_rewrite"] = fuzzy_rewrite + if fuzzy_transpositions is not DEFAULT: + kwargs["fuzzy_transpositions"] = fuzzy_transpositions + if max_expansions is not DEFAULT: + kwargs["max_expansions"] = max_expansions + if minimum_should_match is not DEFAULT: + kwargs["minimum_should_match"] = minimum_should_match + if operator is not DEFAULT: + kwargs["operator"] = operator + if prefix_length is not DEFAULT: + kwargs["prefix_length"] = prefix_length + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(kwargs) + + +class MatchPhrasePrefixQuery(AttrDict[Any]): + """ + :arg query: (required) Text you wish to find in the provided field. + :arg analyzer: Analyzer used to convert text in the query value into + tokens. + :arg max_expansions: Maximum number of terms to which the last + provided term of the query value will expand. Defaults to `50` if + omitted. + :arg slop: Maximum number of positions allowed between matching + tokens. + :arg zero_terms_query: Indicates whether no documents are returned if + the analyzer removes all tokens, such as when using a `stop` + filter. Defaults to `none` if omitted. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ + + query: Union[str, DefaultType] + analyzer: Union[str, DefaultType] + max_expansions: Union[int, DefaultType] + slop: Union[int, DefaultType] + zero_terms_query: Union[Literal["all", "none"], DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + query: Union[str, DefaultType] = DEFAULT, + analyzer: Union[str, DefaultType] = DEFAULT, + max_expansions: Union[int, DefaultType] = DEFAULT, + slop: Union[int, DefaultType] = DEFAULT, + zero_terms_query: Union[Literal["all", "none"], DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if query is not DEFAULT: + kwargs["query"] = query + if analyzer is not DEFAULT: + kwargs["analyzer"] = analyzer + if max_expansions is not DEFAULT: + kwargs["max_expansions"] = max_expansions + if slop is not DEFAULT: + kwargs["slop"] = slop + if zero_terms_query is not DEFAULT: + kwargs["zero_terms_query"] = zero_terms_query + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(kwargs) + + +class MatchPhraseQuery(AttrDict[Any]): + """ + :arg query: (required) Query terms that are analyzed and turned into a + phrase query. + :arg analyzer: Analyzer used to convert the text in the query value + into tokens. + :arg slop: Maximum number of positions allowed between matching + tokens. + :arg zero_terms_query: Indicates whether no documents are returned if + the `analyzer` removes all tokens, such as when using a `stop` + filter. Defaults to `'none'` if omitted. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + query: Union[str, DefaultType] + analyzer: Union[str, DefaultType] + slop: Union[int, DefaultType] + zero_terms_query: Union[Literal["all", "none"], DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + query: Union[str, DefaultType] = DEFAULT, + analyzer: Union[str, DefaultType] = DEFAULT, + slop: Union[int, DefaultType] = DEFAULT, + zero_terms_query: Union[Literal["all", "none"], DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if query is not DEFAULT: + kwargs["query"] = query + if analyzer is not DEFAULT: + kwargs["analyzer"] = analyzer + if slop is not DEFAULT: + kwargs["slop"] = slop + if zero_terms_query is not DEFAULT: + kwargs["zero_terms_query"] = zero_terms_query + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(kwargs) + + +class MatchQuery(AttrDict[Any]): + """ + :arg query: (required) Text, number, boolean value or date you wish to + find in the provided field. + :arg analyzer: Analyzer used to convert the text in the query value + into tokens. + :arg auto_generate_synonyms_phrase_query: If `true`, match phrase + queries are automatically created for multi-term synonyms. + Defaults to `True` if omitted. + :arg cutoff_frequency: + :arg fuzziness: Maximum edit distance allowed for matching. + :arg fuzzy_rewrite: Method used to rewrite the query. 
+ :arg fuzzy_transpositions: If `true`, edits for fuzzy matching include + transpositions of two adjacent characters (for example, `ab` to + `ba`). Defaults to `True` if omitted. + :arg lenient: If `true`, format-based errors, such as providing a text + query value for a numeric field, are ignored. + :arg max_expansions: Maximum number of terms to which the query will + expand. Defaults to `50` if omitted. + :arg minimum_should_match: Minimum number of clauses that must match + for a document to be returned. + :arg operator: Boolean logic used to interpret text in the query + value. Defaults to `'or'` if omitted. + :arg prefix_length: Number of beginning characters left unchanged for + fuzzy matching. + :arg zero_terms_query: Indicates whether no documents are returned if + the `analyzer` removes all tokens, such as when using a `stop` + filter. Defaults to `'none'` if omitted. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + query: Union[str, float, bool, DefaultType] + analyzer: Union[str, DefaultType] + auto_generate_synonyms_phrase_query: Union[bool, DefaultType] + cutoff_frequency: Union[float, DefaultType] + fuzziness: Union[str, int, DefaultType] + fuzzy_rewrite: Union[str, DefaultType] + fuzzy_transpositions: Union[bool, DefaultType] + lenient: Union[bool, DefaultType] + max_expansions: Union[int, DefaultType] + minimum_should_match: Union[int, str, DefaultType] + operator: Union[Literal["and", "or"], DefaultType] + prefix_length: Union[int, DefaultType] + zero_terms_query: Union[Literal["all", "none"], DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + query: Union[str, float, bool, DefaultType] = DEFAULT, + analyzer: Union[str, DefaultType] = DEFAULT, + auto_generate_synonyms_phrase_query: Union[bool, DefaultType] = DEFAULT, + cutoff_frequency: Union[float, DefaultType] = DEFAULT, + fuzziness: Union[str, int, DefaultType] = DEFAULT, + fuzzy_rewrite: Union[str, DefaultType] = DEFAULT, + fuzzy_transpositions: Union[bool, DefaultType] = DEFAULT, + lenient: Union[bool, DefaultType] = DEFAULT, + max_expansions: Union[int, DefaultType] = DEFAULT, + minimum_should_match: Union[int, str, DefaultType] = DEFAULT, + operator: Union[Literal["and", "or"], DefaultType] = DEFAULT, + prefix_length: Union[int, DefaultType] = DEFAULT, + zero_terms_query: Union[Literal["all", "none"], DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if query is not DEFAULT: + kwargs["query"] = query + if analyzer is not DEFAULT: + kwargs["analyzer"] = analyzer + if auto_generate_synonyms_phrase_query is not DEFAULT: + kwargs["auto_generate_synonyms_phrase_query"] = ( + auto_generate_synonyms_phrase_query + ) + if cutoff_frequency is not DEFAULT: + kwargs["cutoff_frequency"] = cutoff_frequency + if fuzziness is not DEFAULT: + kwargs["fuzziness"] = fuzziness + if fuzzy_rewrite is not DEFAULT: + kwargs["fuzzy_rewrite"] = fuzzy_rewrite + if fuzzy_transpositions is not DEFAULT: + kwargs["fuzzy_transpositions"] = fuzzy_transpositions + if lenient is not DEFAULT: + kwargs["lenient"] = lenient + if max_expansions is not DEFAULT: + kwargs["max_expansions"] = max_expansions + if 
minimum_should_match is not DEFAULT: + kwargs["minimum_should_match"] = minimum_should_match + if operator is not DEFAULT: + kwargs["operator"] = operator + if prefix_length is not DEFAULT: + kwargs["prefix_length"] = prefix_length + if zero_terms_query is not DEFAULT: + kwargs["zero_terms_query"] = zero_terms_query + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(kwargs) + + +class MultiTermLookup(AttrDict[Any]): + """ + :arg field: (required) A fields from which to retrieve terms. + :arg missing: The value to apply to documents that do not have a + value. By default, documents without a value are ignored. + """ + + field: Union[str, InstrumentedField, DefaultType] + missing: Union[str, int, float, bool, DefaultType] + + def __init__( + self, + *, + field: Union[str, InstrumentedField, DefaultType] = DEFAULT, + missing: Union[str, int, float, bool, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if field is not DEFAULT: + kwargs["field"] = str(field) + if missing is not DEFAULT: + kwargs["missing"] = missing + super().__init__(kwargs) + + +class MutualInformationHeuristic(AttrDict[Any]): + """ + :arg background_is_superset: Set to `false` if you defined a custom + background filter that represents a different set of documents + that you want to compare to. + :arg include_negatives: Set to `false` to filter out the terms that + appear less often in the subset than in documents outside the + subset. + """ + + background_is_superset: Union[bool, DefaultType] + include_negatives: Union[bool, DefaultType] + + def __init__( + self, + *, + background_is_superset: Union[bool, DefaultType] = DEFAULT, + include_negatives: Union[bool, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if background_is_superset is not DEFAULT: + kwargs["background_is_superset"] = background_is_superset + if include_negatives is not DEFAULT: + kwargs["include_negatives"] = include_negatives + super().__init__(kwargs) + + +class NestedSortValue(AttrDict[Any]): + """ + :arg path: (required) + :arg filter: + :arg max_children: + :arg nested: + """ + + path: Union[str, InstrumentedField, DefaultType] + filter: Union[Query, DefaultType] + max_children: Union[int, DefaultType] + nested: Union["NestedSortValue", Dict[str, Any], DefaultType] + + def __init__( + self, + *, + path: Union[str, InstrumentedField, DefaultType] = DEFAULT, + filter: Union[Query, DefaultType] = DEFAULT, + max_children: Union[int, DefaultType] = DEFAULT, + nested: Union["NestedSortValue", Dict[str, Any], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if path is not DEFAULT: + kwargs["path"] = str(path) + if filter is not DEFAULT: + kwargs["filter"] = filter + if max_children is not DEFAULT: + kwargs["max_children"] = max_children + if nested is not DEFAULT: + kwargs["nested"] = nested + super().__init__(kwargs) + + +class PercentageScoreHeuristic(AttrDict[Any]): + pass + + +class PinnedDoc(AttrDict[Any]): + """ + :arg _id: (required) The unique document ID. + :arg _index: (required) The index that contains the document. 
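# Sketch of the full-text match variants defined above, assuming the module
# path below; argument values follow the documented literals and defaults.
from elasticsearch.dsl import types  # assumed module path

# Require every term to match, with automatic typo tolerance.
mq = types.MatchQuery(query="vector search", operator="and", fuzziness="AUTO")

# Phrase variant that tolerates one position of slop between the terms.
phrase = types.MatchPhraseQuery(query="vector search", slop=1)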
+ """ + + _id: Union[str, DefaultType] + _index: Union[str, DefaultType] + + def __init__( + self, + *, + _id: Union[str, DefaultType] = DEFAULT, + _index: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if _id is not DEFAULT: + kwargs["_id"] = _id + if _index is not DEFAULT: + kwargs["_index"] = _index + super().__init__(kwargs) + + +class PrefixQuery(AttrDict[Any]): + """ + :arg value: (required) Beginning characters of terms you wish to find + in the provided field. + :arg rewrite: Method used to rewrite the query. + :arg case_insensitive: Allows ASCII case insensitive matching of the + value with the indexed field values when set to `true`. Default is + `false` which means the case sensitivity of matching depends on + the underlying field’s mapping. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + value: Union[str, DefaultType] + rewrite: Union[str, DefaultType] + case_insensitive: Union[bool, DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + value: Union[str, DefaultType] = DEFAULT, + rewrite: Union[str, DefaultType] = DEFAULT, + case_insensitive: Union[bool, DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if value is not DEFAULT: + kwargs["value"] = value + if rewrite is not DEFAULT: + kwargs["rewrite"] = rewrite + if case_insensitive is not DEFAULT: + kwargs["case_insensitive"] = case_insensitive + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(kwargs) + + +class QueryVectorBuilder(AttrDict[Any]): + """ + :arg text_embedding: + """ + + text_embedding: Union["TextEmbedding", Dict[str, Any], DefaultType] + + def __init__( + self, + *, + text_embedding: Union["TextEmbedding", Dict[str, Any], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if text_embedding is not DEFAULT: + kwargs["text_embedding"] = text_embedding + super().__init__(kwargs) + + +class RankFeatureFunctionLinear(AttrDict[Any]): + pass + + +class RankFeatureFunctionLogarithm(AttrDict[Any]): + """ + :arg scaling_factor: (required) Configurable scaling factor. + """ + + scaling_factor: Union[float, DefaultType] + + def __init__( + self, *, scaling_factor: Union[float, DefaultType] = DEFAULT, **kwargs: Any + ): + if scaling_factor is not DEFAULT: + kwargs["scaling_factor"] = scaling_factor + super().__init__(kwargs) + + +class RankFeatureFunctionSaturation(AttrDict[Any]): + """ + :arg pivot: Configurable pivot value so that the result will be less + than 0.5. + """ + + pivot: Union[float, DefaultType] + + def __init__(self, *, pivot: Union[float, DefaultType] = DEFAULT, **kwargs: Any): + if pivot is not DEFAULT: + kwargs["pivot"] = pivot + super().__init__(kwargs) + + +class RankFeatureFunctionSigmoid(AttrDict[Any]): + """ + :arg pivot: (required) Configurable pivot value so that the result + will be less than 0.5. + :arg exponent: (required) Configurable Exponent. 
+ """ + + pivot: Union[float, DefaultType] + exponent: Union[float, DefaultType] + + def __init__( + self, + *, + pivot: Union[float, DefaultType] = DEFAULT, + exponent: Union[float, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if pivot is not DEFAULT: + kwargs["pivot"] = pivot + if exponent is not DEFAULT: + kwargs["exponent"] = exponent + super().__init__(kwargs) + + +class RegexpQuery(AttrDict[Any]): + """ + :arg value: (required) Regular expression for terms you wish to find + in the provided field. + :arg case_insensitive: Allows case insensitive matching of the regular + expression value with the indexed field values when set to `true`. + When `false`, case sensitivity of matching depends on the + underlying field’s mapping. + :arg flags: Enables optional operators for the regular expression. + :arg max_determinized_states: Maximum number of automaton states + required for the query. Defaults to `10000` if omitted. + :arg rewrite: Method used to rewrite the query. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + value: Union[str, DefaultType] + case_insensitive: Union[bool, DefaultType] + flags: Union[str, DefaultType] + max_determinized_states: Union[int, DefaultType] + rewrite: Union[str, DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + value: Union[str, DefaultType] = DEFAULT, + case_insensitive: Union[bool, DefaultType] = DEFAULT, + flags: Union[str, DefaultType] = DEFAULT, + max_determinized_states: Union[int, DefaultType] = DEFAULT, + rewrite: Union[str, DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if value is not DEFAULT: + kwargs["value"] = value + if case_insensitive is not DEFAULT: + kwargs["case_insensitive"] = case_insensitive + if flags is not DEFAULT: + kwargs["flags"] = flags + if max_determinized_states is not DEFAULT: + kwargs["max_determinized_states"] = max_determinized_states + if rewrite is not DEFAULT: + kwargs["rewrite"] = rewrite + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(kwargs) + + +class RegressionInferenceOptions(AttrDict[Any]): + """ + :arg results_field: The field that is added to incoming documents to + contain the inference prediction. Defaults to predicted_value. + :arg num_top_feature_importance_values: Specifies the maximum number + of feature importance values per document. 
+ """ + + results_field: Union[str, InstrumentedField, DefaultType] + num_top_feature_importance_values: Union[int, DefaultType] + + def __init__( + self, + *, + results_field: Union[str, InstrumentedField, DefaultType] = DEFAULT, + num_top_feature_importance_values: Union[int, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if results_field is not DEFAULT: + kwargs["results_field"] = str(results_field) + if num_top_feature_importance_values is not DEFAULT: + kwargs["num_top_feature_importance_values"] = ( + num_top_feature_importance_values + ) + super().__init__(kwargs) + + +class ScoreSort(AttrDict[Any]): + """ + :arg order: + """ + + order: Union[Literal["asc", "desc"], DefaultType] + + def __init__( + self, + *, + order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if order is not DEFAULT: + kwargs["order"] = order + super().__init__(kwargs) + + +class Script(AttrDict[Any]): + """ + :arg source: The script source. + :arg id: The `id` for a stored script. + :arg params: Specifies any named parameters that are passed into the + script as variables. Use parameters instead of hard-coded values + to decrease compile time. + :arg lang: Specifies the language the script is written in. Defaults + to `painless` if omitted. + :arg options: + """ + + source: Union[str, DefaultType] + id: Union[str, DefaultType] + params: Union[Mapping[str, Any], DefaultType] + lang: Union[Literal["painless", "expression", "mustache", "java"], DefaultType] + options: Union[Mapping[str, str], DefaultType] + + def __init__( + self, + *, + source: Union[str, DefaultType] = DEFAULT, + id: Union[str, DefaultType] = DEFAULT, + params: Union[Mapping[str, Any], DefaultType] = DEFAULT, + lang: Union[ + Literal["painless", "expression", "mustache", "java"], DefaultType + ] = DEFAULT, + options: Union[Mapping[str, str], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if source is not DEFAULT: + kwargs["source"] = source + if id is not DEFAULT: + kwargs["id"] = id + if params is not DEFAULT: + kwargs["params"] = params + if lang is not DEFAULT: + kwargs["lang"] = lang + if options is not DEFAULT: + kwargs["options"] = options + super().__init__(kwargs) + + +class ScriptField(AttrDict[Any]): + """ + :arg script: (required) + :arg ignore_failure: + """ + + script: Union["Script", Dict[str, Any], DefaultType] + ignore_failure: Union[bool, DefaultType] + + def __init__( + self, + *, + script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT, + ignore_failure: Union[bool, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if script is not DEFAULT: + kwargs["script"] = script + if ignore_failure is not DEFAULT: + kwargs["ignore_failure"] = ignore_failure + super().__init__(kwargs) + + +class ScriptSort(AttrDict[Any]): + """ + :arg script: (required) + :arg order: + :arg type: + :arg mode: + :arg nested: + """ + + script: Union["Script", Dict[str, Any], DefaultType] + order: Union[Literal["asc", "desc"], DefaultType] + type: Union[Literal["string", "number", "version"], DefaultType] + mode: Union[Literal["min", "max", "sum", "avg", "median"], DefaultType] + nested: Union["NestedSortValue", Dict[str, Any], DefaultType] + + def __init__( + self, + *, + script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT, + order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT, + type: Union[Literal["string", "number", "version"], DefaultType] = DEFAULT, + mode: Union[ + Literal["min", "max", "sum", "avg", "median"], DefaultType + ] = DEFAULT, + nested: Union["NestedSortValue", Dict[str, Any], 
DefaultType] = DEFAULT, + **kwargs: Any, + ): + if script is not DEFAULT: + kwargs["script"] = script + if order is not DEFAULT: + kwargs["order"] = order + if type is not DEFAULT: + kwargs["type"] = type + if mode is not DEFAULT: + kwargs["mode"] = mode + if nested is not DEFAULT: + kwargs["nested"] = nested + super().__init__(kwargs) + + +class ScriptedHeuristic(AttrDict[Any]): + """ + :arg script: (required) + """ + + script: Union["Script", Dict[str, Any], DefaultType] + + def __init__( + self, + *, + script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if script is not DEFAULT: + kwargs["script"] = script + super().__init__(kwargs) + + +class ShapeFieldQuery(AttrDict[Any]): + """ + :arg indexed_shape: Queries using a pre-indexed shape. + :arg relation: Spatial relation between the query shape and the + document shape. + :arg shape: Queries using an inline shape definition in GeoJSON or + Well Known Text (WKT) format. + """ + + indexed_shape: Union["FieldLookup", Dict[str, Any], DefaultType] + relation: Union[ + Literal["intersects", "disjoint", "within", "contains"], DefaultType + ] + shape: Any + + def __init__( + self, + *, + indexed_shape: Union["FieldLookup", Dict[str, Any], DefaultType] = DEFAULT, + relation: Union[ + Literal["intersects", "disjoint", "within", "contains"], DefaultType + ] = DEFAULT, + shape: Any = DEFAULT, + **kwargs: Any, + ): + if indexed_shape is not DEFAULT: + kwargs["indexed_shape"] = indexed_shape + if relation is not DEFAULT: + kwargs["relation"] = relation + if shape is not DEFAULT: + kwargs["shape"] = shape + super().__init__(kwargs) + + +class SortOptions(AttrDict[Any]): + """ + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + :arg _score: + :arg _doc: + :arg _geo_distance: + :arg _script: + """ + + _field: Union[str, "InstrumentedField", "DefaultType"] + _value: Union["FieldSort", Dict[str, Any], "DefaultType"] + _score: Union["ScoreSort", Dict[str, Any], DefaultType] + _doc: Union["ScoreSort", Dict[str, Any], DefaultType] + _geo_distance: Union["GeoDistanceSort", Dict[str, Any], DefaultType] + _script: Union["ScriptSort", Dict[str, Any], DefaultType] + + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union["FieldSort", Dict[str, Any], "DefaultType"] = DEFAULT, + *, + _score: Union["ScoreSort", Dict[str, Any], DefaultType] = DEFAULT, + _doc: Union["ScoreSort", Dict[str, Any], DefaultType] = DEFAULT, + _geo_distance: Union["GeoDistanceSort", Dict[str, Any], DefaultType] = DEFAULT, + _script: Union["ScriptSort", Dict[str, Any], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + if _score is not DEFAULT: + kwargs["_score"] = _score + if _doc is not DEFAULT: + kwargs["_doc"] = _doc + if _geo_distance is not DEFAULT: + kwargs["_geo_distance"] = _geo_distance + if _script is not DEFAULT: + kwargs["_script"] = _script + super().__init__(kwargs) + + +class SourceFilter(AttrDict[Any]): + """ + :arg excludes: + :arg includes: + """ + + excludes: Union[ + Union[str, InstrumentedField], + Sequence[Union[str, InstrumentedField]], + DefaultType, + ] + includes: Union[ + Union[str, InstrumentedField], + Sequence[Union[str, InstrumentedField]], + DefaultType, + ] + + def __init__( + self, + *, + excludes: Union[ + Union[str, InstrumentedField], + Sequence[Union[str, InstrumentedField]], + DefaultType, + ] = DEFAULT, + includes: Union[ + Union[str, InstrumentedField], + 
Sequence[Union[str, InstrumentedField]], + DefaultType, + ] = DEFAULT, + **kwargs: Any, + ): + if excludes is not DEFAULT: + kwargs["excludes"] = str(excludes) + if includes is not DEFAULT: + kwargs["includes"] = str(includes) + super().__init__(kwargs) + + +class SpanContainingQuery(AttrDict[Any]): + """ + :arg big: (required) Can be any span query. Matching spans from `big` + that contain matches from `little` are returned. + :arg little: (required) Can be any span query. Matching spans from + `big` that contain matches from `little` are returned. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + big: Union["SpanQuery", Dict[str, Any], DefaultType] + little: Union["SpanQuery", Dict[str, Any], DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + big: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT, + little: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if big is not DEFAULT: + kwargs["big"] = big + if little is not DEFAULT: + kwargs["little"] = little + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(kwargs) + + +class SpanFieldMaskingQuery(AttrDict[Any]): + """ + :arg field: (required) + :arg query: (required) + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + field: Union[str, InstrumentedField, DefaultType] + query: Union["SpanQuery", Dict[str, Any], DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + field: Union[str, InstrumentedField, DefaultType] = DEFAULT, + query: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if field is not DEFAULT: + kwargs["field"] = str(field) + if query is not DEFAULT: + kwargs["query"] = query + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(kwargs) + + +class SpanFirstQuery(AttrDict[Any]): + """ + :arg end: (required) Controls the maximum end position permitted in a + match. + :arg match: (required) Can be any other span type query. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ + + end: Union[int, DefaultType] + match: Union["SpanQuery", Dict[str, Any], DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + end: Union[int, DefaultType] = DEFAULT, + match: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if end is not DEFAULT: + kwargs["end"] = end + if match is not DEFAULT: + kwargs["match"] = match + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(kwargs) + + +class SpanMultiTermQuery(AttrDict[Any]): + """ + :arg match: (required) Should be a multi term query (one of + `wildcard`, `fuzzy`, `prefix`, `range`, or `regexp` query). + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + match: Union[Query, DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + match: Union[Query, DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if match is not DEFAULT: + kwargs["match"] = match + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(kwargs) + + +class SpanNearQuery(AttrDict[Any]): + """ + :arg clauses: (required) Array of one or more other span type queries. + :arg in_order: Controls whether matches are required to be in-order. + :arg slop: Controls the maximum number of intervening unmatched + positions permitted. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + clauses: Union[Sequence["SpanQuery"], Sequence[Dict[str, Any]], DefaultType] + in_order: Union[bool, DefaultType] + slop: Union[int, DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + clauses: Union[ + Sequence["SpanQuery"], Sequence[Dict[str, Any]], DefaultType + ] = DEFAULT, + in_order: Union[bool, DefaultType] = DEFAULT, + slop: Union[int, DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if clauses is not DEFAULT: + kwargs["clauses"] = clauses + if in_order is not DEFAULT: + kwargs["in_order"] = in_order + if slop is not DEFAULT: + kwargs["slop"] = slop + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(kwargs) + + +class SpanNotQuery(AttrDict[Any]): + """ + :arg exclude: (required) Span query whose matches must not overlap + those returned. + :arg include: (required) Span query whose matches are filtered. + :arg dist: The number of tokens from within the include span that + can’t have overlap with the exclude span. Equivalent to setting + both `pre` and `post`. 
+ :arg post: The number of tokens after the include span that can’t have + overlap with the exclude span. + :arg pre: The number of tokens before the include span that can’t have + overlap with the exclude span. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + exclude: Union["SpanQuery", Dict[str, Any], DefaultType] + include: Union["SpanQuery", Dict[str, Any], DefaultType] + dist: Union[int, DefaultType] + post: Union[int, DefaultType] + pre: Union[int, DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + exclude: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT, + include: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT, + dist: Union[int, DefaultType] = DEFAULT, + post: Union[int, DefaultType] = DEFAULT, + pre: Union[int, DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if exclude is not DEFAULT: + kwargs["exclude"] = exclude + if include is not DEFAULT: + kwargs["include"] = include + if dist is not DEFAULT: + kwargs["dist"] = dist + if post is not DEFAULT: + kwargs["post"] = post + if pre is not DEFAULT: + kwargs["pre"] = pre + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(kwargs) + + +class SpanOrQuery(AttrDict[Any]): + """ + :arg clauses: (required) Array of one or more other span type queries. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + clauses: Union[Sequence["SpanQuery"], Sequence[Dict[str, Any]], DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + clauses: Union[ + Sequence["SpanQuery"], Sequence[Dict[str, Any]], DefaultType + ] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if clauses is not DEFAULT: + kwargs["clauses"] = clauses + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(kwargs) + + +class SpanQuery(AttrDict[Any]): + """ + :arg span_containing: Accepts a list of span queries, but only returns + those spans which also match a second span query. + :arg span_field_masking: Allows queries like `span_near` or `span_or` + across different fields. + :arg span_first: Accepts another span query whose matches must appear + within the first N positions of the field. + :arg span_gap: + :arg span_multi: Wraps a `term`, `range`, `prefix`, `wildcard`, + `regexp`, or `fuzzy` query. + :arg span_near: Accepts multiple span queries whose matches must be + within the specified distance of each other, and possibly in the + same order. + :arg span_not: Wraps another span query, and excludes any documents + which match that query. + :arg span_or: Combines multiple span queries and returns documents + which match any of the specified queries. 
+ :arg span_term: The equivalent of the `term` query but for use with + other span queries. + :arg span_within: The result from a single span query is returned as + long is its span falls within the spans returned by a list of + other span queries. + """ + + span_containing: Union["SpanContainingQuery", Dict[str, Any], DefaultType] + span_field_masking: Union["SpanFieldMaskingQuery", Dict[str, Any], DefaultType] + span_first: Union["SpanFirstQuery", Dict[str, Any], DefaultType] + span_gap: Union[Mapping[Union[str, InstrumentedField], int], DefaultType] + span_multi: Union["SpanMultiTermQuery", Dict[str, Any], DefaultType] + span_near: Union["SpanNearQuery", Dict[str, Any], DefaultType] + span_not: Union["SpanNotQuery", Dict[str, Any], DefaultType] + span_or: Union["SpanOrQuery", Dict[str, Any], DefaultType] + span_term: Union[ + Mapping[Union[str, InstrumentedField], "SpanTermQuery"], + Dict[str, Any], + DefaultType, + ] + span_within: Union["SpanWithinQuery", Dict[str, Any], DefaultType] + + def __init__( + self, + *, + span_containing: Union[ + "SpanContainingQuery", Dict[str, Any], DefaultType + ] = DEFAULT, + span_field_masking: Union[ + "SpanFieldMaskingQuery", Dict[str, Any], DefaultType + ] = DEFAULT, + span_first: Union["SpanFirstQuery", Dict[str, Any], DefaultType] = DEFAULT, + span_gap: Union[ + Mapping[Union[str, InstrumentedField], int], DefaultType + ] = DEFAULT, + span_multi: Union["SpanMultiTermQuery", Dict[str, Any], DefaultType] = DEFAULT, + span_near: Union["SpanNearQuery", Dict[str, Any], DefaultType] = DEFAULT, + span_not: Union["SpanNotQuery", Dict[str, Any], DefaultType] = DEFAULT, + span_or: Union["SpanOrQuery", Dict[str, Any], DefaultType] = DEFAULT, + span_term: Union[ + Mapping[Union[str, InstrumentedField], "SpanTermQuery"], + Dict[str, Any], + DefaultType, + ] = DEFAULT, + span_within: Union["SpanWithinQuery", Dict[str, Any], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if span_containing is not DEFAULT: + kwargs["span_containing"] = span_containing + if span_field_masking is not DEFAULT: + kwargs["span_field_masking"] = span_field_masking + if span_first is not DEFAULT: + kwargs["span_first"] = span_first + if span_gap is not DEFAULT: + kwargs["span_gap"] = str(span_gap) + if span_multi is not DEFAULT: + kwargs["span_multi"] = span_multi + if span_near is not DEFAULT: + kwargs["span_near"] = span_near + if span_not is not DEFAULT: + kwargs["span_not"] = span_not + if span_or is not DEFAULT: + kwargs["span_or"] = span_or + if span_term is not DEFAULT: + kwargs["span_term"] = str(span_term) + if span_within is not DEFAULT: + kwargs["span_within"] = span_within + super().__init__(kwargs) + + +class SpanTermQuery(AttrDict[Any]): + """ + :arg value: (required) + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ + + value: Union[str, DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + value: Union[str, DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if value is not DEFAULT: + kwargs["value"] = value + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(kwargs) + + +class SpanWithinQuery(AttrDict[Any]): + """ + :arg big: (required) Can be any span query. Matching spans from + `little` that are enclosed within `big` are returned. + :arg little: (required) Can be any span query. Matching spans from + `little` that are enclosed within `big` are returned. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + big: Union["SpanQuery", Dict[str, Any], DefaultType] + little: Union["SpanQuery", Dict[str, Any], DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + big: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT, + little: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if big is not DEFAULT: + kwargs["big"] = big + if little is not DEFAULT: + kwargs["little"] = little + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(kwargs) + + +class TDigest(AttrDict[Any]): + """ + :arg compression: Limits the maximum number of nodes used by the + underlying TDigest algorithm to `20 * compression`, enabling + control of memory usage and approximation error. + """ + + compression: Union[int, DefaultType] + + def __init__( + self, *, compression: Union[int, DefaultType] = DEFAULT, **kwargs: Any + ): + if compression is not DEFAULT: + kwargs["compression"] = compression + super().__init__(kwargs) + + +class TermQuery(AttrDict[Any]): + """ + :arg value: (required) Term you wish to find in the provided field. + :arg case_insensitive: Allows ASCII case insensitive matching of the + value with the indexed field values when set to `true`. When + `false`, the case sensitivity of matching depends on the + underlying field’s mapping. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ + + value: Union[int, float, str, bool, None, Any, DefaultType] + case_insensitive: Union[bool, DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + value: Union[int, float, str, bool, None, Any, DefaultType] = DEFAULT, + case_insensitive: Union[bool, DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if value is not DEFAULT: + kwargs["value"] = value + if case_insensitive is not DEFAULT: + kwargs["case_insensitive"] = case_insensitive + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(kwargs) + + +class TermsLookup(AttrDict[Any]): + """ + :arg index: (required) + :arg id: (required) + :arg path: (required) + :arg routing: + """ + + index: Union[str, DefaultType] + id: Union[str, DefaultType] + path: Union[str, InstrumentedField, DefaultType] + routing: Union[str, DefaultType] + + def __init__( + self, + *, + index: Union[str, DefaultType] = DEFAULT, + id: Union[str, DefaultType] = DEFAULT, + path: Union[str, InstrumentedField, DefaultType] = DEFAULT, + routing: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if index is not DEFAULT: + kwargs["index"] = index + if id is not DEFAULT: + kwargs["id"] = id + if path is not DEFAULT: + kwargs["path"] = str(path) + if routing is not DEFAULT: + kwargs["routing"] = routing + super().__init__(kwargs) + + +class TermsPartition(AttrDict[Any]): + """ + :arg num_partitions: (required) The number of partitions. + :arg partition: (required) The partition number for this request. + """ + + num_partitions: Union[int, DefaultType] + partition: Union[int, DefaultType] + + def __init__( + self, + *, + num_partitions: Union[int, DefaultType] = DEFAULT, + partition: Union[int, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if num_partitions is not DEFAULT: + kwargs["num_partitions"] = num_partitions + if partition is not DEFAULT: + kwargs["partition"] = partition + super().__init__(kwargs) + + +class TermsSetQuery(AttrDict[Any]): + """ + :arg terms: (required) Array of terms you wish to find in the provided + field. + :arg minimum_should_match: Specification describing number of matching + terms required to return a document. + :arg minimum_should_match_field: Numeric field containing the number + of matching terms required to return a document. + :arg minimum_should_match_script: Custom script containing the number + of matching terms required to return a document. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ + + terms: Union[Sequence[str], DefaultType] + minimum_should_match: Union[int, str, DefaultType] + minimum_should_match_field: Union[str, InstrumentedField, DefaultType] + minimum_should_match_script: Union["Script", Dict[str, Any], DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + terms: Union[Sequence[str], DefaultType] = DEFAULT, + minimum_should_match: Union[int, str, DefaultType] = DEFAULT, + minimum_should_match_field: Union[ + str, InstrumentedField, DefaultType + ] = DEFAULT, + minimum_should_match_script: Union[ + "Script", Dict[str, Any], DefaultType + ] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if terms is not DEFAULT: + kwargs["terms"] = terms + if minimum_should_match is not DEFAULT: + kwargs["minimum_should_match"] = minimum_should_match + if minimum_should_match_field is not DEFAULT: + kwargs["minimum_should_match_field"] = str(minimum_should_match_field) + if minimum_should_match_script is not DEFAULT: + kwargs["minimum_should_match_script"] = minimum_should_match_script + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(kwargs) + + +class TestPopulation(AttrDict[Any]): + """ + :arg field: (required) The field to aggregate. + :arg script: + :arg filter: A filter used to define a set of records to run unpaired + t-test on. + """ + + field: Union[str, InstrumentedField, DefaultType] + script: Union["Script", Dict[str, Any], DefaultType] + filter: Union[Query, DefaultType] + + def __init__( + self, + *, + field: Union[str, InstrumentedField, DefaultType] = DEFAULT, + script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT, + filter: Union[Query, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if field is not DEFAULT: + kwargs["field"] = str(field) + if script is not DEFAULT: + kwargs["script"] = script + if filter is not DEFAULT: + kwargs["filter"] = filter + super().__init__(kwargs) + + +class TextEmbedding(AttrDict[Any]): + """ + :arg model_id: (required) + :arg model_text: (required) + """ + + model_id: Union[str, DefaultType] + model_text: Union[str, DefaultType] + + def __init__( + self, + *, + model_id: Union[str, DefaultType] = DEFAULT, + model_text: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if model_id is not DEFAULT: + kwargs["model_id"] = model_id + if model_text is not DEFAULT: + kwargs["model_text"] = model_text + super().__init__(kwargs) + + +class TextExpansionQuery(AttrDict[Any]): + """ + :arg model_id: (required) The text expansion NLP model to use + :arg model_text: (required) The query text + :arg pruning_config: Token pruning configurations + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ + + model_id: Union[str, DefaultType] + model_text: Union[str, DefaultType] + pruning_config: Union["TokenPruningConfig", Dict[str, Any], DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + model_id: Union[str, DefaultType] = DEFAULT, + model_text: Union[str, DefaultType] = DEFAULT, + pruning_config: Union[ + "TokenPruningConfig", Dict[str, Any], DefaultType + ] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if model_id is not DEFAULT: + kwargs["model_id"] = model_id + if model_text is not DEFAULT: + kwargs["model_text"] = model_text + if pruning_config is not DEFAULT: + kwargs["pruning_config"] = pruning_config + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(kwargs) + + +class TokenPruningConfig(AttrDict[Any]): + """ + :arg tokens_freq_ratio_threshold: Tokens whose frequency is more than + this threshold times the average frequency of all tokens in the + specified field are considered outliers and pruned. Defaults to + `5` if omitted. + :arg tokens_weight_threshold: Tokens whose weight is less than this + threshold are considered nonsignificant and pruned. Defaults to + `0.4` if omitted. + :arg only_score_pruned_tokens: Whether to only score pruned tokens, vs + only scoring kept tokens. + """ + + tokens_freq_ratio_threshold: Union[int, DefaultType] + tokens_weight_threshold: Union[float, DefaultType] + only_score_pruned_tokens: Union[bool, DefaultType] + + def __init__( + self, + *, + tokens_freq_ratio_threshold: Union[int, DefaultType] = DEFAULT, + tokens_weight_threshold: Union[float, DefaultType] = DEFAULT, + only_score_pruned_tokens: Union[bool, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if tokens_freq_ratio_threshold is not DEFAULT: + kwargs["tokens_freq_ratio_threshold"] = tokens_freq_ratio_threshold + if tokens_weight_threshold is not DEFAULT: + kwargs["tokens_weight_threshold"] = tokens_weight_threshold + if only_score_pruned_tokens is not DEFAULT: + kwargs["only_score_pruned_tokens"] = only_score_pruned_tokens + super().__init__(kwargs) + + +class TopLeftBottomRightGeoBounds(AttrDict[Any]): + """ + :arg top_left: (required) + :arg bottom_right: (required) + """ + + top_left: Union[ + "LatLonGeoLocation", + "GeoHashLocation", + Sequence[float], + str, + Dict[str, Any], + DefaultType, + ] + bottom_right: Union[ + "LatLonGeoLocation", + "GeoHashLocation", + Sequence[float], + str, + Dict[str, Any], + DefaultType, + ] + + def __init__( + self, + *, + top_left: Union[ + "LatLonGeoLocation", + "GeoHashLocation", + Sequence[float], + str, + Dict[str, Any], + DefaultType, + ] = DEFAULT, + bottom_right: Union[ + "LatLonGeoLocation", + "GeoHashLocation", + Sequence[float], + str, + Dict[str, Any], + DefaultType, + ] = DEFAULT, + **kwargs: Any, + ): + if top_left is not DEFAULT: + kwargs["top_left"] = top_left + if bottom_right is not DEFAULT: + kwargs["bottom_right"] = bottom_right + super().__init__(kwargs) + + +class TopMetricsValue(AttrDict[Any]): + """ + :arg field: (required) A field to return as a metric. 
+ """ + + field: Union[str, InstrumentedField, DefaultType] + + def __init__( + self, + *, + field: Union[str, InstrumentedField, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if field is not DEFAULT: + kwargs["field"] = str(field) + super().__init__(kwargs) + + +class TopRightBottomLeftGeoBounds(AttrDict[Any]): + """ + :arg top_right: (required) + :arg bottom_left: (required) + """ + + top_right: Union[ + "LatLonGeoLocation", + "GeoHashLocation", + Sequence[float], + str, + Dict[str, Any], + DefaultType, + ] + bottom_left: Union[ + "LatLonGeoLocation", + "GeoHashLocation", + Sequence[float], + str, + Dict[str, Any], + DefaultType, + ] + + def __init__( + self, + *, + top_right: Union[ + "LatLonGeoLocation", + "GeoHashLocation", + Sequence[float], + str, + Dict[str, Any], + DefaultType, + ] = DEFAULT, + bottom_left: Union[ + "LatLonGeoLocation", + "GeoHashLocation", + Sequence[float], + str, + Dict[str, Any], + DefaultType, + ] = DEFAULT, + **kwargs: Any, + ): + if top_right is not DEFAULT: + kwargs["top_right"] = top_right + if bottom_left is not DEFAULT: + kwargs["bottom_left"] = bottom_left + super().__init__(kwargs) + + +class WeightedAverageValue(AttrDict[Any]): + """ + :arg field: The field from which to extract the values or weights. + :arg missing: A value or weight to use if the field is missing. + :arg script: + """ + + field: Union[str, InstrumentedField, DefaultType] + missing: Union[float, DefaultType] + script: Union["Script", Dict[str, Any], DefaultType] + + def __init__( + self, + *, + field: Union[str, InstrumentedField, DefaultType] = DEFAULT, + missing: Union[float, DefaultType] = DEFAULT, + script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if field is not DEFAULT: + kwargs["field"] = str(field) + if missing is not DEFAULT: + kwargs["missing"] = missing + if script is not DEFAULT: + kwargs["script"] = script + super().__init__(kwargs) + + +class WeightedTokensQuery(AttrDict[Any]): + """ + :arg tokens: (required) The tokens representing this query + :arg pruning_config: Token pruning configurations + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + tokens: Union[Mapping[str, float], DefaultType] + pruning_config: Union["TokenPruningConfig", Dict[str, Any], DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + tokens: Union[Mapping[str, float], DefaultType] = DEFAULT, + pruning_config: Union[ + "TokenPruningConfig", Dict[str, Any], DefaultType + ] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if tokens is not DEFAULT: + kwargs["tokens"] = tokens + if pruning_config is not DEFAULT: + kwargs["pruning_config"] = pruning_config + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(kwargs) + + +class WildcardQuery(AttrDict[Any]): + """ + :arg case_insensitive: Allows case insensitive matching of the pattern + with the indexed field values when set to true. Default is false + which means the case sensitivity of matching depends on the + underlying field’s mapping. + :arg rewrite: Method used to rewrite the query. 
+ :arg value: Wildcard pattern for terms you wish to find in the + provided field. Required, when wildcard is not set. + :arg wildcard: Wildcard pattern for terms you wish to find in the + provided field. Required, when value is not set. + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. + :arg _name: + """ + + case_insensitive: Union[bool, DefaultType] + rewrite: Union[str, DefaultType] + value: Union[str, DefaultType] + wildcard: Union[str, DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + case_insensitive: Union[bool, DefaultType] = DEFAULT, + rewrite: Union[str, DefaultType] = DEFAULT, + value: Union[str, DefaultType] = DEFAULT, + wildcard: Union[str, DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if case_insensitive is not DEFAULT: + kwargs["case_insensitive"] = case_insensitive + if rewrite is not DEFAULT: + kwargs["rewrite"] = rewrite + if value is not DEFAULT: + kwargs["value"] = value + if wildcard is not DEFAULT: + kwargs["wildcard"] = wildcard + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(kwargs) + + +class WktGeoBounds(AttrDict[Any]): + """ + :arg wkt: (required) + """ + + wkt: Union[str, DefaultType] + + def __init__(self, *, wkt: Union[str, DefaultType] = DEFAULT, **kwargs: Any): + if wkt is not DEFAULT: + kwargs["wkt"] = wkt + super().__init__(kwargs) + + +class AdjacencyMatrixAggregate(AttrDict[Any]): + """ + :arg buckets: (required) the aggregation buckets as a list + :arg meta: + """ + + buckets: Sequence["AdjacencyMatrixBucket"] + meta: Mapping[str, Any] + + @property + def buckets_as_dict(self) -> Mapping[str, "AdjacencyMatrixBucket"]: + return self.buckets # type: ignore[return-value] + + +class AdjacencyMatrixBucket(AttrDict[Any]): + """ + :arg key: (required) + :arg doc_count: (required) + """ + + key: str + doc_count: int + + +class AggregationBreakdown(AttrDict[Any]): + """ + :arg build_aggregation: (required) + :arg build_aggregation_count: (required) + :arg build_leaf_collector: (required) + :arg build_leaf_collector_count: (required) + :arg collect: (required) + :arg collect_count: (required) + :arg initialize: (required) + :arg initialize_count: (required) + :arg reduce: (required) + :arg reduce_count: (required) + :arg post_collection: + :arg post_collection_count: + """ + + build_aggregation: int + build_aggregation_count: int + build_leaf_collector: int + build_leaf_collector_count: int + collect: int + collect_count: int + initialize: int + initialize_count: int + reduce: int + reduce_count: int + post_collection: int + post_collection_count: int + + +class AggregationProfile(AttrDict[Any]): + """ + :arg breakdown: (required) + :arg description: (required) + :arg time_in_nanos: (required) + :arg type: (required) + :arg debug: + :arg children: + """ + + breakdown: "AggregationBreakdown" + description: str + time_in_nanos: Any + type: str + debug: "AggregationProfileDebug" + children: Sequence["AggregationProfile"] + + +class AggregationProfileDebug(AttrDict[Any]): + """ + :arg segments_with_multi_valued_ords: + :arg collection_strategy: + :arg 
segments_with_single_valued_ords: + :arg total_buckets: + :arg built_buckets: + :arg result_strategy: + :arg has_filter: + :arg delegate: + :arg delegate_debug: + :arg chars_fetched: + :arg extract_count: + :arg extract_ns: + :arg values_fetched: + :arg collect_analyzed_ns: + :arg collect_analyzed_count: + :arg surviving_buckets: + :arg ordinals_collectors_used: + :arg ordinals_collectors_overhead_too_high: + :arg string_hashing_collectors_used: + :arg numeric_collectors_used: + :arg empty_collectors_used: + :arg deferred_aggregators: + :arg segments_with_doc_count_field: + :arg segments_with_deleted_docs: + :arg filters: + :arg segments_counted: + :arg segments_collected: + :arg map_reducer: + :arg brute_force_used: + :arg dynamic_pruning_attempted: + :arg dynamic_pruning_used: + :arg skipped_due_to_no_data: + """ + + segments_with_multi_valued_ords: int + collection_strategy: str + segments_with_single_valued_ords: int + total_buckets: int + built_buckets: int + result_strategy: str + has_filter: bool + delegate: str + delegate_debug: "AggregationProfileDebug" + chars_fetched: int + extract_count: int + extract_ns: int + values_fetched: int + collect_analyzed_ns: int + collect_analyzed_count: int + surviving_buckets: int + ordinals_collectors_used: int + ordinals_collectors_overhead_too_high: int + string_hashing_collectors_used: int + numeric_collectors_used: int + empty_collectors_used: int + deferred_aggregators: Sequence[str] + segments_with_doc_count_field: int + segments_with_deleted_docs: int + filters: Sequence["AggregationProfileDelegateDebugFilter"] + segments_counted: int + segments_collected: int + map_reducer: str + brute_force_used: int + dynamic_pruning_attempted: int + dynamic_pruning_used: int + skipped_due_to_no_data: int + + +class AggregationProfileDelegateDebugFilter(AttrDict[Any]): + """ + :arg results_from_metadata: + :arg query: + :arg specialized_for: + :arg segments_counted_in_constant_time: + """ + + results_from_metadata: int + query: str + specialized_for: str + segments_counted_in_constant_time: int + + +class ArrayPercentilesItem(AttrDict[Any]): + """ + :arg key: (required) + :arg value: (required) + :arg value_as_string: + """ + + key: str + value: Union[float, None] + value_as_string: str + + +class AutoDateHistogramAggregate(AttrDict[Any]): + """ + :arg interval: (required) + :arg buckets: (required) the aggregation buckets as a list + :arg meta: + """ + + interval: str + buckets: Sequence["DateHistogramBucket"] + meta: Mapping[str, Any] + + @property + def buckets_as_dict(self) -> Mapping[str, "DateHistogramBucket"]: + return self.buckets # type: ignore[return-value] + + +class AvgAggregate(AttrDict[Any]): + """ + :arg value: (required) The metric value. A missing value generally + means that there was no data to aggregate, unless specified + otherwise. 
+ :arg value_as_string: + :arg meta: + """ + + value: Union[float, None] + value_as_string: str + meta: Mapping[str, Any] + + +class BoxPlotAggregate(AttrDict[Any]): + """ + :arg min: (required) + :arg max: (required) + :arg q1: (required) + :arg q2: (required) + :arg q3: (required) + :arg lower: (required) + :arg upper: (required) + :arg min_as_string: + :arg max_as_string: + :arg q1_as_string: + :arg q2_as_string: + :arg q3_as_string: + :arg lower_as_string: + :arg upper_as_string: + :arg meta: + """ + + min: float + max: float + q1: float + q2: float + q3: float + lower: float + upper: float + min_as_string: str + max_as_string: str + q1_as_string: str + q2_as_string: str + q3_as_string: str + lower_as_string: str + upper_as_string: str + meta: Mapping[str, Any] + + +class BucketMetricValueAggregate(AttrDict[Any]): + """ + :arg keys: (required) + :arg value: (required) The metric value. A missing value generally + means that there was no data to aggregate, unless specified + otherwise. + :arg value_as_string: + :arg meta: + """ + + keys: Sequence[str] # type: ignore[assignment] + value: Union[float, None] + value_as_string: str + meta: Mapping[str, Any] + + +class BulkIndexByScrollFailure(AttrDict[Any]): + """ + :arg cause: (required) + :arg id: (required) + :arg index: (required) + :arg status: (required) + :arg type: (required) + """ + + cause: "ErrorCause" + id: str + index: str + status: int + type: str + + +class CardinalityAggregate(AttrDict[Any]): + """ + :arg value: (required) + :arg meta: + """ + + value: int + meta: Mapping[str, Any] + + +class ChildrenAggregate(AttrDict[Any]): + """ + :arg doc_count: (required) + :arg meta: + """ + + doc_count: int + meta: Mapping[str, Any] + + +class ClusterDetails(AttrDict[Any]): + """ + :arg status: (required) + :arg indices: (required) + :arg timed_out: (required) + :arg took: + :arg _shards: + :arg failures: + """ + + status: Literal["running", "successful", "partial", "skipped", "failed"] + indices: str + timed_out: bool + took: Any + _shards: "ShardStatistics" + failures: Sequence["ShardFailure"] + + +class ClusterStatistics(AttrDict[Any]): + """ + :arg skipped: (required) + :arg successful: (required) + :arg total: (required) + :arg running: (required) + :arg partial: (required) + :arg failed: (required) + :arg details: + """ + + skipped: int + successful: int + total: int + running: int + partial: int + failed: int + details: Mapping[str, "ClusterDetails"] + + +class Collector(AttrDict[Any]): + """ + :arg name: (required) + :arg reason: (required) + :arg time_in_nanos: (required) + :arg children: + """ + + name: str + reason: str + time_in_nanos: Any + children: Sequence["Collector"] + + +class CompletionSuggest(AttrDict[Any]): + """ + :arg options: (required) + :arg length: (required) + :arg offset: (required) + :arg text: (required) + """ + + options: Sequence["CompletionSuggestOption"] + length: int + offset: int + text: str + + +class CompletionSuggestOption(AttrDict[Any]): + """ + :arg text: (required) + :arg collate_match: + :arg contexts: + :arg fields: + :arg _id: + :arg _index: + :arg _routing: + :arg _score: + :arg _source: + :arg score: + """ + + text: str + collate_match: bool + contexts: Mapping[ + str, + Sequence[ + Union[ + str, Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str] + ] + ], + ] + fields: Mapping[str, Any] + _id: str + _index: str + _routing: str + _score: float + _source: Any + score: float + + +class CompositeAggregate(AttrDict[Any]): + """ + :arg after_key: + :arg buckets: (required) 
the aggregation buckets as a list
+    :arg meta:
+    """
+
+    after_key: Mapping[str, Union[int, float, str, bool, None, Any]]
+    buckets: Sequence["CompositeBucket"]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, "CompositeBucket"]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class CompositeBucket(AttrDict[Any]):
+    """
+    :arg key: (required)
+    :arg doc_count: (required)
+    """
+
+    key: Mapping[str, Union[int, float, str, bool, None, Any]]
+    doc_count: int
+
+
+class CumulativeCardinalityAggregate(AttrDict[Any]):
+    """
+    Result of the `cumulative_cardinality` aggregation
+
+    :arg value: (required)
+    :arg value_as_string:
+    :arg meta:
+    """
+
+    value: int
+    value_as_string: str
+    meta: Mapping[str, Any]
+
+
+class DateHistogramAggregate(AttrDict[Any]):
+    """
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    buckets: Sequence["DateHistogramBucket"]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, "DateHistogramBucket"]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class DateHistogramBucket(AttrDict[Any]):
+    """
+    :arg key: (required)
+    :arg doc_count: (required)
+    :arg key_as_string:
+    """
+
+    key: Any
+    doc_count: int
+    key_as_string: str
+
+
+class DateRangeAggregate(AttrDict[Any]):
+    """
+    Result of a `date_range` aggregation. Same format as for a `range`
+    aggregation: `from` and `to` in `buckets` are milliseconds since the
+    Epoch, represented as a floating point number.
+
+    :arg buckets: (required) the aggregation buckets as a list
+    :arg meta:
+    """
+
+    buckets: Sequence["RangeBucket"]
+    meta: Mapping[str, Any]
+
+    @property
+    def buckets_as_dict(self) -> Mapping[str, "RangeBucket"]:
+        return self.buckets  # type: ignore[return-value]
+
+
+class DerivativeAggregate(AttrDict[Any]):
+    """
+    :arg value: (required) The metric value. A missing value generally
+        means that there was no data to aggregate, unless specified
+        otherwise.
+ :arg normalized_value: + :arg normalized_value_as_string: + :arg value_as_string: + :arg meta: + """ + + value: Union[float, None] + normalized_value: float + normalized_value_as_string: str + value_as_string: str + meta: Mapping[str, Any] + + +class DfsKnnProfile(AttrDict[Any]): + """ + :arg query: (required) + :arg rewrite_time: (required) + :arg collector: (required) + :arg vector_operations_count: + """ + + query: Sequence["KnnQueryProfileResult"] + rewrite_time: int + collector: Sequence["KnnCollectorResult"] + vector_operations_count: int + + +class DfsProfile(AttrDict[Any]): + """ + :arg statistics: + :arg knn: + """ + + statistics: "DfsStatisticsProfile" + knn: Sequence["DfsKnnProfile"] + + +class DfsStatisticsBreakdown(AttrDict[Any]): + """ + :arg collection_statistics: (required) + :arg collection_statistics_count: (required) + :arg create_weight: (required) + :arg create_weight_count: (required) + :arg rewrite: (required) + :arg rewrite_count: (required) + :arg term_statistics: (required) + :arg term_statistics_count: (required) + """ + + collection_statistics: int + collection_statistics_count: int + create_weight: int + create_weight_count: int + rewrite: int + rewrite_count: int + term_statistics: int + term_statistics_count: int + + +class DfsStatisticsProfile(AttrDict[Any]): + """ + :arg type: (required) + :arg description: (required) + :arg time_in_nanos: (required) + :arg breakdown: (required) + :arg time: + :arg debug: + :arg children: + """ + + type: str + description: str + time_in_nanos: Any + breakdown: "DfsStatisticsBreakdown" + time: Any + debug: Mapping[str, Any] + children: Sequence["DfsStatisticsProfile"] + + +class DoubleTermsAggregate(AttrDict[Any]): + """ + Result of a `terms` aggregation when the field is some kind of decimal + number like a float, double, or distance. + + :arg doc_count_error_upper_bound: + :arg sum_other_doc_count: + :arg buckets: (required) the aggregation buckets as a list + :arg meta: + """ + + doc_count_error_upper_bound: int + sum_other_doc_count: int + buckets: Sequence["DoubleTermsBucket"] + meta: Mapping[str, Any] + + @property + def buckets_as_dict(self) -> Mapping[str, "DoubleTermsBucket"]: + return self.buckets # type: ignore[return-value] + + +class DoubleTermsBucket(AttrDict[Any]): + """ + :arg key: (required) + :arg doc_count: (required) + :arg key_as_string: + :arg doc_count_error_upper_bound: + """ + + key: float + doc_count: int + key_as_string: str + doc_count_error_upper_bound: int + + +class ErrorCause(AttrDict[Any]): + """ + Cause and details about a request failure. This class defines the + properties common to all error types. Additional details are also + provided, that depend on the error type. + + :arg type: (required) The type of error + :arg reason: A human-readable explanation of the error, in English. + :arg stack_trace: The server stack trace. Present only if the + `error_trace=true` parameter was sent with the request. 
+ :arg caused_by: + :arg root_cause: + :arg suppressed: + """ + + type: str + reason: str + stack_trace: str + caused_by: "ErrorCause" + root_cause: Sequence["ErrorCause"] + suppressed: Sequence["ErrorCause"] + + +class Explanation(AttrDict[Any]): + """ + :arg description: (required) + :arg details: (required) + :arg value: (required) + """ + + description: str + details: Sequence["ExplanationDetail"] + value: float + + +class ExplanationDetail(AttrDict[Any]): + """ + :arg description: (required) + :arg value: (required) + :arg details: + """ + + description: str + value: float + details: Sequence["ExplanationDetail"] + + +class ExtendedStatsAggregate(AttrDict[Any]): + """ + :arg sum_of_squares: (required) + :arg variance: (required) + :arg variance_population: (required) + :arg variance_sampling: (required) + :arg std_deviation: (required) + :arg std_deviation_population: (required) + :arg std_deviation_sampling: (required) + :arg count: (required) + :arg min: (required) + :arg max: (required) + :arg avg: (required) + :arg sum: (required) + :arg std_deviation_bounds: + :arg sum_of_squares_as_string: + :arg variance_as_string: + :arg variance_population_as_string: + :arg variance_sampling_as_string: + :arg std_deviation_as_string: + :arg std_deviation_bounds_as_string: + :arg min_as_string: + :arg max_as_string: + :arg avg_as_string: + :arg sum_as_string: + :arg meta: + """ + + sum_of_squares: Union[float, None] + variance: Union[float, None] + variance_population: Union[float, None] + variance_sampling: Union[float, None] + std_deviation: Union[float, None] + std_deviation_population: Union[float, None] + std_deviation_sampling: Union[float, None] + count: int + min: Union[float, None] + max: Union[float, None] + avg: Union[float, None] + sum: float + std_deviation_bounds: "StandardDeviationBounds" + sum_of_squares_as_string: str + variance_as_string: str + variance_population_as_string: str + variance_sampling_as_string: str + std_deviation_as_string: str + std_deviation_bounds_as_string: "StandardDeviationBoundsAsString" + min_as_string: str + max_as_string: str + avg_as_string: str + sum_as_string: str + meta: Mapping[str, Any] + + +class ExtendedStatsBucketAggregate(AttrDict[Any]): + """ + :arg sum_of_squares: (required) + :arg variance: (required) + :arg variance_population: (required) + :arg variance_sampling: (required) + :arg std_deviation: (required) + :arg std_deviation_population: (required) + :arg std_deviation_sampling: (required) + :arg count: (required) + :arg min: (required) + :arg max: (required) + :arg avg: (required) + :arg sum: (required) + :arg std_deviation_bounds: + :arg sum_of_squares_as_string: + :arg variance_as_string: + :arg variance_population_as_string: + :arg variance_sampling_as_string: + :arg std_deviation_as_string: + :arg std_deviation_bounds_as_string: + :arg min_as_string: + :arg max_as_string: + :arg avg_as_string: + :arg sum_as_string: + :arg meta: + """ + + sum_of_squares: Union[float, None] + variance: Union[float, None] + variance_population: Union[float, None] + variance_sampling: Union[float, None] + std_deviation: Union[float, None] + std_deviation_population: Union[float, None] + std_deviation_sampling: Union[float, None] + count: int + min: Union[float, None] + max: Union[float, None] + avg: Union[float, None] + sum: float + std_deviation_bounds: "StandardDeviationBounds" + sum_of_squares_as_string: str + variance_as_string: str + variance_population_as_string: str + variance_sampling_as_string: str + std_deviation_as_string: str + 
std_deviation_bounds_as_string: "StandardDeviationBoundsAsString" + min_as_string: str + max_as_string: str + avg_as_string: str + sum_as_string: str + meta: Mapping[str, Any] + + +class FetchProfile(AttrDict[Any]): + """ + :arg type: (required) + :arg description: (required) + :arg time_in_nanos: (required) + :arg breakdown: (required) + :arg debug: + :arg children: + """ + + type: str + description: str + time_in_nanos: Any + breakdown: "FetchProfileBreakdown" + debug: "FetchProfileDebug" + children: Sequence["FetchProfile"] + + +class FetchProfileBreakdown(AttrDict[Any]): + """ + :arg load_source: + :arg load_source_count: + :arg load_stored_fields: + :arg load_stored_fields_count: + :arg next_reader: + :arg next_reader_count: + :arg process_count: + :arg process: + """ + + load_source: int + load_source_count: int + load_stored_fields: int + load_stored_fields_count: int + next_reader: int + next_reader_count: int + process_count: int + process: int + + +class FetchProfileDebug(AttrDict[Any]): + """ + :arg stored_fields: + :arg fast_path: + """ + + stored_fields: Sequence[str] + fast_path: int + + +class FilterAggregate(AttrDict[Any]): + """ + :arg doc_count: (required) + :arg meta: + """ + + doc_count: int + meta: Mapping[str, Any] + + +class FiltersAggregate(AttrDict[Any]): + """ + :arg buckets: (required) the aggregation buckets as a list + :arg meta: + """ + + buckets: Sequence["FiltersBucket"] + meta: Mapping[str, Any] + + @property + def buckets_as_dict(self) -> Mapping[str, "FiltersBucket"]: + return self.buckets # type: ignore[return-value] + + +class FiltersBucket(AttrDict[Any]): + """ + :arg doc_count: (required) + """ + + doc_count: int + + +class FrequentItemSetsAggregate(AttrDict[Any]): + """ + :arg buckets: (required) the aggregation buckets as a list + :arg meta: + """ + + buckets: Sequence["FrequentItemSetsBucket"] + meta: Mapping[str, Any] + + @property + def buckets_as_dict(self) -> Mapping[str, "FrequentItemSetsBucket"]: + return self.buckets # type: ignore[return-value] + + +class FrequentItemSetsBucket(AttrDict[Any]): + """ + :arg key: (required) + :arg support: (required) + :arg doc_count: (required) + """ + + key: Mapping[str, Sequence[str]] + support: float + doc_count: int + + +class GeoBoundsAggregate(AttrDict[Any]): + """ + :arg bounds: + :arg meta: + """ + + bounds: Union[ + "CoordsGeoBounds", + "TopLeftBottomRightGeoBounds", + "TopRightBottomLeftGeoBounds", + "WktGeoBounds", + ] + meta: Mapping[str, Any] + + +class GeoCentroidAggregate(AttrDict[Any]): + """ + :arg count: (required) + :arg location: + :arg meta: + """ + + count: int + location: Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str] + meta: Mapping[str, Any] + + +class GeoDistanceAggregate(AttrDict[Any]): + """ + Result of a `geo_distance` aggregation. The unit for `from` and `to` + is meters by default. 
+ + :arg buckets: (required) the aggregation buckets as a list + :arg meta: + """ + + buckets: Sequence["RangeBucket"] + meta: Mapping[str, Any] + + @property + def buckets_as_dict(self) -> Mapping[str, "RangeBucket"]: + return self.buckets # type: ignore[return-value] + + +class GeoHashGridAggregate(AttrDict[Any]): + """ + :arg buckets: (required) the aggregation buckets as a list + :arg meta: + """ + + buckets: Sequence["GeoHashGridBucket"] + meta: Mapping[str, Any] + + @property + def buckets_as_dict(self) -> Mapping[str, "GeoHashGridBucket"]: + return self.buckets # type: ignore[return-value] + + +class GeoHashGridBucket(AttrDict[Any]): + """ + :arg key: (required) + :arg doc_count: (required) + """ + + key: str + doc_count: int + + +class GeoHexGridAggregate(AttrDict[Any]): + """ + :arg buckets: (required) the aggregation buckets as a list + :arg meta: + """ + + buckets: Sequence["GeoHexGridBucket"] + meta: Mapping[str, Any] + + @property + def buckets_as_dict(self) -> Mapping[str, "GeoHexGridBucket"]: + return self.buckets # type: ignore[return-value] + + +class GeoHexGridBucket(AttrDict[Any]): + """ + :arg key: (required) + :arg doc_count: (required) + """ + + key: str + doc_count: int + + +class GeoLine(AttrDict[Any]): + """ + A GeoJson GeoLine. + + :arg type: (required) Always `"LineString"` + :arg coordinates: (required) Array of `[lon, lat]` coordinates + """ + + type: str + coordinates: Sequence[Sequence[float]] + + +class GeoLineAggregate(AttrDict[Any]): + """ + :arg type: (required) + :arg geometry: (required) + :arg properties: (required) + :arg meta: + """ + + type: str + geometry: "GeoLine" + properties: Any + meta: Mapping[str, Any] + + +class GeoTileGridAggregate(AttrDict[Any]): + """ + :arg buckets: (required) the aggregation buckets as a list + :arg meta: + """ + + buckets: Sequence["GeoTileGridBucket"] + meta: Mapping[str, Any] + + @property + def buckets_as_dict(self) -> Mapping[str, "GeoTileGridBucket"]: + return self.buckets # type: ignore[return-value] + + +class GeoTileGridBucket(AttrDict[Any]): + """ + :arg key: (required) + :arg doc_count: (required) + """ + + key: str + doc_count: int + + +class GlobalAggregate(AttrDict[Any]): + """ + :arg doc_count: (required) + :arg meta: + """ + + doc_count: int + meta: Mapping[str, Any] + + +class HdrPercentileRanksAggregate(AttrDict[Any]): + """ + :arg values: (required) + :arg meta: + """ + + values: Union[Mapping[str, Union[str, int, None]], Sequence["ArrayPercentilesItem"]] + meta: Mapping[str, Any] + + +class HdrPercentilesAggregate(AttrDict[Any]): + """ + :arg values: (required) + :arg meta: + """ + + values: Union[Mapping[str, Union[str, int, None]], Sequence["ArrayPercentilesItem"]] + meta: Mapping[str, Any] + + +class HistogramAggregate(AttrDict[Any]): + """ + :arg buckets: (required) the aggregation buckets as a list + :arg meta: + """ + + buckets: Sequence["HistogramBucket"] + meta: Mapping[str, Any] + + @property + def buckets_as_dict(self) -> Mapping[str, "HistogramBucket"]: + return self.buckets # type: ignore[return-value] + + +class HistogramBucket(AttrDict[Any]): + """ + :arg key: (required) + :arg doc_count: (required) + :arg key_as_string: + """ + + key: float + doc_count: int + key_as_string: str + + +class Hit(AttrDict[Any]): + """ + :arg index: (required) + :arg id: + :arg score: + :arg explanation: + :arg fields: + :arg highlight: + :arg inner_hits: + :arg matched_queries: + :arg nested: + :arg ignored: + :arg ignored_field_values: + :arg shard: + :arg node: + :arg routing: + :arg source: + :arg 
rank: + :arg seq_no: + :arg primary_term: + :arg version: + :arg sort: + """ + + index: str + id: str + score: Union[float, None] + explanation: "Explanation" + fields: Mapping[str, Any] + highlight: Mapping[str, Sequence[str]] + inner_hits: Mapping[str, "InnerHitsResult"] + matched_queries: Union[Sequence[str], Mapping[str, float]] + nested: "NestedIdentity" + ignored: Sequence[str] + ignored_field_values: Mapping[ + str, Sequence[Union[int, float, str, bool, None, Any]] + ] + shard: str + node: str + routing: str + source: Any + rank: int + seq_no: int + primary_term: int + version: int + sort: Sequence[Union[int, float, str, bool, None, Any]] + + +class HitsMetadata(AttrDict[Any]): + """ + :arg hits: (required) + :arg total: Total hit count information, present only if + `track_total_hits` wasn't `false` in the search request. + :arg max_score: + """ + + hits: Sequence["Hit"] + total: Union["TotalHits", int] + max_score: Union[float, None] + + +class InferenceAggregate(AttrDict[Any]): + """ + :arg value: + :arg feature_importance: + :arg top_classes: + :arg warning: + :arg meta: + """ + + value: Union[int, float, str, bool, None, Any] + feature_importance: Sequence["InferenceFeatureImportance"] + top_classes: Sequence["InferenceTopClassEntry"] + warning: str + meta: Mapping[str, Any] + + +class InferenceClassImportance(AttrDict[Any]): + """ + :arg class_name: (required) + :arg importance: (required) + """ + + class_name: str + importance: float + + +class InferenceFeatureImportance(AttrDict[Any]): + """ + :arg feature_name: (required) + :arg importance: + :arg classes: + """ + + feature_name: str + importance: float + classes: Sequence["InferenceClassImportance"] + + +class InferenceTopClassEntry(AttrDict[Any]): + """ + :arg class_name: (required) + :arg class_probability: (required) + :arg class_score: (required) + """ + + class_name: Union[int, float, str, bool, None, Any] + class_probability: float + class_score: float + + +class InnerHitsResult(AttrDict[Any]): + """ + :arg hits: (required) + """ + + hits: "HitsMetadata" + + +class IpPrefixAggregate(AttrDict[Any]): + """ + :arg buckets: (required) the aggregation buckets as a list + :arg meta: + """ + + buckets: Sequence["IpPrefixBucket"] + meta: Mapping[str, Any] + + @property + def buckets_as_dict(self) -> Mapping[str, "IpPrefixBucket"]: + return self.buckets # type: ignore[return-value] + + +class IpPrefixBucket(AttrDict[Any]): + """ + :arg is_ipv6: (required) + :arg key: (required) + :arg prefix_length: (required) + :arg doc_count: (required) + :arg netmask: + """ + + is_ipv6: bool + key: str + prefix_length: int + doc_count: int + netmask: str + + +class IpRangeAggregate(AttrDict[Any]): + """ + :arg buckets: (required) the aggregation buckets as a list + :arg meta: + """ + + buckets: Sequence["IpRangeBucket"] + meta: Mapping[str, Any] + + @property + def buckets_as_dict(self) -> Mapping[str, "IpRangeBucket"]: + return self.buckets # type: ignore[return-value] + + +class IpRangeBucket(AttrDict[Any]): + """ + :arg doc_count: (required) + :arg key: + :arg from: + :arg to: + """ + + doc_count: int + key: str + from_: str + to: str + + +class KnnCollectorResult(AttrDict[Any]): + """ + :arg name: (required) + :arg reason: (required) + :arg time_in_nanos: (required) + :arg time: + :arg children: + """ + + name: str + reason: str + time_in_nanos: Any + time: Any + children: Sequence["KnnCollectorResult"] + + +class KnnQueryProfileBreakdown(AttrDict[Any]): + """ + :arg advance: (required) + :arg advance_count: (required) + :arg 
build_scorer: (required) + :arg build_scorer_count: (required) + :arg compute_max_score: (required) + :arg compute_max_score_count: (required) + :arg count_weight: (required) + :arg count_weight_count: (required) + :arg create_weight: (required) + :arg create_weight_count: (required) + :arg match: (required) + :arg match_count: (required) + :arg next_doc: (required) + :arg next_doc_count: (required) + :arg score: (required) + :arg score_count: (required) + :arg set_min_competitive_score: (required) + :arg set_min_competitive_score_count: (required) + :arg shallow_advance: (required) + :arg shallow_advance_count: (required) + """ + + advance: int + advance_count: int + build_scorer: int + build_scorer_count: int + compute_max_score: int + compute_max_score_count: int + count_weight: int + count_weight_count: int + create_weight: int + create_weight_count: int + match: int + match_count: int + next_doc: int + next_doc_count: int + score: int + score_count: int + set_min_competitive_score: int + set_min_competitive_score_count: int + shallow_advance: int + shallow_advance_count: int + + +class KnnQueryProfileResult(AttrDict[Any]): + """ + :arg type: (required) + :arg description: (required) + :arg time_in_nanos: (required) + :arg breakdown: (required) + :arg time: + :arg debug: + :arg children: + """ + + type: str + description: str + time_in_nanos: Any + breakdown: "KnnQueryProfileBreakdown" + time: Any + debug: Mapping[str, Any] + children: Sequence["KnnQueryProfileResult"] + + +class LongRareTermsAggregate(AttrDict[Any]): + """ + Result of the `rare_terms` aggregation when the field is some kind of + whole number like a integer, long, or a date. + + :arg buckets: (required) the aggregation buckets as a list + :arg meta: + """ + + buckets: Sequence["LongRareTermsBucket"] + meta: Mapping[str, Any] + + @property + def buckets_as_dict(self) -> Mapping[str, "LongRareTermsBucket"]: + return self.buckets # type: ignore[return-value] + + +class LongRareTermsBucket(AttrDict[Any]): + """ + :arg key: (required) + :arg doc_count: (required) + :arg key_as_string: + """ + + key: int + doc_count: int + key_as_string: str + + +class LongTermsAggregate(AttrDict[Any]): + """ + Result of a `terms` aggregation when the field is some kind of whole + number like a integer, long, or a date. 
+ + :arg doc_count_error_upper_bound: + :arg sum_other_doc_count: + :arg buckets: (required) the aggregation buckets as a list + :arg meta: + """ + + doc_count_error_upper_bound: int + sum_other_doc_count: int + buckets: Sequence["LongTermsBucket"] + meta: Mapping[str, Any] + + @property + def buckets_as_dict(self) -> Mapping[str, "LongTermsBucket"]: + return self.buckets # type: ignore[return-value] + + +class LongTermsBucket(AttrDict[Any]): + """ + :arg key: (required) + :arg doc_count: (required) + :arg key_as_string: + :arg doc_count_error_upper_bound: + """ + + key: int + doc_count: int + key_as_string: str + doc_count_error_upper_bound: int + + +class MatrixStatsAggregate(AttrDict[Any]): + """ + :arg doc_count: (required) + :arg fields: + :arg meta: + """ + + doc_count: int + fields: Sequence["MatrixStatsFields"] + meta: Mapping[str, Any] + + +class MatrixStatsFields(AttrDict[Any]): + """ + :arg name: (required) + :arg count: (required) + :arg mean: (required) + :arg variance: (required) + :arg skewness: (required) + :arg kurtosis: (required) + :arg covariance: (required) + :arg correlation: (required) + """ + + name: str + count: int + mean: float + variance: float + skewness: float + kurtosis: float + covariance: Mapping[str, float] + correlation: Mapping[str, float] + + +class MaxAggregate(AttrDict[Any]): + """ + :arg value: (required) The metric value. A missing value generally + means that there was no data to aggregate, unless specified + otherwise. + :arg value_as_string: + :arg meta: + """ + + value: Union[float, None] + value_as_string: str + meta: Mapping[str, Any] + + +class MedianAbsoluteDeviationAggregate(AttrDict[Any]): + """ + :arg value: (required) The metric value. A missing value generally + means that there was no data to aggregate, unless specified + otherwise. + :arg value_as_string: + :arg meta: + """ + + value: Union[float, None] + value_as_string: str + meta: Mapping[str, Any] + + +class MinAggregate(AttrDict[Any]): + """ + :arg value: (required) The metric value. A missing value generally + means that there was no data to aggregate, unless specified + otherwise. 
+ :arg value_as_string: + :arg meta: + """ + + value: Union[float, None] + value_as_string: str + meta: Mapping[str, Any] + + +class MissingAggregate(AttrDict[Any]): + """ + :arg doc_count: (required) + :arg meta: + """ + + doc_count: int + meta: Mapping[str, Any] + + +class MultiTermsAggregate(AttrDict[Any]): + """ + :arg doc_count_error_upper_bound: + :arg sum_other_doc_count: + :arg buckets: (required) the aggregation buckets as a list + :arg meta: + """ + + doc_count_error_upper_bound: int + sum_other_doc_count: int + buckets: Sequence["MultiTermsBucket"] + meta: Mapping[str, Any] + + @property + def buckets_as_dict(self) -> Mapping[str, "MultiTermsBucket"]: + return self.buckets # type: ignore[return-value] + + +class MultiTermsBucket(AttrDict[Any]): + """ + :arg key: (required) + :arg doc_count: (required) + :arg key_as_string: + :arg doc_count_error_upper_bound: + """ + + key: Sequence[Union[int, float, str, bool, None, Any]] + doc_count: int + key_as_string: str + doc_count_error_upper_bound: int + + +class NestedAggregate(AttrDict[Any]): + """ + :arg doc_count: (required) + :arg meta: + """ + + doc_count: int + meta: Mapping[str, Any] + + +class NestedIdentity(AttrDict[Any]): + """ + :arg field: (required) + :arg offset: (required) + :arg _nested: + """ + + field: str + offset: int + _nested: "NestedIdentity" + + +class ParentAggregate(AttrDict[Any]): + """ + :arg doc_count: (required) + :arg meta: + """ + + doc_count: int + meta: Mapping[str, Any] + + +class PercentilesBucketAggregate(AttrDict[Any]): + """ + :arg values: (required) + :arg meta: + """ + + values: Union[Mapping[str, Union[str, int, None]], Sequence["ArrayPercentilesItem"]] + meta: Mapping[str, Any] + + +class PhraseSuggest(AttrDict[Any]): + """ + :arg options: (required) + :arg length: (required) + :arg offset: (required) + :arg text: (required) + """ + + options: Sequence["PhraseSuggestOption"] + length: int + offset: int + text: str + + +class PhraseSuggestOption(AttrDict[Any]): + """ + :arg text: (required) + :arg score: (required) + :arg highlighted: + :arg collate_match: + """ + + text: str + score: float + highlighted: str + collate_match: bool + + +class Profile(AttrDict[Any]): + """ + :arg shards: (required) + """ + + shards: Sequence["ShardProfile"] + + +class QueryBreakdown(AttrDict[Any]): + """ + :arg advance: (required) + :arg advance_count: (required) + :arg build_scorer: (required) + :arg build_scorer_count: (required) + :arg create_weight: (required) + :arg create_weight_count: (required) + :arg match: (required) + :arg match_count: (required) + :arg shallow_advance: (required) + :arg shallow_advance_count: (required) + :arg next_doc: (required) + :arg next_doc_count: (required) + :arg score: (required) + :arg score_count: (required) + :arg compute_max_score: (required) + :arg compute_max_score_count: (required) + :arg count_weight: (required) + :arg count_weight_count: (required) + :arg set_min_competitive_score: (required) + :arg set_min_competitive_score_count: (required) + """ + + advance: int + advance_count: int + build_scorer: int + build_scorer_count: int + create_weight: int + create_weight_count: int + match: int + match_count: int + shallow_advance: int + shallow_advance_count: int + next_doc: int + next_doc_count: int + score: int + score_count: int + compute_max_score: int + compute_max_score_count: int + count_weight: int + count_weight_count: int + set_min_competitive_score: int + set_min_competitive_score_count: int + + +class QueryProfile(AttrDict[Any]): + """ + :arg breakdown: 
(required) + :arg description: (required) + :arg time_in_nanos: (required) + :arg type: (required) + :arg children: + """ + + breakdown: "QueryBreakdown" + description: str + time_in_nanos: Any + type: str + children: Sequence["QueryProfile"] + + +class RangeAggregate(AttrDict[Any]): + """ + :arg buckets: (required) the aggregation buckets as a list + :arg meta: + """ + + buckets: Sequence["RangeBucket"] + meta: Mapping[str, Any] + + @property + def buckets_as_dict(self) -> Mapping[str, "RangeBucket"]: + return self.buckets # type: ignore[return-value] + + +class RangeBucket(AttrDict[Any]): + """ + :arg doc_count: (required) + :arg from: + :arg to: + :arg from_as_string: + :arg to_as_string: + :arg key: The bucket key. Present if the aggregation is _not_ keyed + """ + + doc_count: int + from_: float + to: float + from_as_string: str + to_as_string: str + key: str + + +class RateAggregate(AttrDict[Any]): + """ + :arg value: (required) + :arg value_as_string: + :arg meta: + """ + + value: float + value_as_string: str + meta: Mapping[str, Any] + + +class Retries(AttrDict[Any]): + """ + :arg bulk: (required) + :arg search: (required) + """ + + bulk: int + search: int + + +class ReverseNestedAggregate(AttrDict[Any]): + """ + :arg doc_count: (required) + :arg meta: + """ + + doc_count: int + meta: Mapping[str, Any] + + +class SamplerAggregate(AttrDict[Any]): + """ + :arg doc_count: (required) + :arg meta: + """ + + doc_count: int + meta: Mapping[str, Any] + + +class ScriptedMetricAggregate(AttrDict[Any]): + """ + :arg value: (required) + :arg meta: + """ + + value: Any + meta: Mapping[str, Any] + + +class SearchProfile(AttrDict[Any]): + """ + :arg collector: (required) + :arg query: (required) + :arg rewrite_time: (required) + """ + + collector: Sequence["Collector"] + query: Sequence["QueryProfile"] + rewrite_time: int + + +class ShardFailure(AttrDict[Any]): + """ + :arg reason: (required) + :arg shard: (required) + :arg index: + :arg node: + :arg status: + """ + + reason: "ErrorCause" + shard: int + index: str + node: str + status: str + + +class ShardProfile(AttrDict[Any]): + """ + :arg aggregations: (required) + :arg cluster: (required) + :arg id: (required) + :arg index: (required) + :arg node_id: (required) + :arg searches: (required) + :arg shard_id: (required) + :arg dfs: + :arg fetch: + """ + + aggregations: Sequence["AggregationProfile"] + cluster: str + id: str + index: str + node_id: str + searches: Sequence["SearchProfile"] + shard_id: int + dfs: "DfsProfile" + fetch: "FetchProfile" + + +class ShardStatistics(AttrDict[Any]): + """ + :arg failed: (required) The number of shards the operation or search + attempted to run on but failed. + :arg successful: (required) The number of shards the operation or + search succeeded on. + :arg total: (required) The number of shards the operation or search + will run on overall. 
+ :arg failures: + :arg skipped: + """ + + failed: int + successful: int + total: int + failures: Sequence["ShardFailure"] + skipped: int + + +class SignificantLongTermsAggregate(AttrDict[Any]): + """ + :arg bg_count: + :arg doc_count: + :arg buckets: (required) the aggregation buckets as a list + :arg meta: + """ + + bg_count: int + doc_count: int + buckets: Sequence["SignificantLongTermsBucket"] + meta: Mapping[str, Any] + + @property + def buckets_as_dict(self) -> Mapping[str, "SignificantLongTermsBucket"]: + return self.buckets # type: ignore[return-value] + + +class SignificantLongTermsBucket(AttrDict[Any]): + """ + :arg key: (required) + :arg score: (required) + :arg bg_count: (required) + :arg doc_count: (required) + :arg key_as_string: + """ + + key: int + score: float + bg_count: int + doc_count: int + key_as_string: str + + +class SignificantStringTermsAggregate(AttrDict[Any]): + """ + :arg bg_count: + :arg doc_count: + :arg buckets: (required) the aggregation buckets as a list + :arg meta: + """ + + bg_count: int + doc_count: int + buckets: Sequence["SignificantStringTermsBucket"] + meta: Mapping[str, Any] + + @property + def buckets_as_dict(self) -> Mapping[str, "SignificantStringTermsBucket"]: + return self.buckets # type: ignore[return-value] + + +class SignificantStringTermsBucket(AttrDict[Any]): + """ + :arg key: (required) + :arg score: (required) + :arg bg_count: (required) + :arg doc_count: (required) + """ + + key: str + score: float + bg_count: int + doc_count: int + + +class SimpleValueAggregate(AttrDict[Any]): + """ + :arg value: (required) The metric value. A missing value generally + means that there was no data to aggregate, unless specified + otherwise. + :arg value_as_string: + :arg meta: + """ + + value: Union[float, None] + value_as_string: str + meta: Mapping[str, Any] + + +class StandardDeviationBounds(AttrDict[Any]): + """ + :arg upper: (required) + :arg lower: (required) + :arg upper_population: (required) + :arg lower_population: (required) + :arg upper_sampling: (required) + :arg lower_sampling: (required) + """ + + upper: Union[float, None] + lower: Union[float, None] + upper_population: Union[float, None] + lower_population: Union[float, None] + upper_sampling: Union[float, None] + lower_sampling: Union[float, None] + + +class StandardDeviationBoundsAsString(AttrDict[Any]): + """ + :arg upper: (required) + :arg lower: (required) + :arg upper_population: (required) + :arg lower_population: (required) + :arg upper_sampling: (required) + :arg lower_sampling: (required) + """ + + upper: str + lower: str + upper_population: str + lower_population: str + upper_sampling: str + lower_sampling: str + + +class StatsAggregate(AttrDict[Any]): + """ + Statistics aggregation result. `min`, `max` and `avg` are missing if + there were no values to process (`count` is zero). 
+ + :arg count: (required) + :arg min: (required) + :arg max: (required) + :arg avg: (required) + :arg sum: (required) + :arg min_as_string: + :arg max_as_string: + :arg avg_as_string: + :arg sum_as_string: + :arg meta: + """ + + count: int + min: Union[float, None] + max: Union[float, None] + avg: Union[float, None] + sum: float + min_as_string: str + max_as_string: str + avg_as_string: str + sum_as_string: str + meta: Mapping[str, Any] + + +class StatsBucketAggregate(AttrDict[Any]): + """ + :arg count: (required) + :arg min: (required) + :arg max: (required) + :arg avg: (required) + :arg sum: (required) + :arg min_as_string: + :arg max_as_string: + :arg avg_as_string: + :arg sum_as_string: + :arg meta: + """ + + count: int + min: Union[float, None] + max: Union[float, None] + avg: Union[float, None] + sum: float + min_as_string: str + max_as_string: str + avg_as_string: str + sum_as_string: str + meta: Mapping[str, Any] + + +class StringRareTermsAggregate(AttrDict[Any]): + """ + Result of the `rare_terms` aggregation when the field is a string. + + :arg buckets: (required) the aggregation buckets as a list + :arg meta: + """ + + buckets: Sequence["StringRareTermsBucket"] + meta: Mapping[str, Any] + + @property + def buckets_as_dict(self) -> Mapping[str, "StringRareTermsBucket"]: + return self.buckets # type: ignore[return-value] + + +class StringRareTermsBucket(AttrDict[Any]): + """ + :arg key: (required) + :arg doc_count: (required) + """ + + key: str + doc_count: int + + +class StringStatsAggregate(AttrDict[Any]): + """ + :arg count: (required) + :arg min_length: (required) + :arg max_length: (required) + :arg avg_length: (required) + :arg entropy: (required) + :arg distribution: + :arg min_length_as_string: + :arg max_length_as_string: + :arg avg_length_as_string: + :arg meta: + """ + + count: int + min_length: Union[int, None] + max_length: Union[int, None] + avg_length: Union[float, None] + entropy: Union[float, None] + distribution: Union[Mapping[str, float], None] + min_length_as_string: str + max_length_as_string: str + avg_length_as_string: str + meta: Mapping[str, Any] + + +class StringTermsAggregate(AttrDict[Any]): + """ + Result of a `terms` aggregation when the field is a string. + + :arg doc_count_error_upper_bound: + :arg sum_other_doc_count: + :arg buckets: (required) the aggregation buckets as a list + :arg meta: + """ + + doc_count_error_upper_bound: int + sum_other_doc_count: int + buckets: Sequence["StringTermsBucket"] + meta: Mapping[str, Any] + + @property + def buckets_as_dict(self) -> Mapping[str, "StringTermsBucket"]: + return self.buckets # type: ignore[return-value] + + +class StringTermsBucket(AttrDict[Any]): + """ + :arg key: (required) + :arg doc_count: (required) + :arg doc_count_error_upper_bound: + """ + + key: Union[int, float, str, bool, None, Any] + doc_count: int + doc_count_error_upper_bound: int + + +class SumAggregate(AttrDict[Any]): + """ + Sum aggregation result. `value` is always present and is zero if there + were no values to process. + + :arg value: (required) The metric value. A missing value generally + means that there was no data to aggregate, unless specified + otherwise. 
+ :arg value_as_string: + :arg meta: + """ + + value: Union[float, None] + value_as_string: str + meta: Mapping[str, Any] + + +class TDigestPercentileRanksAggregate(AttrDict[Any]): + """ + :arg values: (required) + :arg meta: + """ + + values: Union[Mapping[str, Union[str, int, None]], Sequence["ArrayPercentilesItem"]] + meta: Mapping[str, Any] + + +class TDigestPercentilesAggregate(AttrDict[Any]): + """ + :arg values: (required) + :arg meta: + """ + + values: Union[Mapping[str, Union[str, int, None]], Sequence["ArrayPercentilesItem"]] + meta: Mapping[str, Any] + + +class TTestAggregate(AttrDict[Any]): + """ + :arg value: (required) + :arg value_as_string: + :arg meta: + """ + + value: Union[float, None] + value_as_string: str + meta: Mapping[str, Any] + + +class TermSuggest(AttrDict[Any]): + """ + :arg options: (required) + :arg length: (required) + :arg offset: (required) + :arg text: (required) + """ + + options: Sequence["TermSuggestOption"] + length: int + offset: int + text: str + + +class TermSuggestOption(AttrDict[Any]): + """ + :arg text: (required) + :arg score: (required) + :arg freq: (required) + :arg highlighted: + :arg collate_match: + """ + + text: str + score: float + freq: int + highlighted: str + collate_match: bool + + +class TimeSeriesAggregate(AttrDict[Any]): + """ + :arg buckets: (required) the aggregation buckets as a list + :arg meta: + """ + + buckets: Sequence["TimeSeriesBucket"] + meta: Mapping[str, Any] + + @property + def buckets_as_dict(self) -> Mapping[str, "TimeSeriesBucket"]: + return self.buckets # type: ignore[return-value] + + +class TimeSeriesBucket(AttrDict[Any]): + """ + :arg key: (required) + :arg doc_count: (required) + """ + + key: Mapping[str, Union[int, float, str, bool, None, Any]] + doc_count: int + + +class TopHitsAggregate(AttrDict[Any]): + """ + :arg hits: (required) + :arg meta: + """ + + hits: "HitsMetadata" + meta: Mapping[str, Any] + + +class TopMetrics(AttrDict[Any]): + """ + :arg sort: (required) + :arg metrics: (required) + """ + + sort: Sequence[Union[Union[int, float, str, bool, None, Any], None]] + metrics: Mapping[str, Union[Union[int, float, str, bool, None, Any], None]] + + +class TopMetricsAggregate(AttrDict[Any]): + """ + :arg top: (required) + :arg meta: + """ + + top: Sequence["TopMetrics"] + meta: Mapping[str, Any] + + +class TotalHits(AttrDict[Any]): + """ + :arg relation: (required) + :arg value: (required) + """ + + relation: Literal["eq", "gte"] + value: int + + +class UnmappedRareTermsAggregate(AttrDict[Any]): + """ + Result of a `rare_terms` aggregation when the field is unmapped. + `buckets` is always empty. + + :arg buckets: (required) the aggregation buckets as a list + :arg meta: + """ + + buckets: Sequence[Any] + meta: Mapping[str, Any] + + @property + def buckets_as_dict(self) -> Mapping[str, Any]: + return self.buckets # type: ignore[return-value] + + +class UnmappedSamplerAggregate(AttrDict[Any]): + """ + :arg doc_count: (required) + :arg meta: + """ + + doc_count: int + meta: Mapping[str, Any] + + +class UnmappedSignificantTermsAggregate(AttrDict[Any]): + """ + Result of the `significant_terms` aggregation on an unmapped field. + `buckets` is always empty. 
+ + :arg bg_count: + :arg doc_count: + :arg buckets: (required) the aggregation buckets as a list + :arg meta: + """ + + bg_count: int + doc_count: int + buckets: Sequence[Any] + meta: Mapping[str, Any] + + @property + def buckets_as_dict(self) -> Mapping[str, Any]: + return self.buckets # type: ignore[return-value] + + +class UnmappedTermsAggregate(AttrDict[Any]): + """ + Result of a `terms` aggregation when the field is unmapped. `buckets` + is always empty. + + :arg doc_count_error_upper_bound: + :arg sum_other_doc_count: + :arg buckets: (required) the aggregation buckets as a list + :arg meta: + """ + + doc_count_error_upper_bound: int + sum_other_doc_count: int + buckets: Sequence[Any] + meta: Mapping[str, Any] + + @property + def buckets_as_dict(self) -> Mapping[str, Any]: + return self.buckets # type: ignore[return-value] + + +class ValueCountAggregate(AttrDict[Any]): + """ + Value count aggregation result. `value` is always present. + + :arg value: (required) The metric value. A missing value generally + means that there was no data to aggregate, unless specified + otherwise. + :arg value_as_string: + :arg meta: + """ + + value: Union[float, None] + value_as_string: str + meta: Mapping[str, Any] + + +class VariableWidthHistogramAggregate(AttrDict[Any]): + """ + :arg buckets: (required) the aggregation buckets as a list + :arg meta: + """ + + buckets: Sequence["VariableWidthHistogramBucket"] + meta: Mapping[str, Any] + + @property + def buckets_as_dict(self) -> Mapping[str, "VariableWidthHistogramBucket"]: + return self.buckets # type: ignore[return-value] + + +class VariableWidthHistogramBucket(AttrDict[Any]): + """ + :arg min: (required) + :arg key: (required) + :arg max: (required) + :arg doc_count: (required) + :arg min_as_string: + :arg key_as_string: + :arg max_as_string: + """ + + min: float + key: float + max: float + doc_count: int + min_as_string: str + key_as_string: str + max_as_string: str + + +class WeightedAvgAggregate(AttrDict[Any]): + """ + Weighted average aggregation result. `value` is missing if the weight + was set to zero. + + :arg value: (required) The metric value. A missing value generally + means that there was no data to aggregate, unless specified + otherwise. + :arg value_as_string: + :arg meta: + """ + + value: Union[float, None] + value_as_string: str + meta: Mapping[str, Any] diff --git a/elasticsearch/dsl/update_by_query.py b/elasticsearch/dsl/update_by_query.py new file mode 100644 index 000000000..fdff22bc8 --- /dev/null +++ b/elasticsearch/dsl/update_by_query.py @@ -0,0 +1,19 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
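The aggregate response classes generated above all share one pattern: required fields are exposed as typed attributes, and bucketed aggregations expose ``buckets`` as a list plus a ``buckets_as_dict`` view for keyed access. A minimal sketch of how these wrappers are typically consumed, assuming a hypothetical index and a ``tag`` keyword field::

    from elasticsearch.dsl import Search

    s = Search(index="blog-posts")                 # hypothetical index name
    s.aggs.bucket("tags", "terms", field="tag")    # a terms agg on a string field yields a StringTermsAggregate
    response = s.execute()

    for bucket in response.aggregations.tags.buckets:   # Sequence[StringTermsBucket]
        print(bucket.key, bucket.doc_count)
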
+ +from ._async.update_by_query import AsyncUpdateByQuery # noqa: F401 +from ._sync.update_by_query import UpdateByQuery # noqa: F401 diff --git a/elasticsearch/dsl/update_by_query_base.py b/elasticsearch/dsl/update_by_query_base.py new file mode 100644 index 000000000..e4490ddf6 --- /dev/null +++ b/elasticsearch/dsl/update_by_query_base.py @@ -0,0 +1,149 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from typing import Any, Dict, Type + +from typing_extensions import Self + +from .query import Bool, Q +from .response import UpdateByQueryResponse +from .search_base import ProxyDescriptor, QueryProxy, Request +from .utils import _R, recursive_to_dict + + +class UpdateByQueryBase(Request[_R]): + query = ProxyDescriptor[Self]("query") + + def __init__(self, **kwargs: Any): + """ + Update by query request to elasticsearch. + + :arg using: `Elasticsearch` instance to use + :arg index: limit the search to index + :arg doc_type: only query this type. + + All the parameters supplied (or omitted) at creation type can be later + overridden by methods (`using`, `index` and `doc_type` respectively). + + """ + super().__init__(**kwargs) + self._response_class = UpdateByQueryResponse[_R] + self._script: Dict[str, Any] = {} + self._query_proxy = QueryProxy(self, "query") + + def filter(self, *args: Any, **kwargs: Any) -> Self: + return self.query(Bool(filter=[Q(*args, **kwargs)])) + + def exclude(self, *args: Any, **kwargs: Any) -> Self: + return self.query(Bool(filter=[~Q(*args, **kwargs)])) + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> Self: + """ + Construct a new `UpdateByQuery` instance from a raw dict containing the search + body. Useful when migrating from raw dictionaries. + + Example:: + + ubq = UpdateByQuery.from_dict({ + "query": { + "bool": { + "must": [...] + } + }, + "script": {...} + }) + ubq = ubq.filter('term', published=True) + """ + u = cls() + u.update_from_dict(d) + return u + + def _clone(self) -> Self: + """ + Return a clone of the current search request. Performs a shallow copy + of all the underlying objects. Used internally by most state modifying + APIs. + """ + ubq = super()._clone() + + ubq._response_class = self._response_class + ubq._script = self._script.copy() + ubq.query._proxied = self.query._proxied + return ubq + + def response_class(self, cls: Type[UpdateByQueryResponse[_R]]) -> Self: + """ + Override the default wrapper used for the response. + """ + ubq = self._clone() + ubq._response_class = cls + return ubq + + def update_from_dict(self, d: Dict[str, Any]) -> Self: + """ + Apply options from a serialized body to the current instance. Modifies + the object in-place. Used mostly by ``from_dict``. 
+        """
+        d = d.copy()
+        if "query" in d:
+            self.query._proxied = Q(d.pop("query"))
+        if "script" in d:
+            self._script = d.pop("script")
+        self._extra.update(d)
+        return self
+
+    def script(self, **kwargs: Any) -> Self:
+        """
+        Define the update action to take. See
+        https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting-using.html
+        for more details.
+
+        Note: the API only accepts a single script, so calling ``script``
+        multiple times will overwrite the previous value.
+
+        Example::
+
+            ubq = UpdateByQuery()
+            ubq = ubq.script(source="ctx._source.likes++")
+            ubq = ubq.script(source="ctx._source.likes += params.f",
+                             lang="expression",
+                             params={'f': 3})
+        """
+        ubq = self._clone()
+        if ubq._script:
+            ubq._script = {}
+        ubq._script.update(kwargs)
+        return ubq
+
+    def to_dict(self, **kwargs: Any) -> Dict[str, Any]:
+        """
+        Serialize the search into the dictionary that will be sent over as the
+        request's body.
+
+        All additional keyword arguments will be included into the dictionary.
+        """
+        d = {}
+        if self.query:
+            d["query"] = self.query.to_dict()
+
+        if self._script:
+            d["script"] = self._script
+
+        d.update(recursive_to_dict(self._extra))
+        d.update(recursive_to_dict(kwargs))
+        return d
diff --git a/elasticsearch/dsl/utils.py b/elasticsearch/dsl/utils.py
new file mode 100644
index 000000000..b52ec63a0
--- /dev/null
+++ b/elasticsearch/dsl/utils.py
@@ -0,0 +1,687 @@
+# Licensed to Elasticsearch B.V. under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch B.V. licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
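Putting the ``UpdateByQueryBase`` helpers above together, a request is usually built by chaining ``filter`` and ``script`` and then serialized with ``to_dict``. A small sketch, assuming a hypothetical ``blogs`` index and ``views`` field::

    from elasticsearch.dsl.update_by_query import UpdateByQuery

    ubq = (
        UpdateByQuery(index="blogs")
        .filter("term", published=True)
        .script(source="ctx._source.views += params.n", params={"n": 1})
    )

    # to_dict() produces roughly:
    # {"query": {"bool": {"filter": [{"term": {"published": True}}]}},
    #  "script": {"source": "ctx._source.views += params.n", "params": {"n": 1}}}
    body = ubq.to_dict()
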
+ + +import collections.abc +from copy import copy +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ClassVar, + Dict, + Generic, + Iterable, + Iterator, + List, + Mapping, + Optional, + Tuple, + Type, + Union, + cast, +) + +from elastic_transport.client_utils import DEFAULT +from typing_extensions import Self, TypeAlias, TypeVar + +from .exceptions import UnknownDslObject, ValidationException + +if TYPE_CHECKING: + from elastic_transport import ObjectApiResponse + + from elasticsearch import AsyncElasticsearch, Elasticsearch + + from .document_base import DocumentOptions + from .field import Field + from .index_base import IndexBase + from .response import Hit # noqa: F401 + from .types import Hit as HitBaseType + +UsingType: TypeAlias = Union[str, "Elasticsearch"] +AsyncUsingType: TypeAlias = Union[str, "AsyncElasticsearch"] +AnyUsingType: TypeAlias = Union[str, "Elasticsearch", "AsyncElasticsearch"] + +_ValT = TypeVar("_ValT") # used by AttrDict +_R = TypeVar("_R", default="Hit") # used by Search and Response classes + +SKIP_VALUES = ("", None) +EXPAND__TO_DOT = True + +DOC_META_FIELDS = frozenset( + ( + "id", + "routing", + ) +) + +META_FIELDS = frozenset( + ( + # Elasticsearch metadata fields, except 'type' + "index", + "using", + "score", + "version", + "seq_no", + "primary_term", + ) +).union(DOC_META_FIELDS) + + +def _wrap(val: Any, obj_wrapper: Optional[Callable[[Any], Any]] = None) -> Any: + if isinstance(val, dict): + return AttrDict(val) if obj_wrapper is None else obj_wrapper(val) + if isinstance(val, list): + return AttrList(val) + return val + + +def _recursive_to_dict(value: Any) -> Any: + if hasattr(value, "to_dict"): + return value.to_dict() + elif isinstance(value, dict) or isinstance(value, AttrDict): + return {k: _recursive_to_dict(v) for k, v in value.items()} + elif isinstance(value, list) or isinstance(value, AttrList): + return [recursive_to_dict(elem) for elem in value] + else: + return value + + +class AttrList(Generic[_ValT]): + def __init__( + self, l: List[_ValT], obj_wrapper: Optional[Callable[[_ValT], Any]] = None + ): + # make iterables into lists + if not isinstance(l, list): + l = list(l) + self._l_ = l + self._obj_wrapper = obj_wrapper + + def __repr__(self) -> str: + return repr(self._l_) + + def __eq__(self, other: Any) -> bool: + if isinstance(other, AttrList): + return other._l_ == self._l_ + # make sure we still equal to a dict with the same data + return bool(other == self._l_) + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __getitem__(self, k: Union[int, slice]) -> Any: + l = self._l_[k] + if isinstance(k, slice): + return AttrList[_ValT](l, obj_wrapper=self._obj_wrapper) # type: ignore[arg-type] + return _wrap(l, self._obj_wrapper) + + def __setitem__(self, k: int, value: _ValT) -> None: + self._l_[k] = value + + def __iter__(self) -> Iterator[Any]: + return map(lambda i: _wrap(i, self._obj_wrapper), self._l_) + + def __len__(self) -> int: + return len(self._l_) + + def __nonzero__(self) -> bool: + return bool(self._l_) + + __bool__ = __nonzero__ + + def __getattr__(self, name: str) -> Any: + return getattr(self._l_, name) + + def __getstate__(self) -> Tuple[List[_ValT], Optional[Callable[[_ValT], Any]]]: + return self._l_, self._obj_wrapper + + def __setstate__( + self, state: Tuple[List[_ValT], Optional[Callable[[_ValT], Any]]] + ) -> None: + self._l_, self._obj_wrapper = state + + def to_list(self) -> List[_ValT]: + return self._l_ + + +class AttrDict(Generic[_ValT]): + """ + Helper class to provide 
attribute like access (read and write) to + dictionaries. Used to provide a convenient way to access both results and + nested dsl dicts. + """ + + _d_: Dict[str, _ValT] + RESERVED: Dict[str, str] = {"from_": "from"} + + def __init__(self, d: Dict[str, _ValT]): + # assign the inner dict manually to prevent __setattr__ from firing + super().__setattr__("_d_", d) + + def __contains__(self, key: object) -> bool: + return key in self._d_ + + def __nonzero__(self) -> bool: + return bool(self._d_) + + __bool__ = __nonzero__ + + def __dir__(self) -> List[str]: + # introspection for auto-complete in IPython etc + return list(self._d_.keys()) + + def __eq__(self, other: Any) -> bool: + if isinstance(other, AttrDict): + return other._d_ == self._d_ + # make sure we still equal to a dict with the same data + return bool(other == self._d_) + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __repr__(self) -> str: + r = repr(self._d_) + if len(r) > 60: + r = r[:60] + "...}" + return r + + def __getstate__(self) -> Tuple[Dict[str, _ValT]]: + return (self._d_,) + + def __setstate__(self, state: Tuple[Dict[str, _ValT]]) -> None: + super().__setattr__("_d_", state[0]) + + def __getattr__(self, attr_name: str) -> Any: + try: + return self.__getitem__(attr_name) + except KeyError: + raise AttributeError( + f"{self.__class__.__name__!r} object has no attribute {attr_name!r}" + ) + + def __delattr__(self, attr_name: str) -> None: + try: + del self._d_[self.RESERVED.get(attr_name, attr_name)] + except KeyError: + raise AttributeError( + f"{self.__class__.__name__!r} object has no attribute {attr_name!r}" + ) + + def __getitem__(self, key: str) -> Any: + return _wrap(self._d_[self.RESERVED.get(key, key)]) + + def __setitem__(self, key: str, value: _ValT) -> None: + self._d_[self.RESERVED.get(key, key)] = value + + def __delitem__(self, key: str) -> None: + del self._d_[self.RESERVED.get(key, key)] + + def __setattr__(self, name: str, value: _ValT) -> None: + # the __orig__class__ attribute has to be treated as an exception, as + # is it added to an object when it is instantiated with type arguments + if ( + name in self._d_ or not hasattr(self.__class__, name) + ) and name != "__orig_class__": + self._d_[self.RESERVED.get(name, name)] = value + else: + # there is an attribute on the class (could be property, ..) - don't add it as field + super().__setattr__(name, value) + + def __iter__(self) -> Iterator[str]: + return iter(self._d_) + + def to_dict(self, recursive: bool = False) -> Dict[str, _ValT]: + return cast( + Dict[str, _ValT], _recursive_to_dict(self._d_) if recursive else self._d_ + ) + + def keys(self) -> Iterable[str]: + return self._d_.keys() + + def items(self) -> Iterable[Tuple[str, _ValT]]: + return self._d_.items() + + +class DslMeta(type): + """ + Base Metaclass for DslBase subclasses that builds a registry of all classes + for given DslBase subclass (== all the query types for the Query subclass + of DslBase). + + It then uses the information from that registry (as well as `name` and + `shortcut` attributes from the base class) to construct any subclass based + on it's name. + + For typical use see `QueryMeta` and `Query` in `elasticsearch.dsl.query`. 
+ """ + + name: str + _classes: Dict[str, type] + _type_name: str + _types: ClassVar[Dict[str, Type["DslBase"]]] = {} + + def __init__(cls, name: str, bases: Tuple[type, ...], attrs: Dict[str, Any]): + super().__init__(name, bases, attrs) + # skip for DslBase + if not hasattr(cls, "_type_shortcut"): + return + if not cls.name: + # abstract base class, register it's shortcut + cls._types[cls._type_name] = cls._type_shortcut + # and create a registry for subclasses + if not hasattr(cls, "_classes"): + cls._classes = {} + elif cls.name not in cls._classes: + # normal class, register it + cls._classes[cls.name] = cls + + @classmethod + def get_dsl_type(cls, name: str) -> Type["DslBase"]: + try: + return cls._types[name] + except KeyError: + raise UnknownDslObject(f"DSL type {name} does not exist.") + + +class DslBase(metaclass=DslMeta): + """ + Base class for all DSL objects - queries, filters, aggregations etc. Wraps + a dictionary representing the object's json. + + Provides several feature: + - attribute access to the wrapped dictionary (.field instead of ['field']) + - _clone method returning a copy of self + - to_dict method to serialize into dict (to be sent via elasticsearch-py) + - basic logical operators (&, | and ~) using a Bool(Filter|Query) TODO: + move into a class specific for Query/Filter + - respects the definition of the class and (de)serializes it's + attributes based on the `_param_defs` definition (for example turning + all values in the `must` attribute into Query objects) + """ + + _param_defs: ClassVar[Dict[str, Dict[str, Union[str, bool]]]] = {} + + @classmethod + def get_dsl_class( + cls: Type[Self], name: str, default: Optional[str] = None + ) -> Type[Self]: + try: + return cls._classes[name] + except KeyError: + if default is not None: + return cls._classes[default] + raise UnknownDslObject( + f"DSL class `{name}` does not exist in {cls._type_name}." + ) + + def __init__(self, _expand__to_dot: Optional[bool] = None, **params: Any) -> None: + if _expand__to_dot is None: + _expand__to_dot = EXPAND__TO_DOT + self._params: Dict[str, Any] = {} + for pname, pvalue in params.items(): + if pvalue == DEFAULT: + continue + # expand "__" to dots + if "__" in pname and _expand__to_dot: + pname = pname.replace("__", ".") + # convert instrumented fields to string + if type(pvalue).__name__ == "InstrumentedField": + pvalue = str(pvalue) + self._setattr(pname, pvalue) + + def _repr_params(self) -> str: + """Produce a repr of all our parameters to be used in __repr__.""" + return ", ".join( + f"{n.replace('.', '__')}={v!r}" + for (n, v) in sorted(self._params.items()) + # make sure we don't include empty typed params + if "type" not in self._param_defs.get(n, {}) or v + ) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self._repr_params()})" + + def __eq__(self, other: Any) -> bool: + return isinstance(other, self.__class__) and other.to_dict() == self.to_dict() + + def __ne__(self, other: Any) -> bool: + return not self == other + + def __setattr__(self, name: str, value: Any) -> None: + if name.startswith("_"): + return super().__setattr__(name, value) + return self._setattr(name, value) + + def _setattr(self, name: str, value: Any) -> None: + # if this attribute has special type assigned to it... 
+ name = AttrDict.RESERVED.get(name, name) + if name in self._param_defs: + pinfo = self._param_defs[name] + + if "type" in pinfo: + # get the shortcut used to construct this type (query.Q, aggs.A, etc) + shortcut = self.__class__.get_dsl_type(str(pinfo["type"])) + + # list of dict(name -> DslBase) + if pinfo.get("multi") and pinfo.get("hash"): + if not isinstance(value, (tuple, list)): + value = (value,) + value = list( + {k: shortcut(v) for (k, v) in obj.items()} for obj in value + ) + elif pinfo.get("multi"): + if not isinstance(value, (tuple, list)): + value = (value,) + value = list(map(shortcut, value)) + + # dict(name -> DslBase), make sure we pickup all the objs + elif pinfo.get("hash"): + value = {k: shortcut(v) for (k, v) in value.items()} + + # single value object, just convert + else: + value = shortcut(value) + self._params[name] = value + + def __getattr__(self, name: str) -> Any: + if name.startswith("_"): + raise AttributeError( + f"{self.__class__.__name__!r} object has no attribute {name!r}" + ) + + value = None + try: + value = self._params[name] + except KeyError: + # compound types should never throw AttributeError and return empty + # container instead + if name in self._param_defs: + pinfo = self._param_defs[name] + if pinfo.get("multi"): + value = self._params.setdefault(name, []) + elif pinfo.get("hash"): + value = self._params.setdefault(name, {}) + if value is None: + raise AttributeError( + f"{self.__class__.__name__!r} object has no attribute {name!r}" + ) + + # wrap nested dicts in AttrDict for convenient access + if isinstance(value, dict): + return AttrDict(value) + return value + + def to_dict(self) -> Dict[str, Any]: + """ + Serialize the DSL object to plain dict + """ + d = {} + for pname, value in self._params.items(): + pinfo = self._param_defs.get(pname) + + # typed param + if pinfo and "type" in pinfo: + # don't serialize empty lists and dicts for typed fields + if value in ({}, []): + continue + + # list of dict(name -> DslBase) + if pinfo.get("multi") and pinfo.get("hash"): + value = list( + {k: v.to_dict() for k, v in obj.items()} for obj in value + ) + + # multi-values are serialized as list of dicts + elif pinfo.get("multi"): + value = list(map(lambda x: x.to_dict(), value)) + + # squash all the hash values into one dict + elif pinfo.get("hash"): + value = {k: v.to_dict() for k, v in value.items()} + + # serialize single values + else: + value = value.to_dict() + + # serialize anything with to_dict method + elif hasattr(value, "to_dict"): + value = value.to_dict() + + d[pname] = value + return {self.name: d} + + def _clone(self) -> Self: + c = self.__class__() + for attr in self._params: + c._params[attr] = copy(self._params[attr]) + return c + + +if TYPE_CHECKING: + HitMetaBase = HitBaseType +else: + HitMetaBase = AttrDict[Any] + + +class HitMeta(HitMetaBase): + inner_hits: Mapping[str, Any] + + def __init__( + self, + document: Dict[str, Any], + exclude: Tuple[str, ...] 
= ("_source", "_fields"), + ): + d = { + k[1:] if k.startswith("_") else k: v + for (k, v) in document.items() + if k not in exclude + } + if "type" in d: + # make sure we are consistent everywhere in python + d["doc_type"] = d.pop("type") + super().__init__(d) + + +class ObjectBase(AttrDict[Any]): + _doc_type: "DocumentOptions" + _index: "IndexBase" + meta: HitMeta + + def __init__(self, meta: Optional[Dict[str, Any]] = None, **kwargs: Any): + meta = meta or {} + for k in list(kwargs): + if k.startswith("_") and k[1:] in META_FIELDS: + meta[k] = kwargs.pop(k) + + super(AttrDict, self).__setattr__("meta", HitMeta(meta)) + + # process field defaults + if hasattr(self, "_defaults"): + for name in self._defaults: + if name not in kwargs: + value = self._defaults[name] + if callable(value): + value = value() + kwargs[name] = value + + super().__init__(kwargs) + + @classmethod + def __list_fields(cls) -> Iterator[Tuple[str, "Field", bool]]: + """ + Get all the fields defined for our class, if we have an Index, try + looking at the index mappings as well, mark the fields from Index as + optional. + """ + for name in cls._doc_type.mapping: + field = cls._doc_type.mapping[name] + yield name, field, False + + if hasattr(cls.__class__, "_index"): + if not cls._index._mapping: + return + for name in cls._index._mapping: + # don't return fields that are in _doc_type + if name in cls._doc_type.mapping: + continue + field = cls._index._mapping[name] + yield name, field, True + + @classmethod + def __get_field(cls, name: str) -> Optional["Field"]: + try: + return cls._doc_type.mapping[name] + except KeyError: + # fallback to fields on the Index + if hasattr(cls, "_index") and cls._index._mapping: + try: + return cls._index._mapping[name] + except KeyError: + pass + return None + + @classmethod + def from_es(cls, hit: Union[Dict[str, Any], "ObjectApiResponse[Any]"]) -> Self: + meta = hit.copy() + data = meta.pop("_source", {}) + doc = cls(meta=meta) + doc._from_dict(data) + return doc + + def _from_dict(self, data: Dict[str, Any]) -> None: + for k, v in data.items(): + f = self.__get_field(k) + if f and f._coerce: + v = f.deserialize(v) + setattr(self, k, v) + + def __getstate__(self) -> Tuple[Dict[str, Any], Dict[str, Any]]: # type: ignore[override] + return self.to_dict(), self.meta._d_ + + def __setstate__(self, state: Tuple[Dict[str, Any], Dict[str, Any]]) -> None: # type: ignore[override] + data, meta = state + super(AttrDict, self).__setattr__("_d_", {}) + super(AttrDict, self).__setattr__("meta", HitMeta(meta)) + self._from_dict(data) + + def __getattr__(self, name: str) -> Any: + try: + return super().__getattr__(name) + except AttributeError: + f = self.__get_field(name) + if f is not None and hasattr(f, "empty"): + value = f.empty() + if value not in SKIP_VALUES: + setattr(self, name, value) + value = getattr(self, name) + return value + raise + + def __setattr__(self, name: str, value: Any) -> None: + if name in self.__class__._doc_type.mapping: + self._d_[name] = value + else: + super().__setattr__(name, value) + + def to_dict(self, skip_empty: bool = True) -> Dict[str, Any]: + out = {} + for k, v in self._d_.items(): + # if this is a mapped field, + f = self.__get_field(k) + if f and f._coerce: + v = f.serialize(v) + + # if someone assigned AttrList, unwrap it + if isinstance(v, AttrList): + v = v._l_ + + if skip_empty: + # don't serialize empty values + # careful not to include numeric zeros + if v in ([], {}, None): + continue + + out[k] = v + return out + + def clean_fields(self, validate: 
bool = True) -> None: + errors: Dict[str, List[ValidationException]] = {} + for name, field, optional in self.__list_fields(): + data = self._d_.get(name, None) + if data is None and optional: + continue + try: + # save the cleaned value + data = field.clean(data) + except ValidationException as e: + errors.setdefault(name, []).append(e) + + if name in self._d_ or data not in ([], {}, None): + self._d_[name] = cast(Any, data) + + if validate and errors: + raise ValidationException(errors) + + def clean(self) -> None: + pass + + def full_clean(self) -> None: + self.clean_fields(validate=False) + self.clean() + self.clean_fields(validate=True) + + +def merge( + data: Union[Dict[str, Any], AttrDict[Any]], + new_data: Union[Dict[str, Any], AttrDict[Any]], + raise_on_conflict: bool = False, +) -> None: + if not ( + isinstance(data, (AttrDict, collections.abc.Mapping)) + and isinstance(new_data, (AttrDict, collections.abc.Mapping)) + ): + raise ValueError( + f"You can only merge two dicts! Got {data!r} and {new_data!r} instead." + ) + + for key, value in new_data.items(): + if ( + key in data + and isinstance(data[key], (AttrDict, collections.abc.Mapping)) + and isinstance(value, (AttrDict, collections.abc.Mapping)) + ): + merge(data[key], value, raise_on_conflict) # type: ignore[arg-type] + elif key in data and data[key] != value and raise_on_conflict: + raise ValueError(f"Incompatible data for key {key!r}, cannot be merged.") + else: + data[key] = value + + +def recursive_to_dict(data: Any) -> Any: + """Recursively transform objects that potentially have .to_dict() + into dictionary literals by traversing AttrList, AttrDict, list, + tuple, and Mapping types. + """ + if isinstance(data, AttrList): + data = list(data._l_) + elif hasattr(data, "to_dict"): + data = data.to_dict() + if isinstance(data, (list, tuple)): + return type(data)(recursive_to_dict(inner) for inner in data) + elif isinstance(data, dict): + return {key: recursive_to_dict(val) for key, val in data.items()} + return data diff --git a/elasticsearch/dsl/wrappers.py b/elasticsearch/dsl/wrappers.py new file mode 100644 index 000000000..ecd2e1363 --- /dev/null +++ b/elasticsearch/dsl/wrappers.py @@ -0,0 +1,119 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
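The ``merge`` helper above folds one mapping into another recursively, descending into nested dicts and ``AttrDict`` values and only assigning leaves; with ``raise_on_conflict=True`` a conflicting leaf raises ``ValueError`` instead of being overwritten. A brief sketch with illustrative data::

    from elasticsearch.dsl.utils import AttrDict, merge

    base = AttrDict({"settings": {"number_of_shards": 1}})
    merge(base, {"settings": {"number_of_replicas": 0}, "aliases": {"blog": {}}})

    print(base.to_dict(recursive=True))
    # {'settings': {'number_of_shards': 1, 'number_of_replicas': 0}, 'aliases': {'blog': {}}}
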
+ +import operator +from typing import ( + TYPE_CHECKING, + Callable, + ClassVar, + Dict, + Literal, + Mapping, + Optional, + Tuple, + TypeVar, + Union, + cast, +) + +if TYPE_CHECKING: + from _operator import _SupportsComparison + +from typing_extensions import TypeAlias + +from .utils import AttrDict + +ComparisonOperators: TypeAlias = Literal["lt", "lte", "gt", "gte"] +RangeValT = TypeVar("RangeValT", bound="_SupportsComparison") + +__all__ = ["Range"] + + +class Range(AttrDict[RangeValT]): + OPS: ClassVar[ + Mapping[ + ComparisonOperators, + Callable[["_SupportsComparison", "_SupportsComparison"], bool], + ] + ] = { + "lt": operator.lt, + "lte": operator.le, + "gt": operator.gt, + "gte": operator.ge, + } + + def __init__( + self, + d: Optional[Dict[str, RangeValT]] = None, + /, + **kwargs: RangeValT, + ): + if d is not None and (kwargs or not isinstance(d, dict)): + raise ValueError( + "Range accepts a single dictionary or a set of keyword arguments." + ) + + if d is None: + data = kwargs + else: + data = d + + for k in data: + if k not in self.OPS: + raise ValueError(f"Range received an unknown operator {k!r}") + + if "gt" in data and "gte" in data: + raise ValueError("You cannot specify both gt and gte for Range.") + + if "lt" in data and "lte" in data: + raise ValueError("You cannot specify both lt and lte for Range.") + + super().__init__(data) + + def __repr__(self) -> str: + return "Range(%s)" % ", ".join("%s=%r" % op for op in self._d_.items()) + + def __contains__(self, item: object) -> bool: + if isinstance(item, str): + return super().__contains__(item) + + item_supports_comp = any(hasattr(item, f"__{op}__") for op in self.OPS) + if not item_supports_comp: + return False + + for op in self.OPS: + if op in self._d_ and not self.OPS[op]( + cast("_SupportsComparison", item), self._d_[op] + ): + return False + return True + + @property + def upper(self) -> Union[Tuple[RangeValT, bool], Tuple[None, Literal[False]]]: + if "lt" in self._d_: + return self._d_["lt"], False + if "lte" in self._d_: + return self._d_["lte"], True + return None, False + + @property + def lower(self) -> Union[Tuple[RangeValT, bool], Tuple[None, Literal[False]]]: + if "gt" in self._d_: + return self._d_["gt"], False + if "gte" in self._d_: + return self._d_["gte"], True + return None, False diff --git a/elasticsearch/helpers/actions.py b/elasticsearch/helpers/actions.py index 687bf4b84..d1a43a8dc 100644 --- a/elasticsearch/helpers/actions.py +++ b/elasticsearch/helpers/actions.py @@ -593,7 +593,7 @@ def parallel_bulk( class BlockingPool(ThreadPool): def _setup_queues(self) -> None: - super()._setup_queues() # type: ignore + super()._setup_queues() # type: ignore[misc] # The queue must be at least the size of the number of threads to # prevent hanging when inserting sentinel values during teardown. self._inqueue: Queue[ diff --git a/examples/dsl/README.rst b/examples/dsl/README.rst new file mode 100644 index 000000000..87bfe0ec0 --- /dev/null +++ b/examples/dsl/README.rst @@ -0,0 +1,47 @@ +Elasticsearch DSL Examples +========================== + +In this directory you can see several complete examples demonstrating key +concepts and patterns exposed by ``elasticsearch-dsl``. + +``alias_migration.py`` +---------------------- + +The alias migration example shows a useful pattern where we use versioned +indices (``test-blog-0``, ``test-blog-1``, ...) 
to manage schema changes and
+hide them behind an alias so that the application doesn't have to be aware of
+the versions and can just refer to the ``test-blog`` alias for both read and
+write operations.
+
+For simplicity we use a timestamp as the version in the index name.
+
+``parent_child.py``
+-------------------
+
+A more complex example highlighting the possible relationships available in
+elasticsearch - `parent/child
+`_ and
+`nested
+`_.
+
+``composite_agg.py``
+--------------------
+
+A helper function using the `composite aggregation
+`_
+to paginate over aggregation results.
+
+``percolate.py``
+----------------
+
+A ``BlogPost`` document with automatic classification using the `percolator
+`_
+functionality.
+
+``completion.py``
+-----------------
+
+An example using the `completion suggester
+`_
+to auto-complete people's names.
+
diff --git a/examples/dsl/alias_migration.py b/examples/dsl/alias_migration.py
new file mode 100644
index 000000000..24355aded
--- /dev/null
+++ b/examples/dsl/alias_migration.py
@@ -0,0 +1,161 @@
+# Licensed to Elasticsearch B.V. under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch B.V. licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""
+Simple example with a single Document demonstrating how schema can be managed,
+including upgrading with reindexing.
+
+Key concepts:
+
+    * setup() function to first initialize the schema (as index template) in
+      elasticsearch. Can be called any time (recommended with every deploy of
+      your app).
+
+    * migrate() function to be called any time when the schema changes - it
+      will create a new index (by incrementing the version) and update the alias.
+      By default it will also (before flipping the alias) move the data from the
+      previous index to the new one.
+
+    * BlogPost._matches() class method is required for this code to work since
+      otherwise BlogPost will not be used to deserialize the documents as those
+      will have index set to the concrete index whereas the class refers to the
+      alias.
+""" +import os +from datetime import datetime +from fnmatch import fnmatch +from typing import TYPE_CHECKING, Any, Dict, List, Optional + +from elasticsearch.dsl import Document, Keyword, connections, mapped_field + +ALIAS = "test-blog" +PATTERN = ALIAS + "-*" +PRIORITY = 100 + + +class BlogPost(Document): + if TYPE_CHECKING: + # definitions here help type checkers understand additional arguments + # that are allowed in the constructor + _id: int + + title: str + tags: List[str] = mapped_field(Keyword()) + content: str + published: Optional[datetime] = mapped_field(default=None) + + def is_published(self) -> bool: + return bool(self.published and datetime.now() > self.published) + + @classmethod + def _matches(cls, hit: Dict[str, Any]) -> bool: + # override _matches to match indices in a pattern instead of just ALIAS + # hit is the raw dict as returned by elasticsearch + return fnmatch(hit["_index"], PATTERN) + + class Index: + # we will use an alias instead of the index + name = ALIAS + # set settings and possibly other attributes of the index like + # analyzers + settings = {"number_of_shards": 1, "number_of_replicas": 0} + + +def setup() -> None: + """ + Create the index template in elasticsearch specifying the mappings and any + settings to be used. This can be run at any time, ideally at every new code + deploy. + """ + # create an index template + index_template = BlogPost._index.as_composable_template( + ALIAS, PATTERN, priority=PRIORITY + ) + # upload the template into elasticsearch + # potentially overriding the one already there + index_template.save() + + # create the first index if it doesn't exist + if not BlogPost._index.exists(): + migrate(move_data=False) + + +def migrate(move_data: bool = True, update_alias: bool = True) -> None: + """ + Upgrade function that creates a new index for the data. Optionally it also can + (and by default will) reindex previous copy of the data into the new index + (specify ``move_data=False`` to skip this step) and update the alias to + point to the latest index (set ``update_alias=False`` to skip). + + Note that while this function is running the application can still perform + any and all searches without any loss of functionality. It should, however, + not perform any writes at this time as those might be lost. 
+ """ + # construct a new index name by appending current timestamp + next_index = PATTERN.replace("*", datetime.now().strftime("%Y%m%d%H%M%S%f")) + + # get the low level connection + es = connections.get_connection() + + # create new index, it will use the settings from the template + es.indices.create(index=next_index) + + if move_data: + # move data from current alias to the new index + es.options(request_timeout=3600).reindex( + body={"source": {"index": ALIAS}, "dest": {"index": next_index}} + ) + # refresh the index to make the changes visible + es.indices.refresh(index=next_index) + + if update_alias: + # repoint the alias to point to the newly created index + es.indices.update_aliases( + body={ + "actions": [ + {"remove": {"alias": ALIAS, "index": PATTERN}}, + {"add": {"alias": ALIAS, "index": next_index}}, + ] + } + ) + + +def main() -> None: + # initiate the default connection to elasticsearch + connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) + + # create the empty index + setup() + + # create a new document + bp = BlogPost( + _id=0, + title="Hello World!", + tags=["testing", "dummy"], + content=open(__file__).read(), + ) + bp.save(refresh=True) + + # create new index + migrate() + + # close the connection + connections.get_connection().close() + + +if __name__ == "__main__": + main() diff --git a/examples/dsl/async/alias_migration.py b/examples/dsl/async/alias_migration.py new file mode 100644 index 000000000..94bdd63ce --- /dev/null +++ b/examples/dsl/async/alias_migration.py @@ -0,0 +1,162 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" +Simple example with a single Document demonstrating how schema can be managed, +including upgrading with reindexing. + +Key concepts: + + * setup() function to first initialize the schema (as index template) in + elasticsearch. Can be called any time (recommended with every deploy of + your app). + + * migrate() function to be called any time when the schema changes - it + will create a new index (by incrementing the version) and update the alias. + By default it will also (before flipping the alias) move the data from the + previous index to the new one. + + * BlogPost._matches() class method is required for this code to work since + otherwise BlogPost will not be used to deserialize the documents as those + will have index set to the concrete index whereas the class refers to the + alias. 
+""" +import asyncio +import os +from datetime import datetime +from fnmatch import fnmatch +from typing import TYPE_CHECKING, Any, Dict, List, Optional + +from elasticsearch.dsl import AsyncDocument, Keyword, async_connections, mapped_field + +ALIAS = "test-blog" +PATTERN = ALIAS + "-*" +PRIORITY = 100 + + +class BlogPost(AsyncDocument): + if TYPE_CHECKING: + # definitions here help type checkers understand additional arguments + # that are allowed in the constructor + _id: int + + title: str + tags: List[str] = mapped_field(Keyword()) + content: str + published: Optional[datetime] = mapped_field(default=None) + + def is_published(self) -> bool: + return bool(self.published and datetime.now() > self.published) + + @classmethod + def _matches(cls, hit: Dict[str, Any]) -> bool: + # override _matches to match indices in a pattern instead of just ALIAS + # hit is the raw dict as returned by elasticsearch + return fnmatch(hit["_index"], PATTERN) + + class Index: + # we will use an alias instead of the index + name = ALIAS + # set settings and possibly other attributes of the index like + # analyzers + settings = {"number_of_shards": 1, "number_of_replicas": 0} + + +async def setup() -> None: + """ + Create the index template in elasticsearch specifying the mappings and any + settings to be used. This can be run at any time, ideally at every new code + deploy. + """ + # create an index template + index_template = BlogPost._index.as_composable_template( + ALIAS, PATTERN, priority=PRIORITY + ) + # upload the template into elasticsearch + # potentially overriding the one already there + await index_template.save() + + # create the first index if it doesn't exist + if not await BlogPost._index.exists(): + await migrate(move_data=False) + + +async def migrate(move_data: bool = True, update_alias: bool = True) -> None: + """ + Upgrade function that creates a new index for the data. Optionally it also can + (and by default will) reindex previous copy of the data into the new index + (specify ``move_data=False`` to skip this step) and update the alias to + point to the latest index (set ``update_alias=False`` to skip). + + Note that while this function is running the application can still perform + any and all searches without any loss of functionality. It should, however, + not perform any writes at this time as those might be lost. 
+ """ + # construct a new index name by appending current timestamp + next_index = PATTERN.replace("*", datetime.now().strftime("%Y%m%d%H%M%S%f")) + + # get the low level connection + es = async_connections.get_connection() + + # create new index, it will use the settings from the template + await es.indices.create(index=next_index) + + if move_data: + # move data from current alias to the new index + await es.options(request_timeout=3600).reindex( + body={"source": {"index": ALIAS}, "dest": {"index": next_index}} + ) + # refresh the index to make the changes visible + await es.indices.refresh(index=next_index) + + if update_alias: + # repoint the alias to point to the newly created index + await es.indices.update_aliases( + body={ + "actions": [ + {"remove": {"alias": ALIAS, "index": PATTERN}}, + {"add": {"alias": ALIAS, "index": next_index}}, + ] + } + ) + + +async def main() -> None: + # initiate the default connection to elasticsearch + async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) + + # create the empty index + await setup() + + # create a new document + bp = BlogPost( + _id=0, + title="Hello World!", + tags=["testing", "dummy"], + content=open(__file__).read(), + ) + await bp.save(refresh=True) + + # create new index + await migrate() + + # close the connection + await async_connections.get_connection().close() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/dsl/async/completion.py b/examples/dsl/async/completion.py new file mode 100644 index 000000000..1c5929b2b --- /dev/null +++ b/examples/dsl/async/completion.py @@ -0,0 +1,114 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" +Example ``Document`` with completion suggester. + +In the ``Person`` class we index the person's name to allow auto completing in +any order ("first last", "middle last first", ...). For the weight we use a +value from the ``popularity`` field which is a long. + +To make the suggestions work in different languages we added a custom analyzer +that does ascii folding. 
+""" + +import asyncio +import os +from itertools import permutations +from typing import TYPE_CHECKING, Any, Dict, Optional + +from elasticsearch.dsl import ( + AsyncDocument, + Completion, + Keyword, + Long, + Text, + analyzer, + async_connections, + mapped_field, + token_filter, +) + +# custom analyzer for names +ascii_fold = analyzer( + "ascii_fold", + # we don't want to split O'Brian or Toulouse-Lautrec + tokenizer="whitespace", + filter=["lowercase", token_filter("ascii_fold", "asciifolding")], +) + + +class Person(AsyncDocument): + if TYPE_CHECKING: + # definitions here help type checkers understand additional arguments + # that are allowed in the constructor + _id: Optional[int] = mapped_field(default=None) + + name: str = mapped_field(Text(fields={"keyword": Keyword()}), default="") + popularity: int = mapped_field(Long(), default=0) + + # completion field with a custom analyzer + suggest: Dict[str, Any] = mapped_field(Completion(analyzer=ascii_fold), init=False) + + def clean(self) -> None: + """ + Automatically construct the suggestion input and weight by taking all + possible permutations of Person's name as ``input`` and taking their + popularity as ``weight``. + """ + self.suggest = { + "input": [" ".join(p) for p in permutations(self.name.split())], + "weight": self.popularity, + } + + class Index: + name = "test-suggest" + settings = {"number_of_shards": 1, "number_of_replicas": 0} + + +async def main() -> None: + # initiate the default connection to elasticsearch + async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) + + # create the empty index + await Person.init() + + # index some sample data + for id, (name, popularity) in enumerate( + [("Henri de Toulouse-Lautrec", 42), ("Jára Cimrman", 124)] + ): + await Person(_id=id, name=name, popularity=popularity).save() + + # refresh index manually to make changes live + await Person._index.refresh() + + # run some suggestions + for text in ("já", "Jara Cimr", "tou", "de hen"): + s = Person.search() + s = s.suggest("auto_complete", text, completion={"field": "suggest"}) + response = await s.execute() + + # print out all the options we got + for option in response.suggest["auto_complete"][0].options: + print("%10s: %25s (%d)" % (text, option._source.name, option._score)) + + # close the connection + await async_connections.get_connection().close() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/dsl/async/composite_agg.py b/examples/dsl/async/composite_agg.py new file mode 100644 index 000000000..e6ea62cc6 --- /dev/null +++ b/examples/dsl/async/composite_agg.py @@ -0,0 +1,93 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
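+
+# For orientation: scan_aggs() below pages through a "composite" aggregation.
+# The request it builds looks roughly like this (an illustrative sketch using
+# the field names from main() below):
+#
+#   {
+#     "size": 0,
+#     "aggs": {
+#       "comp": {
+#         "composite": {
+#           "sources": [{"files": {"terms": {"field": "files"}}}],
+#           "size": 10,
+#           "after": {"files": "..."}    # omitted on the first page
+#         },
+#         "aggs": {"first_seen": {"min": {"field": "committed_date"}}}
+#       }
+#     }
+#   }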
+ +import asyncio +import os +from typing import Any, AsyncIterator, Dict, Mapping, Sequence, cast + +from elasticsearch.dsl import Agg, AsyncSearch, Response, aggs, async_connections +from elasticsearch.dsl.types import CompositeAggregate +from elasticsearch.helpers import async_bulk +from test_elasticsearch.test_dsl.test_integration.test_data import DATA, GIT_INDEX + + +async def scan_aggs( + search: AsyncSearch, + source_aggs: Sequence[Mapping[str, Agg]], + inner_aggs: Dict[str, Agg] = {}, + size: int = 10, +) -> AsyncIterator[CompositeAggregate]: + """ + Helper function used to iterate over all possible bucket combinations of + ``source_aggs``, returning results of ``inner_aggs`` for each. Uses the + ``composite`` aggregation under the hood to perform this. + """ + + async def run_search(**kwargs: Any) -> Response: + s = search[:0] + bucket = s.aggs.bucket( + "comp", + aggs.Composite( + sources=source_aggs, + size=size, + **kwargs, + ), + ) + for agg_name, agg in inner_aggs.items(): + bucket[agg_name] = agg + return await s.execute() + + response = await run_search() + while response.aggregations["comp"].buckets: + for b in response.aggregations["comp"].buckets: + yield cast(CompositeAggregate, b) + if "after_key" in response.aggregations["comp"]: + after = response.aggregations["comp"].after_key + else: + after = response.aggregations["comp"].buckets[-1].key + response = await run_search(after=after) + + +async def main() -> None: + # initiate the default connection to elasticsearch + client = async_connections.create_connection( + hosts=[os.environ["ELASTICSEARCH_URL"]] + ) + + # create the index and populate it with some data + # note that the dataset is imported from the library's test suite + await client.indices.delete(index="git", ignore_unavailable=True) + await client.indices.create(index="git", **GIT_INDEX) + await async_bulk(client, DATA, raise_on_error=True, refresh=True) + + # run some aggregations on the data + async for b in scan_aggs( + AsyncSearch(index="git"), + [{"files": aggs.Terms(field="files")}], + {"first_seen": aggs.Min(field="committed_date")}, + ): + print( + "File %s has been modified %d times, first seen at %s." + % (b.key.files, b.doc_count, b.first_seen.value_as_string) + ) + + # close the connection + await async_connections.get_connection().close() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/dsl/async/parent_child.py b/examples/dsl/async/parent_child.py new file mode 100644 index 000000000..16dc6ebc3 --- /dev/null +++ b/examples/dsl/async/parent_child.py @@ -0,0 +1,276 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" +Complex data model example modeling stackoverflow-like data. 
+ +It is used to showcase several key features of elasticsearch-dsl: + + * Object and Nested fields: see User and Comment classes and fields they + are used in + + * method add_comment is used to add comments + + * Parent/Child relationship + + * See the Join field on Post creating the relationship between Question + and Answer + + * Meta.matches allows the hits from same index to be wrapped in proper + classes + + * to see how child objects are created see Question.add_answer + + * Question.search_answers shows how to query for children of a + particular parent + +""" +import asyncio +import os +from datetime import datetime +from typing import TYPE_CHECKING, Any, Dict, List, Optional, cast + +from elasticsearch.dsl import ( + AsyncDocument, + AsyncSearch, + Date, + InnerDoc, + Join, + Keyword, + Long, + Text, + async_connections, + mapped_field, +) + + +class User(InnerDoc): + """ + Class used to represent a denormalized user stored on other objects. + """ + + id: int = mapped_field(Long()) + signed_up: Optional[datetime] = mapped_field(Date()) + username: str = mapped_field(Text(fields={"keyword": Keyword()})) + email: Optional[str] = mapped_field(Text(fields={"keyword": Keyword()})) + location: Optional[str] = mapped_field(Text(fields={"keyword": Keyword()})) + + +class Comment(InnerDoc): + """ + Class wrapper for nested comment objects. + """ + + author: User + created: datetime + content: str + + +class Post(AsyncDocument): + """ + Base class for Question and Answer containing the common fields. + """ + + author: User + + if TYPE_CHECKING: + # definitions here help type checkers understand additional arguments + # that are allowed in the constructor + _routing: str = mapped_field(default=None) + _id: Optional[int] = mapped_field(default=None) + + created: Optional[datetime] = mapped_field(default=None) + body: str = mapped_field(default="") + comments: List[Comment] = mapped_field(default_factory=list) + question_answer: Any = mapped_field( + Join(relations={"question": "answer"}), default_factory=dict + ) + + @classmethod + def _matches(cls, hit: Dict[str, Any]) -> bool: + # Post is an abstract class, make sure it never gets used for + # deserialization + return False + + class Index: + name = "test-qa-site" + settings = { + "number_of_shards": 1, + "number_of_replicas": 0, + } + + async def add_comment( + self, + user: User, + content: str, + created: Optional[datetime] = None, + commit: Optional[bool] = True, + ) -> Comment: + c = Comment(author=user, content=content, created=created or datetime.now()) + self.comments.append(c) + if commit: + await self.save() + return c + + async def save(self, **kwargs: Any) -> None: # type: ignore[override] + # if there is no date, use now + if self.created is None: + self.created = datetime.now() + await super().save(**kwargs) + + +class Question(Post): + tags: List[str] = mapped_field( + default_factory=list + ) # .tags will return empty list if not present + title: str = mapped_field(Text(fields={"keyword": Keyword()}), default="") + + @classmethod + def _matches(cls, hit: Dict[str, Any]) -> bool: + """Use Question class for parent documents""" + return bool(hit["_source"]["question_answer"] == "question") + + @classmethod + def search(cls, **kwargs: Any) -> AsyncSearch: # type: ignore[override] + return cls._index.search(**kwargs).filter("term", question_answer="question") + + async def add_answer( + self, + user: User, + body: str, + created: Optional[datetime] = None, + accepted: bool = False, + commit: Optional[bool] = True, + ) -> 
"Answer": + answer = Answer( + # required make sure the answer is stored in the same shard + _routing=self.meta.id, + # set up the parent/child mapping + question_answer={"name": "answer", "parent": self.meta.id}, + # pass in the field values + author=user, + created=created, + body=body, + is_accepted=accepted, + ) + if commit: + await answer.save() + return answer + + def search_answers(self) -> AsyncSearch: + # search only our index + s = Answer.search() + # filter for answers belonging to us + s = s.filter("parent_id", type="answer", id=self.meta.id) + # add routing to only go to specific shard + s = s.params(routing=self.meta.id) + return s + + async def get_answers(self) -> List[Any]: + """ + Get answers either from inner_hits already present or by searching + elasticsearch. + """ + if "inner_hits" in self.meta and "answer" in self.meta.inner_hits: + return cast(List[Any], self.meta.inner_hits["answer"].hits) + return [a async for a in self.search_answers()] + + async def save(self, **kwargs: Any) -> None: # type: ignore[override] + self.question_answer = "question" + await super().save(**kwargs) + + +class Answer(Post): + is_accepted: bool = mapped_field(default=False) + + @classmethod + def _matches(cls, hit: Dict[str, Any]) -> bool: + """Use Answer class for child documents with child name 'answer'""" + return ( + isinstance(hit["_source"]["question_answer"], dict) + and hit["_source"]["question_answer"].get("name") == "answer" + ) + + @classmethod + def search(cls, **kwargs: Any) -> AsyncSearch: # type: ignore[override] + return cls._index.search(**kwargs).exclude("term", question_answer="question") + + async def get_question(self) -> Optional[Question]: + # cache question in self.meta + # any attributes set on self would be interpreted as fields + if "question" not in self.meta: + self.meta.question = await Question.get( + id=self.question_answer.parent, index=self.meta.index + ) + return cast(Optional[Question], self.meta.question) + + async def save(self, **kwargs: Any) -> None: # type: ignore[override] + # set routing to parents id automatically + self.meta.routing = self.question_answer.parent + await super().save(**kwargs) + + +async def setup() -> None: + """Create an IndexTemplate and save it into elasticsearch.""" + index_template = Post._index.as_composable_template("base", priority=100) + await index_template.save() + + +async def main() -> Answer: + # initiate the default connection to elasticsearch + async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) + + # create index + await setup() + + # user objects to use + nick = User( + id=47, + signed_up=datetime(2017, 4, 3), + username="fxdgear", + email="nick.lang@elastic.co", + location="Colorado", + ) + honza = User( + id=42, + signed_up=datetime(2013, 4, 3), + username="honzakral", + email="honza@elastic.co", + location="Prague", + ) + + # create a question object + question = Question( + _id=1, + author=nick, + tags=["elasticsearch", "python"], + title="How do I use elasticsearch from Python?", + body=""" + I want to use elasticsearch, how do I do it from Python? 
+ """, + ) + await question.save() + answer = await question.add_answer(honza, "Just use `elasticsearch-py`!") + + # close the connection + await async_connections.get_connection().close() + + return answer + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/dsl/async/percolate.py b/examples/dsl/async/percolate.py new file mode 100644 index 000000000..75350bbed --- /dev/null +++ b/examples/dsl/async/percolate.py @@ -0,0 +1,117 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import asyncio +import os +from typing import TYPE_CHECKING, Any, List, Optional + +from elasticsearch.dsl import ( + AsyncDocument, + AsyncSearch, + Keyword, + Percolator, + Q, + Query, + async_connections, + mapped_field, +) + + +class BlogPost(AsyncDocument): + """ + Blog posts that will be automatically tagged based on percolation queries. + """ + + if TYPE_CHECKING: + # definitions here help type checkers understand additional arguments + # that are allowed in the constructor + _id: int + + content: Optional[str] + tags: List[str] = mapped_field(Keyword(), default_factory=list) + + class Index: + name = "test-blogpost" + + async def add_tags(self) -> None: + # run a percolation to automatically tag the blog post. + s = AsyncSearch(index="test-percolator") + s = s.query( + "percolate", field="query", index=self._get_index(), document=self.to_dict() + ) + + # collect all the tags from matched percolators + async for percolator in s: + self.tags.extend(percolator.tags) + + # make sure tags are unique + self.tags = list(set(self.tags)) + + async def save(self, **kwargs: Any) -> None: # type: ignore[override] + await self.add_tags() + await super().save(**kwargs) + + +class PercolatorDoc(AsyncDocument): + """ + Document class used for storing the percolation queries. + """ + + if TYPE_CHECKING: + _id: str + + # relevant fields from BlogPost must be also present here for the queries + # to be able to use them. Another option would be to use document + # inheritance but save() would have to be reset to normal behavior. 
+    content: Optional[str]
+
+    # the percolator query to be run against the doc
+    query: Query = mapped_field(Percolator())
+    # list of tags to append to a document
+    tags: List[str] = mapped_field(Keyword(multi=True))
+
+    class Index:
+        name = "test-percolator"
+        settings = {"number_of_shards": 1, "number_of_replicas": 0}
+
+
+async def setup() -> None:
+    # create the percolator index if it doesn't exist
+    if not await PercolatorDoc._index.exists():
+        await PercolatorDoc.init()
+
+    # register a percolation query looking for documents about python
+    await PercolatorDoc(
+        _id="python",
+        tags=["programming", "development", "python"],
+        content="",
+        query=Q("match", content="python"),
+    ).save(refresh=True)
+
+
+async def main() -> None:
+    # initiate the default connection to elasticsearch
+    async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])
+
+    await setup()
+
+    # close the connection
+    await async_connections.get_connection().close()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/examples/dsl/async/search_as_you_type.py b/examples/dsl/async/search_as_you_type.py
new file mode 100644
index 000000000..5919f3e3f
--- /dev/null
+++ b/examples/dsl/async/search_as_you_type.py
@@ -0,0 +1,99 @@
+# Licensed to Elasticsearch B.V. under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch B.V. licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""
+Example ``Document`` with the search_as_you_type field datatype and how to search it.
+
+When creating a field with the search_as_you_type datatype, Elasticsearch creates
+additional subfields to enable efficient as-you-type completion, matching terms
+at any position within the input.
+
+A custom analyzer with ascii folding allows the search to work in different languages.
+""" + +import asyncio +import os +from typing import TYPE_CHECKING, Optional + +from elasticsearch.dsl import ( + AsyncDocument, + SearchAsYouType, + async_connections, + mapped_field, +) +from elasticsearch.dsl.query import MultiMatch + + +class Person(AsyncDocument): + if TYPE_CHECKING: + # definitions here help type checkers understand additional arguments + # that are allowed in the constructor + _id: Optional[int] = mapped_field(default=None) + + name: str = mapped_field(SearchAsYouType(max_shingle_size=3), default="") + + class Index: + name = "test-search-as-you-type" + settings = {"number_of_shards": 1, "number_of_replicas": 0} + + +async def main() -> None: + # initiate the default connection to elasticsearch + async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) + + # create the empty index + await Person.init() + + import pprint + + pprint.pprint(Person().to_dict(), indent=2) + + # index some sample data + names = [ + "Andy Warhol", + "Alphonse Mucha", + "Henri de Toulouse-Lautrec", + "Jára Cimrman", + ] + for id, name in enumerate(names): + await Person(_id=id, name=name).save() + + # refresh index manually to make changes live + await Person._index.refresh() + + # run some suggestions + for text in ("já", "Cimr", "toulouse", "Henri Tou", "a"): + s = Person.search() + + s.query = MultiMatch( # type: ignore[assignment] + query=text, + type="bool_prefix", + fields=["name", "name._2gram", "name._3gram"], + ) + + response = await s.execute() + + # print out all the options we got + for h in response: + print("%15s: %25s" % (text, h.name)) + + # close the connection + await async_connections.get_connection().close() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/dsl/async/semantic_text.py b/examples/dsl/async/semantic_text.py new file mode 100644 index 000000000..426bf1bbe --- /dev/null +++ b/examples/dsl/async/semantic_text.py @@ -0,0 +1,148 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +""" +# Semantic Text example + +Requirements: + +$ pip install "elasticsearch[async]" tqdm + +Before running this example, an ELSER inference endpoint must be created in the +Elasticsearch cluster. This can be done manually from Kibana, or with the +following curl command from a terminal: + +curl -X PUT \ + "$ELASTICSEARCH_URL/_inference/sparse_embedding/my-elser-endpoint" \ + -H "Content-Type: application/json" \ + -d '{"service":"elser","service_settings":{"num_allocations":1,"num_threads":1}}' + +To run the example: + +$ python semantic_text.py "text to search" + +The index will be created automatically if it does not exist. Add +`--recreate-index` to the command to regenerate it. + +The example dataset includes a selection of workplace documents. 
The +following are good example queries to try out with this dataset: + +$ python semantic_text.py "work from home" +$ python semantic_text.py "vacation time" +$ python semantic_text.py "can I bring a bird to work?" + +When the index is created, the inference service will split the documents into +short passages, and for each passage a sparse embedding will be generated using +Elastic's ELSER v2 model. +""" + +import argparse +import asyncio +import json +import os +from datetime import datetime +from typing import Any, Optional +from urllib.request import urlopen + +from tqdm import tqdm + +from elasticsearch import dsl + +DATASET_URL = "https://raw.githubusercontent.com/elastic/elasticsearch-labs/main/datasets/workplace-documents.json" + + +class WorkplaceDoc(dsl.AsyncDocument): + class Index: + name = "workplace_documents_semantic" + + name: str + summary: str + content: Any = dsl.mapped_field( + dsl.field.SemanticText(inference_id="my-elser-endpoint") + ) + created: datetime + updated: Optional[datetime] + url: str = dsl.mapped_field(dsl.Keyword()) + category: str = dsl.mapped_field(dsl.Keyword()) + + +async def create() -> None: + + # create the index + await WorkplaceDoc._index.delete(ignore_unavailable=True) + await WorkplaceDoc.init() + + # download the data + dataset = json.loads(urlopen(DATASET_URL).read()) + + # import the dataset + for data in tqdm(dataset, desc="Indexing documents..."): + doc = WorkplaceDoc( + name=data["name"], + summary=data["summary"], + content=data["content"], + created=data.get("created_on"), + updated=data.get("updated_at"), + url=data["url"], + category=data["category"], + ) + await doc.save() + + # refresh the index + await WorkplaceDoc._index.refresh() + + +async def search(query: str) -> dsl.AsyncSearch[WorkplaceDoc]: + search = WorkplaceDoc.search() + search = search[:5] + return search.query(dsl.query.Semantic(field=WorkplaceDoc.content, query=query)) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Vector database with Elasticsearch") + parser.add_argument( + "--recreate-index", action="store_true", help="Recreate and populate the index" + ) + parser.add_argument("query", action="store", help="The search query") + return parser.parse_args() + + +async def main() -> None: + args = parse_args() + + # initiate the default connection to elasticsearch + dsl.async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) + + if args.recreate_index or not await WorkplaceDoc._index.exists(): + await create() + + results = await search(args.query) + + async for hit in results: + print( + f"Document: {hit.name} [Category: {hit.category}] [Score: {hit.meta.score}]" + ) + print(f"Content: {hit.content.text}") + print("--------------------\n") + + # close the connection + await dsl.async_connections.get_connection().close() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/dsl/async/sparse_vectors.py b/examples/dsl/async/sparse_vectors.py new file mode 100644 index 000000000..64ae63245 --- /dev/null +++ b/examples/dsl/async/sparse_vectors.py @@ -0,0 +1,198 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" +# Sparse vector database example + +Requirements: + +$ pip install nltk tqdm "elasticsearch[async]" + +Before running this example, the ELSER v2 model must be downloaded and deployed +to the Elasticsearch cluster, and an ingest pipeline must be defined. This can +be done manually from Kibana, or with the following three curl commands from a +terminal, adjusting the endpoint as needed: + +curl -X PUT \ + "http://localhost:9200/_ml/trained_models/.elser_model_2?wait_for_completion" \ + -H "Content-Type: application/json" \ + -d '{"input":{"field_names":["text_field"]}}' +curl -X POST \ + "http://localhost:9200/_ml/trained_models/.elser_model_2/deployment/_start?wait_for=fully_allocated" +curl -X PUT \ + "http://localhost:9200/_ingest/pipeline/elser_ingest_pipeline" \ + -H "Content-Type: application/json" \ + -d '{"processors":[{"foreach":{"field":"passages","processor":{"inference":{"model_id":".elser_model_2","input_output":[{"input_field":"_ingest._value.content","output_field":"_ingest._value.embedding"}]}}}}]}' + +To run the example: + +$ python sparse_vectors.py "text to search" + +The index will be created automatically if it does not exist. Add +`--recreate-index` to regenerate it. + +The example dataset includes a selection of workplace documents. The +following are good example queries to try out with this dataset: + +$ python sparse_vectors.py "work from home" +$ python sparse_vectors.py "vacation time" +$ python sparse_vectors.py "can I bring a bird to work?" + +When the index is created, the documents are split into short passages, and for +each passage a sparse embedding is generated using Elastic's ELSER v2 model. +The documents that are returned as search results are those that have the +highest scored passages. Add `--show-inner-hits` to the command to see +individual passage results as well. 
+""" + +import argparse +import asyncio +import json +import os +from datetime import datetime +from typing import Any, Dict, List, Optional +from urllib.request import urlopen + +import nltk +from tqdm import tqdm + +from elasticsearch.dsl import ( + AsyncDocument, + AsyncSearch, + InnerDoc, + Keyword, + Q, + SparseVector, + async_connections, + mapped_field, +) + +DATASET_URL = "https://raw.githubusercontent.com/elastic/elasticsearch-labs/main/datasets/workplace-documents.json" + +# initialize sentence tokenizer +nltk.download("punkt_tab", quiet=True) + + +class Passage(InnerDoc): + content: Optional[str] + embedding: Dict[str, float] = mapped_field(SparseVector(), init=False) + + +class WorkplaceDoc(AsyncDocument): + class Index: + name = "workplace_documents_sparse" + settings = {"default_pipeline": "elser_ingest_pipeline"} + + name: str + summary: str + content: str + created: datetime + updated: Optional[datetime] + url: str = mapped_field(Keyword()) + category: str = mapped_field(Keyword()) + passages: List[Passage] = mapped_field(default=[]) + + _model: Any = None + + def clean(self) -> None: + # split the content into sentences + passages = nltk.sent_tokenize(self.content) + + # generate an embedding for each passage and save it as a nested document + for passage in passages: + self.passages.append(Passage(content=passage)) + + +async def create() -> None: + + # create the index + await WorkplaceDoc._index.delete(ignore_unavailable=True) + await WorkplaceDoc.init() + + # download the data + dataset = json.loads(urlopen(DATASET_URL).read()) + + # import the dataset + for data in tqdm(dataset, desc="Indexing documents..."): + doc = WorkplaceDoc( + name=data["name"], + summary=data["summary"], + content=data["content"], + created=data.get("created_on"), + updated=data.get("updated_at"), + url=data["url"], + category=data["category"], + ) + await doc.save() + + +async def search(query: str) -> AsyncSearch[WorkplaceDoc]: + return WorkplaceDoc.search()[:5].query( + "nested", + path="passages", + query=Q( + "text_expansion", + passages__content={ + "model_id": ".elser_model_2", + "model_text": query, + }, + ), + inner_hits={"size": 2}, + ) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Vector database with Elasticsearch") + parser.add_argument( + "--recreate-index", action="store_true", help="Recreate and populate the index" + ) + parser.add_argument( + "--show-inner-hits", + action="store_true", + help="Show results for individual passages", + ) + parser.add_argument("query", action="store", help="The search query") + return parser.parse_args() + + +async def main() -> None: + args = parse_args() + + # initiate the default connection to elasticsearch + async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) + + if args.recreate_index or not await WorkplaceDoc._index.exists(): + await create() + + results = await search(args.query) + + async for hit in results: + print( + f"Document: {hit.name} [Category: {hit.category}] [Score: {hit.meta.score}]" + ) + print(f"Summary: {hit.summary}") + if args.show_inner_hits: + for passage in hit.meta.inner_hits["passages"]: + print(f" - [Score: {passage.meta.score}] {passage.content!r}") + print("") + + # close the connection + await async_connections.get_connection().close() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/dsl/async/vectors.py b/examples/dsl/async/vectors.py new file mode 100644 index 000000000..15829df9a --- /dev/null +++ 
b/examples/dsl/async/vectors.py @@ -0,0 +1,187 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" +# Vector database example + +Requirements: + +$ pip install nltk sentence_transformers tqdm "elasticsearch[async]" + +To run the example: + +$ python vectors.py "text to search" + +The index will be created automatically if it does not exist. Add +`--recreate-index` to regenerate it. + +The example dataset includes a selection of workplace documents. The +following are good example queries to try out with this dataset: + +$ python vectors.py "work from home" +$ python vectors.py "vacation time" +$ python vectors.py "can I bring a bird to work?" + +When the index is created, the documents are split into short passages, and for +each passage an embedding is generated using the open source +"all-MiniLM-L6-v2" model. The documents that are returned as search results are +those that have the highest scored passages. Add `--show-inner-hits` to the +command to see individual passage results as well. +""" + +import argparse +import asyncio +import json +import os +from datetime import datetime +from typing import Any, List, Optional, cast +from urllib.request import urlopen + +import nltk +from sentence_transformers import SentenceTransformer +from tqdm import tqdm + +from elasticsearch.dsl import ( + AsyncDocument, + AsyncSearch, + DenseVector, + InnerDoc, + Keyword, + M, + async_connections, + mapped_field, +) + +DATASET_URL = "https://raw.githubusercontent.com/elastic/elasticsearch-labs/main/datasets/workplace-documents.json" +MODEL_NAME = "all-MiniLM-L6-v2" + +# initialize sentence tokenizer +nltk.download("punkt_tab", quiet=True) + +# this will be the embedding model +embedding_model: Any = None + + +class Passage(InnerDoc): + content: str + embedding: List[float] = mapped_field(DenseVector()) + + +class WorkplaceDoc(AsyncDocument): + class Index: + name = "workplace_documents" + + name: str + summary: str + content: str + created: datetime + updated: Optional[datetime] + url: str = mapped_field(Keyword(required=True)) + category: str = mapped_field(Keyword(required=True)) + passages: M[List[Passage]] = mapped_field(default=[]) + + @classmethod + def get_embedding(cls, input: str) -> List[float]: + global embedding_model + if embedding_model is None: + embedding_model = SentenceTransformer(MODEL_NAME) + return cast(List[float], list(embedding_model.encode(input))) + + def clean(self) -> None: + # split the content into sentences + passages = cast(List[str], nltk.sent_tokenize(self.content)) + + # generate an embedding for each passage and save it as a nested document + for passage in passages: + self.passages.append( + Passage(content=passage, embedding=self.get_embedding(passage)) + ) + + +async def create() -> None: + # create the index + await 
WorkplaceDoc._index.delete(ignore_unavailable=True) + await WorkplaceDoc.init() + + # download the data + dataset = json.loads(urlopen(DATASET_URL).read()) + + # import the dataset + for data in tqdm(dataset, desc="Indexing documents..."): + doc = WorkplaceDoc( + name=data["name"], + summary=data["summary"], + content=data["content"], + created=data.get("created_on"), + updated=data.get("updated_at"), + url=data["url"], + category=data["category"], + ) + await doc.save() + + +async def search(query: str) -> AsyncSearch[WorkplaceDoc]: + return WorkplaceDoc.search().knn( + field=WorkplaceDoc.passages.embedding, + k=5, + num_candidates=50, + query_vector=list(WorkplaceDoc.get_embedding(query)), + inner_hits={"size": 2}, + ) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Vector database with Elasticsearch") + parser.add_argument( + "--recreate-index", action="store_true", help="Recreate and populate the index" + ) + parser.add_argument( + "--show-inner-hits", + action="store_true", + help="Show results for individual passages", + ) + parser.add_argument("query", action="store", help="The search query") + return parser.parse_args() + + +async def main() -> None: + args = parse_args() + + # initiate the default connection to elasticsearch + async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) + + if args.recreate_index or not await WorkplaceDoc._index.exists(): + await create() + + results = await search(args.query) + + async for hit in results: + print( + f"Document: {hit.name} [Category: {hit.category}] [Score: {hit.meta.score}]" + ) + print(f"Summary: {hit.summary}") + if args.show_inner_hits: + for passage in hit.meta.inner_hits["passages"]: + print(f" - [Score: {passage.meta.score}] {passage.content!r}") + print("") + + # close the connection + await async_connections.get_connection().close() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/dsl/completion.py b/examples/dsl/completion.py new file mode 100644 index 000000000..3380dc520 --- /dev/null +++ b/examples/dsl/completion.py @@ -0,0 +1,113 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" +Example ``Document`` with completion suggester. + +In the ``Person`` class we index the person's name to allow auto completing in +any order ("first last", "middle last first", ...). For the weight we use a +value from the ``popularity`` field which is a long. + +To make the suggestions work in different languages we added a custom analyzer +that does ascii folding. 
+""" + +import os +from itertools import permutations +from typing import TYPE_CHECKING, Any, Dict, Optional + +from elasticsearch.dsl import ( + Completion, + Document, + Keyword, + Long, + Text, + analyzer, + connections, + mapped_field, + token_filter, +) + +# custom analyzer for names +ascii_fold = analyzer( + "ascii_fold", + # we don't want to split O'Brian or Toulouse-Lautrec + tokenizer="whitespace", + filter=["lowercase", token_filter("ascii_fold", "asciifolding")], +) + + +class Person(Document): + if TYPE_CHECKING: + # definitions here help type checkers understand additional arguments + # that are allowed in the constructor + _id: Optional[int] = mapped_field(default=None) + + name: str = mapped_field(Text(fields={"keyword": Keyword()}), default="") + popularity: int = mapped_field(Long(), default=0) + + # completion field with a custom analyzer + suggest: Dict[str, Any] = mapped_field(Completion(analyzer=ascii_fold), init=False) + + def clean(self) -> None: + """ + Automatically construct the suggestion input and weight by taking all + possible permutations of Person's name as ``input`` and taking their + popularity as ``weight``. + """ + self.suggest = { + "input": [" ".join(p) for p in permutations(self.name.split())], + "weight": self.popularity, + } + + class Index: + name = "test-suggest" + settings = {"number_of_shards": 1, "number_of_replicas": 0} + + +def main() -> None: + # initiate the default connection to elasticsearch + connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) + + # create the empty index + Person.init() + + # index some sample data + for id, (name, popularity) in enumerate( + [("Henri de Toulouse-Lautrec", 42), ("Jára Cimrman", 124)] + ): + Person(_id=id, name=name, popularity=popularity).save() + + # refresh index manually to make changes live + Person._index.refresh() + + # run some suggestions + for text in ("já", "Jara Cimr", "tou", "de hen"): + s = Person.search() + s = s.suggest("auto_complete", text, completion={"field": "suggest"}) + response = s.execute() + + # print out all the options we got + for option in response.suggest["auto_complete"][0].options: + print("%10s: %25s (%d)" % (text, option._source.name, option._score)) + + # close the connection + connections.get_connection().close() + + +if __name__ == "__main__": + main() diff --git a/examples/dsl/composite_agg.py b/examples/dsl/composite_agg.py new file mode 100644 index 000000000..56f7ae7a6 --- /dev/null +++ b/examples/dsl/composite_agg.py @@ -0,0 +1,90 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
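+
+# Each item yielded by scan_aggs() below is one composite bucket; with the
+# aggregations used in main() a bucket looks roughly like this (values are
+# illustrative):
+#
+#   {"key": {"files": "setup.py"},
+#    "doc_count": 3,
+#    "first_seen": {"value": 1393575340000.0,
+#                   "value_as_string": "2014-02-28T08:15:40.000Z"}}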
+ +import os +from typing import Any, Dict, Iterator, Mapping, Sequence, cast + +from elasticsearch.dsl import Agg, Response, Search, aggs, connections +from elasticsearch.dsl.types import CompositeAggregate +from elasticsearch.helpers import bulk +from test_elasticsearch.test_dsl.test_integration.test_data import DATA, GIT_INDEX + + +def scan_aggs( + search: Search, + source_aggs: Sequence[Mapping[str, Agg]], + inner_aggs: Dict[str, Agg] = {}, + size: int = 10, +) -> Iterator[CompositeAggregate]: + """ + Helper function used to iterate over all possible bucket combinations of + ``source_aggs``, returning results of ``inner_aggs`` for each. Uses the + ``composite`` aggregation under the hood to perform this. + """ + + def run_search(**kwargs: Any) -> Response: + s = search[:0] + bucket = s.aggs.bucket( + "comp", + aggs.Composite( + sources=source_aggs, + size=size, + **kwargs, + ), + ) + for agg_name, agg in inner_aggs.items(): + bucket[agg_name] = agg + return s.execute() + + response = run_search() + while response.aggregations["comp"].buckets: + for b in response.aggregations["comp"].buckets: + yield cast(CompositeAggregate, b) + if "after_key" in response.aggregations["comp"]: + after = response.aggregations["comp"].after_key + else: + after = response.aggregations["comp"].buckets[-1].key + response = run_search(after=after) + + +def main() -> None: + # initiate the default connection to elasticsearch + client = connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) + + # create the index and populate it with some data + # note that the dataset is imported from the library's test suite + client.indices.delete(index="git", ignore_unavailable=True) + client.indices.create(index="git", **GIT_INDEX) + bulk(client, DATA, raise_on_error=True, refresh=True) + + # run some aggregations on the data + for b in scan_aggs( + Search(index="git"), + [{"files": aggs.Terms(field="files")}], + {"first_seen": aggs.Min(field="committed_date")}, + ): + print( + "File %s has been modified %d times, first seen at %s." + % (b.key.files, b.doc_count, b.first_seen.value_as_string) + ) + + # close the connection + connections.get_connection().close() + + +if __name__ == "__main__": + main() diff --git a/examples/dsl/parent_child.py b/examples/dsl/parent_child.py new file mode 100644 index 000000000..22c597464 --- /dev/null +++ b/examples/dsl/parent_child.py @@ -0,0 +1,275 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" +Complex data model example modeling stackoverflow-like data. 
+ +It is used to showcase several key features of elasticsearch-dsl: + + * Object and Nested fields: see User and Comment classes and fields they + are used in + + * method add_comment is used to add comments + + * Parent/Child relationship + + * See the Join field on Post creating the relationship between Question + and Answer + + * Meta.matches allows the hits from same index to be wrapped in proper + classes + + * to see how child objects are created see Question.add_answer + + * Question.search_answers shows how to query for children of a + particular parent + +""" +import os +from datetime import datetime +from typing import TYPE_CHECKING, Any, Dict, List, Optional, cast + +from elasticsearch.dsl import ( + Date, + Document, + InnerDoc, + Join, + Keyword, + Long, + Search, + Text, + connections, + mapped_field, +) + + +class User(InnerDoc): + """ + Class used to represent a denormalized user stored on other objects. + """ + + id: int = mapped_field(Long()) + signed_up: Optional[datetime] = mapped_field(Date()) + username: str = mapped_field(Text(fields={"keyword": Keyword()})) + email: Optional[str] = mapped_field(Text(fields={"keyword": Keyword()})) + location: Optional[str] = mapped_field(Text(fields={"keyword": Keyword()})) + + +class Comment(InnerDoc): + """ + Class wrapper for nested comment objects. + """ + + author: User + created: datetime + content: str + + +class Post(Document): + """ + Base class for Question and Answer containing the common fields. + """ + + author: User + + if TYPE_CHECKING: + # definitions here help type checkers understand additional arguments + # that are allowed in the constructor + _routing: str = mapped_field(default=None) + _id: Optional[int] = mapped_field(default=None) + + created: Optional[datetime] = mapped_field(default=None) + body: str = mapped_field(default="") + comments: List[Comment] = mapped_field(default_factory=list) + question_answer: Any = mapped_field( + Join(relations={"question": "answer"}), default_factory=dict + ) + + @classmethod + def _matches(cls, hit: Dict[str, Any]) -> bool: + # Post is an abstract class, make sure it never gets used for + # deserialization + return False + + class Index: + name = "test-qa-site" + settings = { + "number_of_shards": 1, + "number_of_replicas": 0, + } + + def add_comment( + self, + user: User, + content: str, + created: Optional[datetime] = None, + commit: Optional[bool] = True, + ) -> Comment: + c = Comment(author=user, content=content, created=created or datetime.now()) + self.comments.append(c) + if commit: + self.save() + return c + + def save(self, **kwargs: Any) -> None: # type: ignore[override] + # if there is no date, use now + if self.created is None: + self.created = datetime.now() + super().save(**kwargs) + + +class Question(Post): + tags: List[str] = mapped_field( + default_factory=list + ) # .tags will return empty list if not present + title: str = mapped_field(Text(fields={"keyword": Keyword()}), default="") + + @classmethod + def _matches(cls, hit: Dict[str, Any]) -> bool: + """Use Question class for parent documents""" + return bool(hit["_source"]["question_answer"] == "question") + + @classmethod + def search(cls, **kwargs: Any) -> Search: # type: ignore[override] + return cls._index.search(**kwargs).filter("term", question_answer="question") + + def add_answer( + self, + user: User, + body: str, + created: Optional[datetime] = None, + accepted: bool = False, + commit: Optional[bool] = True, + ) -> "Answer": + answer = Answer( + # required make sure the answer is stored in 
the same shard + _routing=self.meta.id, + # set up the parent/child mapping + question_answer={"name": "answer", "parent": self.meta.id}, + # pass in the field values + author=user, + created=created, + body=body, + is_accepted=accepted, + ) + if commit: + answer.save() + return answer + + def search_answers(self) -> Search: + # search only our index + s = Answer.search() + # filter for answers belonging to us + s = s.filter("parent_id", type="answer", id=self.meta.id) + # add routing to only go to specific shard + s = s.params(routing=self.meta.id) + return s + + def get_answers(self) -> List[Any]: + """ + Get answers either from inner_hits already present or by searching + elasticsearch. + """ + if "inner_hits" in self.meta and "answer" in self.meta.inner_hits: + return cast(List[Any], self.meta.inner_hits["answer"].hits) + return [a for a in self.search_answers()] + + def save(self, **kwargs: Any) -> None: # type: ignore[override] + self.question_answer = "question" + super().save(**kwargs) + + +class Answer(Post): + is_accepted: bool = mapped_field(default=False) + + @classmethod + def _matches(cls, hit: Dict[str, Any]) -> bool: + """Use Answer class for child documents with child name 'answer'""" + return ( + isinstance(hit["_source"]["question_answer"], dict) + and hit["_source"]["question_answer"].get("name") == "answer" + ) + + @classmethod + def search(cls, **kwargs: Any) -> Search: # type: ignore[override] + return cls._index.search(**kwargs).exclude("term", question_answer="question") + + def get_question(self) -> Optional[Question]: + # cache question in self.meta + # any attributes set on self would be interpreted as fields + if "question" not in self.meta: + self.meta.question = Question.get( + id=self.question_answer.parent, index=self.meta.index + ) + return cast(Optional[Question], self.meta.question) + + def save(self, **kwargs: Any) -> None: # type: ignore[override] + # set routing to parents id automatically + self.meta.routing = self.question_answer.parent + super().save(**kwargs) + + +def setup() -> None: + """Create an IndexTemplate and save it into elasticsearch.""" + index_template = Post._index.as_composable_template("base", priority=100) + index_template.save() + + +def main() -> Answer: + # initiate the default connection to elasticsearch + connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) + + # create index + setup() + + # user objects to use + nick = User( + id=47, + signed_up=datetime(2017, 4, 3), + username="fxdgear", + email="nick.lang@elastic.co", + location="Colorado", + ) + honza = User( + id=42, + signed_up=datetime(2013, 4, 3), + username="honzakral", + email="honza@elastic.co", + location="Prague", + ) + + # create a question object + question = Question( + _id=1, + author=nick, + tags=["elasticsearch", "python"], + title="How do I use elasticsearch from Python?", + body=""" + I want to use elasticsearch, how do I do it from Python? + """, + ) + question.save() + answer = question.add_answer(honza, "Just use `elasticsearch-py`!") + + # close the connection + connections.get_connection().close() + + return answer + + +if __name__ == "__main__": + main() diff --git a/examples/dsl/percolate.py b/examples/dsl/percolate.py new file mode 100644 index 000000000..d8747feda --- /dev/null +++ b/examples/dsl/percolate.py @@ -0,0 +1,116 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. 
Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import os +from typing import TYPE_CHECKING, Any, List, Optional + +from elasticsearch.dsl import ( + Document, + Keyword, + Percolator, + Q, + Query, + Search, + connections, + mapped_field, +) + + +class BlogPost(Document): + """ + Blog posts that will be automatically tagged based on percolation queries. + """ + + if TYPE_CHECKING: + # definitions here help type checkers understand additional arguments + # that are allowed in the constructor + _id: int + + content: Optional[str] + tags: List[str] = mapped_field(Keyword(), default_factory=list) + + class Index: + name = "test-blogpost" + + def add_tags(self) -> None: + # run a percolation to automatically tag the blog post. + s = Search(index="test-percolator") + s = s.query( + "percolate", field="query", index=self._get_index(), document=self.to_dict() + ) + + # collect all the tags from matched percolators + for percolator in s: + self.tags.extend(percolator.tags) + + # make sure tags are unique + self.tags = list(set(self.tags)) + + def save(self, **kwargs: Any) -> None: # type: ignore[override] + self.add_tags() + super().save(**kwargs) + + +class PercolatorDoc(Document): + """ + Document class used for storing the percolation queries. + """ + + if TYPE_CHECKING: + _id: str + + # relevant fields from BlogPost must be also present here for the queries + # to be able to use them. Another option would be to use document + # inheritance but save() would have to be reset to normal behavior. + content: Optional[str] + + # the percolator query to be run against the doc + query: Query = mapped_field(Percolator()) + # list of tags to append to a document + tags: List[str] = mapped_field(Keyword(multi=True)) + + class Index: + name = "test-percolator" + settings = {"number_of_shards": 1, "number_of_replicas": 0} + + +def setup() -> None: + # create the percolator index if it doesn't exist + if not PercolatorDoc._index.exists(): + PercolatorDoc.init() + + # register a percolation query looking for documents about python + PercolatorDoc( + _id="python", + tags=["programming", "development", "python"], + content="", + query=Q("match", content="python"), + ).save(refresh=True) + + +def main() -> None: + # initiate the default connection to elasticsearch + connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) + + setup() + + # close the connection + connections.get_connection().close() + + +if __name__ == "__main__": + main() diff --git a/examples/dsl/search_as_you_type.py b/examples/dsl/search_as_you_type.py new file mode 100644 index 000000000..c1ebc99a4 --- /dev/null +++ b/examples/dsl/search_as_you_type.py @@ -0,0 +1,93 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. 
licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" +Example ``Document`` with search_as_you_type field datatype and how to search it. + +When creating a field with the search_as_you_type datatype, Elasticsearch creates +additional subfields to enable efficient as-you-type completion, matching terms at any +position within the input. + +A custom analyzer with ascii folding can be added to make the search work in different languages. +""" + +import os +from typing import TYPE_CHECKING, Optional + +from elasticsearch.dsl import Document, SearchAsYouType, connections, mapped_field +from elasticsearch.dsl.query import MultiMatch + + +class Person(Document): + if TYPE_CHECKING: + # definitions here help type checkers understand additional arguments + # that are allowed in the constructor + _id: Optional[int] = mapped_field(default=None) + + name: str = mapped_field(SearchAsYouType(max_shingle_size=3), default="") + + class Index: + name = "test-search-as-you-type" + settings = {"number_of_shards": 1, "number_of_replicas": 0} + + +def main() -> None: + # initiate the default connection to elasticsearch + connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) + + # create the empty index + Person.init() + + import pprint + + pprint.pprint(Person().to_dict(), indent=2) + + # index some sample data + names = [ + "Andy Warhol", + "Alphonse Mucha", + "Henri de Toulouse-Lautrec", + "Jára Cimrman", + ] + for id, name in enumerate(names): + Person(_id=id, name=name).save() + + # refresh index manually to make changes live + Person._index.refresh() + + # run some suggestions + for text in ("já", "Cimr", "toulouse", "Henri Tou", "a"): + s = Person.search() + + s.query = MultiMatch( # type: ignore[assignment] + query=text, + type="bool_prefix", + fields=["name", "name._2gram", "name._3gram"], + ) + + response = s.execute() + + # print out all the options we got + for h in response: + print("%15s: %25s" % (text, h.name)) + + # close the connection + connections.get_connection().close() + + +if __name__ == "__main__": + main() diff --git a/examples/dsl/semantic_text.py b/examples/dsl/semantic_text.py new file mode 100644 index 000000000..8d552a2aa --- /dev/null +++ b/examples/dsl/semantic_text.py @@ -0,0 +1,147 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License.
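For reference, the ``MultiMatch`` query built in the search_as_you_type example above
serializes to roughly the following body; the ``._2gram`` and ``._3gram`` subfields are
generated automatically by Elasticsearch for a search_as_you_type field with
``max_shingle_size=3`` (a sketch, using one of the sample queries from that example):

    # illustrative only: approximate raw form of the bool_prefix multi_match above
    query_body = {
        "query": {
            "multi_match": {
                "query": "Henri Tou",
                "type": "bool_prefix",
                "fields": ["name", "name._2gram", "name._3gram"],
            }
        }
    }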
+ + +""" +# Semantic Text example + +Requirements: + +$ pip install "elasticsearch" tqdm + +Before running this example, an ELSER inference endpoint must be created in the +Elasticsearch cluster. This can be done manually from Kibana, or with the +following curl command from a terminal: + +curl -X PUT \ + "$ELASTICSEARCH_URL/_inference/sparse_embedding/my-elser-endpoint" \ + -H "Content-Type: application/json" \ + -d '{"service":"elser","service_settings":{"num_allocations":1,"num_threads":1}}' + +To run the example: + +$ python semantic_text.py "text to search" + +The index will be created automatically if it does not exist. Add +`--recreate-index` to the command to regenerate it. + +The example dataset includes a selection of workplace documents. The +following are good example queries to try out with this dataset: + +$ python semantic_text.py "work from home" +$ python semantic_text.py "vacation time" +$ python semantic_text.py "can I bring a bird to work?" + +When the index is created, the inference service will split the documents into +short passages, and for each passage a sparse embedding will be generated using +Elastic's ELSER v2 model. +""" + +import argparse +import json +import os +from datetime import datetime +from typing import Any, Optional +from urllib.request import urlopen + +from tqdm import tqdm + +from elasticsearch import dsl + +DATASET_URL = "https://raw.githubusercontent.com/elastic/elasticsearch-labs/main/datasets/workplace-documents.json" + + +class WorkplaceDoc(dsl.Document): + class Index: + name = "workplace_documents_semantic" + + name: str + summary: str + content: Any = dsl.mapped_field( + dsl.field.SemanticText(inference_id="my-elser-endpoint") + ) + created: datetime + updated: Optional[datetime] + url: str = dsl.mapped_field(dsl.Keyword()) + category: str = dsl.mapped_field(dsl.Keyword()) + + +def create() -> None: + + # create the index + WorkplaceDoc._index.delete(ignore_unavailable=True) + WorkplaceDoc.init() + + # download the data + dataset = json.loads(urlopen(DATASET_URL).read()) + + # import the dataset + for data in tqdm(dataset, desc="Indexing documents..."): + doc = WorkplaceDoc( + name=data["name"], + summary=data["summary"], + content=data["content"], + created=data.get("created_on"), + updated=data.get("updated_at"), + url=data["url"], + category=data["category"], + ) + doc.save() + + # refresh the index + WorkplaceDoc._index.refresh() + + +def search(query: str) -> dsl.Search[WorkplaceDoc]: + search = WorkplaceDoc.search() + search = search[:5] + return search.query(dsl.query.Semantic(field=WorkplaceDoc.content, query=query)) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Vector database with Elasticsearch") + parser.add_argument( + "--recreate-index", action="store_true", help="Recreate and populate the index" + ) + parser.add_argument("query", action="store", help="The search query") + return parser.parse_args() + + +def main() -> None: + args = parse_args() + + # initiate the default connection to elasticsearch + dsl.connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) + + if args.recreate_index or not WorkplaceDoc._index.exists(): + create() + + results = search(args.query) + + for hit in results: + print( + f"Document: {hit.name} [Category: {hit.category}] [Score: {hit.meta.score}]" + ) + print(f"Content: {hit.content.text}") + print("--------------------\n") + + # close the connection + dsl.connections.get_connection().close() + + +if __name__ == "__main__": + main() 
diff --git a/examples/dsl/sparse_vectors.py b/examples/dsl/sparse_vectors.py new file mode 100644 index 000000000..a92e82026 --- /dev/null +++ b/examples/dsl/sparse_vectors.py @@ -0,0 +1,197 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" +# Sparse vector database example + +Requirements: + +$ pip install nltk tqdm "elasticsearch" + +Before running this example, the ELSER v2 model must be downloaded and deployed +to the Elasticsearch cluster, and an ingest pipeline must be defined. This can +be done manually from Kibana, or with the following three curl commands from a +terminal, adjusting the endpoint as needed: + +curl -X PUT \ + "http://localhost:9200/_ml/trained_models/.elser_model_2?wait_for_completion" \ + -H "Content-Type: application/json" \ + -d '{"input":{"field_names":["text_field"]}}' +curl -X POST \ + "http://localhost:9200/_ml/trained_models/.elser_model_2/deployment/_start?wait_for=fully_allocated" +curl -X PUT \ + "http://localhost:9200/_ingest/pipeline/elser_ingest_pipeline" \ + -H "Content-Type: application/json" \ + -d '{"processors":[{"foreach":{"field":"passages","processor":{"inference":{"model_id":".elser_model_2","input_output":[{"input_field":"_ingest._value.content","output_field":"_ingest._value.embedding"}]}}}}]}' + +To run the example: + +$ python sparse_vectors.py "text to search" + +The index will be created automatically if it does not exist. Add +`--recreate-index` to regenerate it. + +The example dataset includes a selection of workplace documents. The +following are good example queries to try out with this dataset: + +$ python sparse_vectors.py "work from home" +$ python sparse_vectors.py "vacation time" +$ python sparse_vectors.py "can I bring a bird to work?" + +When the index is created, the documents are split into short passages, and for +each passage a sparse embedding is generated using Elastic's ELSER v2 model. +The documents that are returned as search results are those that have the +highest scored passages. Add `--show-inner-hits` to the command to see +individual passage results as well. 
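As a sketch, the ``search()`` function defined below wraps a ``text_expansion`` query in a
``nested`` query so that the best-matching passages can be surfaced through inner hits; in
raw form the request is roughly (field, model, and sizes as used in this example):

    # illustrative only: approximate raw form of the nested text_expansion search below
    request_body = {
        "size": 5,
        "query": {
            "nested": {
                "path": "passages",
                "query": {
                    "text_expansion": {
                        "passages.content": {
                            "model_id": ".elser_model_2",
                            "model_text": "work from home",
                        }
                    }
                },
                "inner_hits": {"size": 2},
            }
        },
    }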
+""" + +import argparse +import json +import os +from datetime import datetime +from typing import Any, Dict, List, Optional +from urllib.request import urlopen + +import nltk +from tqdm import tqdm + +from elasticsearch.dsl import ( + Document, + InnerDoc, + Keyword, + Q, + Search, + SparseVector, + connections, + mapped_field, +) + +DATASET_URL = "https://raw.githubusercontent.com/elastic/elasticsearch-labs/main/datasets/workplace-documents.json" + +# initialize sentence tokenizer +nltk.download("punkt_tab", quiet=True) + + +class Passage(InnerDoc): + content: Optional[str] + embedding: Dict[str, float] = mapped_field(SparseVector(), init=False) + + +class WorkplaceDoc(Document): + class Index: + name = "workplace_documents_sparse" + settings = {"default_pipeline": "elser_ingest_pipeline"} + + name: str + summary: str + content: str + created: datetime + updated: Optional[datetime] + url: str = mapped_field(Keyword()) + category: str = mapped_field(Keyword()) + passages: List[Passage] = mapped_field(default=[]) + + _model: Any = None + + def clean(self) -> None: + # split the content into sentences + passages = nltk.sent_tokenize(self.content) + + # generate an embedding for each passage and save it as a nested document + for passage in passages: + self.passages.append(Passage(content=passage)) + + +def create() -> None: + + # create the index + WorkplaceDoc._index.delete(ignore_unavailable=True) + WorkplaceDoc.init() + + # download the data + dataset = json.loads(urlopen(DATASET_URL).read()) + + # import the dataset + for data in tqdm(dataset, desc="Indexing documents..."): + doc = WorkplaceDoc( + name=data["name"], + summary=data["summary"], + content=data["content"], + created=data.get("created_on"), + updated=data.get("updated_at"), + url=data["url"], + category=data["category"], + ) + doc.save() + + +def search(query: str) -> Search[WorkplaceDoc]: + return WorkplaceDoc.search()[:5].query( + "nested", + path="passages", + query=Q( + "text_expansion", + passages__content={ + "model_id": ".elser_model_2", + "model_text": query, + }, + ), + inner_hits={"size": 2}, + ) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Vector database with Elasticsearch") + parser.add_argument( + "--recreate-index", action="store_true", help="Recreate and populate the index" + ) + parser.add_argument( + "--show-inner-hits", + action="store_true", + help="Show results for individual passages", + ) + parser.add_argument("query", action="store", help="The search query") + return parser.parse_args() + + +def main() -> None: + args = parse_args() + + # initiate the default connection to elasticsearch + connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) + + if args.recreate_index or not WorkplaceDoc._index.exists(): + create() + + results = search(args.query) + + for hit in results: + print( + f"Document: {hit.name} [Category: {hit.category}] [Score: {hit.meta.score}]" + ) + print(f"Summary: {hit.summary}") + if args.show_inner_hits: + for passage in hit.meta.inner_hits["passages"]: + print(f" - [Score: {passage.meta.score}] {passage.content!r}") + print("") + + # close the connection + connections.get_connection().close() + + +if __name__ == "__main__": + main() diff --git a/examples/dsl/vectors.py b/examples/dsl/vectors.py new file mode 100644 index 000000000..3afd76991 --- /dev/null +++ b/examples/dsl/vectors.py @@ -0,0 +1,186 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" +# Vector database example + +Requirements: + +$ pip install nltk sentence_transformers tqdm "elasticsearch" + +To run the example: + +$ python vectors.py "text to search" + +The index will be created automatically if it does not exist. Add +`--recreate-index` to regenerate it. + +The example dataset includes a selection of workplace documents. The +following are good example queries to try out with this dataset: + +$ python vectors.py "work from home" +$ python vectors.py "vacation time" +$ python vectors.py "can I bring a bird to work?" + +When the index is created, the documents are split into short passages, and for +each passage an embedding is generated using the open source +"all-MiniLM-L6-v2" model. The documents that are returned as search results are +those that have the highest scored passages. Add `--show-inner-hits` to the +command to see individual passage results as well. +""" + +import argparse +import json +import os +from datetime import datetime +from typing import Any, List, Optional, cast +from urllib.request import urlopen + +import nltk +from sentence_transformers import SentenceTransformer +from tqdm import tqdm + +from elasticsearch.dsl import ( + DenseVector, + Document, + InnerDoc, + Keyword, + M, + Search, + connections, + mapped_field, +) + +DATASET_URL = "https://raw.githubusercontent.com/elastic/elasticsearch-labs/main/datasets/workplace-documents.json" +MODEL_NAME = "all-MiniLM-L6-v2" + +# initialize sentence tokenizer +nltk.download("punkt_tab", quiet=True) + +# this will be the embedding model +embedding_model: Any = None + + +class Passage(InnerDoc): + content: str + embedding: List[float] = mapped_field(DenseVector()) + + +class WorkplaceDoc(Document): + class Index: + name = "workplace_documents" + + name: str + summary: str + content: str + created: datetime + updated: Optional[datetime] + url: str = mapped_field(Keyword(required=True)) + category: str = mapped_field(Keyword(required=True)) + passages: M[List[Passage]] = mapped_field(default=[]) + + @classmethod + def get_embedding(cls, input: str) -> List[float]: + global embedding_model + if embedding_model is None: + embedding_model = SentenceTransformer(MODEL_NAME) + return cast(List[float], list(embedding_model.encode(input))) + + def clean(self) -> None: + # split the content into sentences + passages = cast(List[str], nltk.sent_tokenize(self.content)) + + # generate an embedding for each passage and save it as a nested document + for passage in passages: + self.passages.append( + Passage(content=passage, embedding=self.get_embedding(passage)) + ) + + +def create() -> None: + # create the index + WorkplaceDoc._index.delete(ignore_unavailable=True) + WorkplaceDoc.init() + + # download the data + dataset = json.loads(urlopen(DATASET_URL).read()) + + # import the dataset + for data in tqdm(dataset, 
desc="Indexing documents..."): + doc = WorkplaceDoc( + name=data["name"], + summary=data["summary"], + content=data["content"], + created=data.get("created_on"), + updated=data.get("updated_at"), + url=data["url"], + category=data["category"], + ) + doc.save() + + +def search(query: str) -> Search[WorkplaceDoc]: + return WorkplaceDoc.search().knn( + field=WorkplaceDoc.passages.embedding, + k=5, + num_candidates=50, + query_vector=list(WorkplaceDoc.get_embedding(query)), + inner_hits={"size": 2}, + ) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description="Vector database with Elasticsearch") + parser.add_argument( + "--recreate-index", action="store_true", help="Recreate and populate the index" + ) + parser.add_argument( + "--show-inner-hits", + action="store_true", + help="Show results for individual passages", + ) + parser.add_argument("query", action="store", help="The search query") + return parser.parse_args() + + +def main() -> None: + args = parse_args() + + # initiate the default connection to elasticsearch + connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) + + if args.recreate_index or not WorkplaceDoc._index.exists(): + create() + + results = search(args.query) + + for hit in results: + print( + f"Document: {hit.name} [Category: {hit.category}] [Score: {hit.meta.score}]" + ) + print(f"Summary: {hit.summary}") + if args.show_inner_hits: + for passage in hit.meta.inner_hits["passages"]: + print(f" - [Score: {passage.meta.score}] {passage.content!r}") + print("") + + # close the connection + connections.get_connection().close() + + +if __name__ == "__main__": + main() diff --git a/noxfile.py b/noxfile.py index 8242e1ce0..90172d49e 100644 --- a/noxfile.py +++ b/noxfile.py @@ -37,7 +37,6 @@ def pytest_argv(): "pytest", "--cov-report=term-missing", "--cov=elasticsearch", - "--cov-config=setup.cfg", f"--junitxml={junit_xml}", "--log-level=DEBUG", "--cache-clear", @@ -47,7 +46,7 @@ def pytest_argv(): @nox.session(python=["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"]) def test(session): - session.install(".[dev]", env=INSTALL_ENV, silent=False) + session.install("-e", ".[dev]", env=INSTALL_ENV, silent=False) session.run(*pytest_argv(), *session.posargs) @@ -67,9 +66,11 @@ def test_otel(session): @nox.session() def format(session): - session.install("black~=24.0", "isort", "flynt", "unasync>=0.6.0") + session.install(".", "black~=24.0", "isort", "flynt", "unasync>=0.6.0", "jinja2") session.run("python", "utils/run-unasync.py") + session.run("python", "utils/run-unasync-dsl.py") + session.run("python", "utils/dsl-generator.py", env={"PYTHONPATH": "./"}) session.run("isort", "--profile=black", *SOURCE_FILES) session.run("flynt", *SOURCE_FILES) session.run("black", *SOURCE_FILES) @@ -86,11 +87,18 @@ def lint(session): session.run("python", "-c", "from elasticsearch._otel import OpenTelemetry") session.install( - "flake8", "black~=24.0", "mypy", "isort", "types-requests", "unasync>=0.6.0" + "flake8", + "black~=24.0", + "mypy", + "isort", + "types-requests", + "types-python-dateutil", + "unasync>=0.6.0", ) session.run("isort", "--check", "--profile=black", *SOURCE_FILES) session.run("black", "--check", *SOURCE_FILES) session.run("python", "utils/run-unasync.py", "--check") + session.run("python", "utils/run-unasync-dsl.py", "--check") session.run("flake8", *SOURCE_FILES) session.run("python", "utils/license-headers.py", "check", *SOURCE_FILES) @@ -98,7 +106,15 @@ def lint(session): # Run mypy on the package and then the type examples 
separately for # the two different mypy use-cases, ourselves and our users. - session.run("mypy", "--strict", "--show-error-codes", "elasticsearch/") + session.run( + "mypy", + "--strict", + "--implicit-reexport", + "--explicit-package-bases", + "--show-error-codes", + "--enable-error-code=ignore-without-code", + "elasticsearch/", + ) session.run( "mypy", "--strict", @@ -112,10 +128,28 @@ def lint(session): "test_elasticsearch/test_types/async_types.py", ) + # check typing on the DSL examples + session.run( + "mypy", + "--strict", + "--implicit-reexport", + "--explicit-package-bases", + "--show-error-codes", + "--enable-error-code=ignore-without-code", + "examples/dsl/", + ) + # Make sure we don't require aiohttp to be installed for users to # receive type hint information from mypy. session.run("python", "-m", "pip", "uninstall", "--yes", "aiohttp") - session.run("mypy", "--strict", "--show-error-codes", "elasticsearch/") + session.run( + "mypy", + "--strict", + "--implicit-reexport", + "--explicit-package-bases", + "--show-error-codes", + "elasticsearch/", + ) session.run( "mypy", "--strict", diff --git a/pyproject.toml b/pyproject.toml index b5f03e1d0..0c66e2f50 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -43,6 +43,8 @@ keywords = [ dynamic = ["version"] dependencies = [ "elastic-transport>=8.15.1,<9", + "python-dateutil", + "typing-extensions", ] [project.optional-dependencies] @@ -57,6 +59,7 @@ dev = [ "aiohttp", "pytest", "pytest-cov", + "pytest-mock", "pytest-asyncio", "coverage", "jinja2", @@ -74,6 +77,14 @@ dev = [ "pyarrow", "pandas", "mapbox-vector-tile", + "jinja2", + "nltk", + "sentence_transformers", + "tqdm", + "mypy", + "pyright", + "types-python-dateutil", + "types-tqdm", ] docs = [ "sphinx", @@ -109,7 +120,15 @@ packages = ["elasticsearch"] [tool.pytest.ini_options] junit_family = "legacy" xfail_strict = true -markers = "otel" +markers = [ + "otel", + "sync: mark a test as performing I/O without asyncio.", +] +filterwarnings = [ + "ignore:Legacy index templates are deprecated in favor of composable templates.:elasticsearch.exceptions.ElasticsearchWarning", + "ignore:datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version..*:DeprecationWarning", + "default:enable_cleanup_closed ignored.*:DeprecationWarning", +] [tool.isort] profile = "black" diff --git a/setup.cfg b/setup.cfg index 403f26a50..4d1925616 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,2 +1,2 @@ [flake8] -ignore = E203, E266, E501, W503 +ignore = E203, E266, E501, W503, E704, E741 diff --git a/test_elasticsearch/test_dsl/__init__.py b/test_elasticsearch/test_dsl/__init__.py new file mode 100644 index 000000000..2a87d183f --- /dev/null +++ b/test_elasticsearch/test_dsl/__init__.py @@ -0,0 +1,16 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. diff --git a/test_elasticsearch/test_dsl/_async/__init__.py b/test_elasticsearch/test_dsl/_async/__init__.py new file mode 100644 index 000000000..2a87d183f --- /dev/null +++ b/test_elasticsearch/test_dsl/_async/__init__.py @@ -0,0 +1,16 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. diff --git a/test_elasticsearch/test_dsl/_async/test_document.py b/test_elasticsearch/test_dsl/_async/test_document.py new file mode 100644 index 000000000..5fe2d326c --- /dev/null +++ b/test_elasticsearch/test_dsl/_async/test_document.py @@ -0,0 +1,883 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# this file creates several documents using bad or no types because +# these are still supported and should be kept functional in spite +# of not having appropriate type hints. For that reason the comment +# below disables many mypy checks that fails as a result of this. 
+# mypy: disable-error-code="assignment, index, arg-type, call-arg, operator, comparison-overlap, attr-defined" + +import codecs +import ipaddress +import pickle +import sys +from datetime import datetime +from hashlib import md5 +from typing import Any, ClassVar, Dict, List, Optional + +import pytest +from pytest import raises + +from elasticsearch.dsl import ( + AsyncDocument, + Index, + InnerDoc, + M, + Mapping, + MetaField, + Range, + analyzer, + field, + mapped_field, + utils, +) +from elasticsearch.dsl.document_base import InstrumentedField +from elasticsearch.dsl.exceptions import IllegalOperation, ValidationException + + +class MyInner(InnerDoc): + old_field = field.Text() + + +class MyDoc(AsyncDocument): + title = field.Keyword() + name = field.Text() + created_at = field.Date() + inner = field.Object(MyInner) + + +class MySubDoc(MyDoc): + name = field.Keyword() + + class Index: + name = "default-index" + + +class MyDoc2(AsyncDocument): + extra = field.Long() + + +class MyMultiSubDoc(MyDoc2, MySubDoc): + pass + + +class Comment(InnerDoc): + title = field.Text() + tags = field.Keyword(multi=True) + + +class DocWithNested(AsyncDocument): + comments = field.Nested(Comment) + + class Index: + name = "test-doc-with-nested" + + +class SimpleCommit(AsyncDocument): + files = field.Text(multi=True) + + class Index: + name = "test-git" + + +class Secret(str): + pass + + +class SecretField(field.CustomField): + builtin_type = "text" + + def _serialize(self, data: Any) -> Any: + return codecs.encode(data, "rot_13") + + def _deserialize(self, data: Any) -> Any: + if isinstance(data, Secret): + return data + return Secret(codecs.decode(data, "rot_13")) + + +class SecretDoc(AsyncDocument): + title = SecretField(index="no") + + class Index: + name = "test-secret-doc" + + +class NestedSecret(AsyncDocument): + secrets = field.Nested(SecretDoc) + + class Index: + name = "test-nested-secret" + + +class OptionalObjectWithRequiredField(AsyncDocument): + comments = field.Nested(properties={"title": field.Keyword(required=True)}) + + class Index: + name = "test-required" + + +class Host(AsyncDocument): + ip = field.Ip() + + class Index: + name = "test-host" + + +def test_range_serializes_properly() -> None: + class D(AsyncDocument): + lr: Range[int] = field.LongRange() + + d = D(lr=Range(lt=42)) + assert 40 in d.lr + assert 47 not in d.lr + assert {"lr": {"lt": 42}} == d.to_dict() + + d = D(lr={"lt": 42}) + assert {"lr": {"lt": 42}} == d.to_dict() + + +def test_range_deserializes_properly() -> None: + class D(InnerDoc): + lr = field.LongRange() + + d = D.from_es({"lr": {"lt": 42}}, True) + assert isinstance(d.lr, Range) + assert 40 in d.lr + assert 47 not in d.lr + + +def test_resolve_nested() -> None: + nested, field = NestedSecret._index.resolve_nested("secrets.title") + assert nested == ["secrets"] + assert field is NestedSecret._doc_type.mapping["secrets"]["title"] + + +def test_conflicting_mapping_raises_error_in_index_to_dict() -> None: + class A(AsyncDocument): + name = field.Text() + + class B(AsyncDocument): + name = field.Keyword() + + i = Index("i") + i.document(A) + i.document(B) + + with raises(ValueError): + i.to_dict() + + +def test_ip_address_serializes_properly() -> None: + host = Host(ip=ipaddress.IPv4Address("10.0.0.1")) + + assert {"ip": "10.0.0.1"} == host.to_dict() + + +def test_matches_uses_index() -> None: + assert SimpleCommit._matches({"_index": "test-git"}) + assert not SimpleCommit._matches({"_index": "not-test-git"}) + + +def test_matches_with_no_name_always_matches() -> 
None: + class D(AsyncDocument): + pass + + assert D._matches({}) + assert D._matches({"_index": "whatever"}) + + +def test_matches_accepts_wildcards() -> None: + class MyDoc(AsyncDocument): + class Index: + name = "my-*" + + assert MyDoc._matches({"_index": "my-index"}) + assert not MyDoc._matches({"_index": "not-my-index"}) + + +def test_assigning_attrlist_to_field() -> None: + sc = SimpleCommit() + l = ["README", "README.rst"] + sc.files = utils.AttrList(l) + + assert sc.to_dict()["files"] is l + + +def test_optional_inner_objects_are_not_validated_if_missing() -> None: + d = OptionalObjectWithRequiredField() + + d.full_clean() + + +def test_custom_field() -> None: + s = SecretDoc(title=Secret("Hello")) + + assert {"title": "Uryyb"} == s.to_dict() + assert s.title == "Hello" + + s = SecretDoc.from_es({"_source": {"title": "Uryyb"}}) + assert s.title == "Hello" + assert isinstance(s.title, Secret) + + +def test_custom_field_mapping() -> None: + assert { + "properties": {"title": {"index": "no", "type": "text"}} + } == SecretDoc._doc_type.mapping.to_dict() + + +def test_custom_field_in_nested() -> None: + s = NestedSecret() + s.secrets.append(SecretDoc(title=Secret("Hello"))) + + assert {"secrets": [{"title": "Uryyb"}]} == s.to_dict() + assert s.secrets[0].title == "Hello" + + +def test_multi_works_after_doc_has_been_saved() -> None: + c = SimpleCommit() + c.full_clean() + c.files.append("setup.py") + + assert c.to_dict() == {"files": ["setup.py"]} + + +def test_multi_works_in_nested_after_doc_has_been_serialized() -> None: + # Issue #359 + c = DocWithNested(comments=[Comment(title="First!")]) + + assert [] == c.comments[0].tags + assert {"comments": [{"title": "First!"}]} == c.to_dict() + assert [] == c.comments[0].tags + + +def test_null_value_for_object() -> None: + d = MyDoc(inner=None) + + assert d.inner is None + + +def test_inherited_doc_types_can_override_index() -> None: + class MyDocDifferentIndex(MySubDoc): + class Index: + name = "not-default-index" + settings = {"number_of_replicas": 0} + aliases: Dict[str, Any] = {"a": {}} + analyzers = [analyzer("my_analizer", tokenizer="keyword")] + + assert MyDocDifferentIndex._index._name == "not-default-index" + assert MyDocDifferentIndex()._get_index() == "not-default-index" + assert MyDocDifferentIndex._index.to_dict() == { + "aliases": {"a": {}}, + "mappings": { + "properties": { + "created_at": {"type": "date"}, + "inner": { + "type": "object", + "properties": {"old_field": {"type": "text"}}, + }, + "name": {"type": "keyword"}, + "title": {"type": "keyword"}, + } + }, + "settings": { + "analysis": { + "analyzer": {"my_analizer": {"tokenizer": "keyword", "type": "custom"}} + }, + "number_of_replicas": 0, + }, + } + + +def test_to_dict_with_meta() -> None: + d = MySubDoc(title="hello") + d.meta.routing = "some-parent" + + assert { + "_index": "default-index", + "_routing": "some-parent", + "_source": {"title": "hello"}, + } == d.to_dict(True) + + +def test_to_dict_with_meta_includes_custom_index() -> None: + d = MySubDoc(title="hello") + d.meta.index = "other-index" + + assert {"_index": "other-index", "_source": {"title": "hello"}} == d.to_dict(True) + + +def test_to_dict_without_skip_empty_will_include_empty_fields() -> None: + d = MySubDoc(tags=[], title=None, inner={}) + + assert {} == d.to_dict() + assert {"tags": [], "title": None, "inner": {}} == d.to_dict(skip_empty=False) + + +def test_attribute_can_be_removed() -> None: + d = MyDoc(title="hello") + + del d.title + assert "title" not in d._d_ + + +def 
test_doc_type_can_be_correctly_pickled() -> None: + d = DocWithNested( + title="Hello World!", comments=[Comment(title="hellp")], meta={"id": 42} + ) + s = pickle.dumps(d) + + d2 = pickle.loads(s) + + assert d2 == d + assert 42 == d2.meta.id + assert "Hello World!" == d2.title + assert [{"title": "hellp"}] == d2.comments + assert isinstance(d2.comments[0], Comment) + + +def test_meta_is_accessible_even_on_empty_doc() -> None: + d = MyDoc() + d.meta + + d = MyDoc(title="aaa") + d.meta + + +def test_meta_field_mapping() -> None: + class User(AsyncDocument): + username = field.Text() + + class Meta: + all = MetaField(enabled=False) + _index = MetaField(enabled=True) + dynamic = MetaField("strict") + dynamic_templates = MetaField([42]) + + assert { + "properties": {"username": {"type": "text"}}, + "_all": {"enabled": False}, + "_index": {"enabled": True}, + "dynamic": "strict", + "dynamic_templates": [42], + } == User._doc_type.mapping.to_dict() + + +def test_multi_value_fields() -> None: + class Blog(AsyncDocument): + tags = field.Keyword(multi=True) + + b = Blog() + assert [] == b.tags + b.tags.append("search") + b.tags.append("python") + assert ["search", "python"] == b.tags + + +def test_docs_with_properties() -> None: + class User(AsyncDocument): + pwd_hash: str = field.Text() + + def check_password(self, pwd: bytes) -> bool: + return md5(pwd).hexdigest() == self.pwd_hash + + @property + def password(self) -> None: + raise AttributeError("readonly") + + @password.setter + def password(self, pwd: bytes) -> None: + self.pwd_hash = md5(pwd).hexdigest() + + u = User(pwd_hash=md5(b"secret").hexdigest()) + assert u.check_password(b"secret") + assert not u.check_password(b"not-secret") + + u.password = b"not-secret" + assert "password" not in u._d_ + assert not u.check_password(b"secret") + assert u.check_password(b"not-secret") + + with raises(AttributeError): + u.password + + +def test_nested_can_be_assigned_to() -> None: + d1 = DocWithNested(comments=[Comment(title="First!")]) + d2 = DocWithNested() + + d2.comments = d1.comments + assert isinstance(d1.comments[0], Comment) + assert d2.comments == [{"title": "First!"}] + assert {"comments": [{"title": "First!"}]} == d2.to_dict() + assert isinstance(d2.comments[0], Comment) + + +def test_nested_can_be_none() -> None: + d = DocWithNested(comments=None, title="Hello World!") + + assert {"title": "Hello World!"} == d.to_dict() + + +def test_nested_defaults_to_list_and_can_be_updated() -> None: + md = DocWithNested() + + assert [] == md.comments + + md.comments.append({"title": "hello World!"}) + assert {"comments": [{"title": "hello World!"}]} == md.to_dict() + + +def test_to_dict_is_recursive_and_can_cope_with_multi_values() -> None: + md = MyDoc(name=["a", "b", "c"]) + md.inner = [MyInner(old_field="of1"), MyInner(old_field="of2")] + + assert isinstance(md.inner[0], MyInner) + + assert { + "name": ["a", "b", "c"], + "inner": [{"old_field": "of1"}, {"old_field": "of2"}], + } == md.to_dict() + + +def test_to_dict_ignores_empty_collections() -> None: + md = MySubDoc(name="", address={}, count=0, valid=False, tags=[]) + + assert {"name": "", "count": 0, "valid": False} == md.to_dict() + + +def test_declarative_mapping_definition() -> None: + assert issubclass(MyDoc, AsyncDocument) + assert hasattr(MyDoc, "_doc_type") + assert { + "properties": { + "created_at": {"type": "date"}, + "name": {"type": "text"}, + "title": {"type": "keyword"}, + "inner": {"type": "object", "properties": {"old_field": {"type": "text"}}}, + } + } == 
MyDoc._doc_type.mapping.to_dict() + + +def test_you_can_supply_own_mapping_instance() -> None: + class MyD(AsyncDocument): + title = field.Text() + + class Meta: + mapping = Mapping() + mapping.meta("_all", enabled=False) + + assert { + "_all": {"enabled": False}, + "properties": {"title": {"type": "text"}}, + } == MyD._doc_type.mapping.to_dict() + + +def test_document_can_be_created_dynamically() -> None: + n = datetime.now() + md = MyDoc(title="hello") + md.name = "My Fancy Document!" + md.created_at = n + + inner = md.inner + # consistent returns + assert inner is md.inner + inner.old_field = "Already defined." + + md.inner.new_field = ["undefined", "field"] + + assert { + "title": "hello", + "name": "My Fancy Document!", + "created_at": n, + "inner": {"old_field": "Already defined.", "new_field": ["undefined", "field"]}, + } == md.to_dict() + + +def test_invalid_date_will_raise_exception() -> None: + md = MyDoc() + md.created_at = "not-a-date" + with raises(ValidationException): + md.full_clean() + + +def test_document_inheritance() -> None: + assert issubclass(MySubDoc, MyDoc) + assert issubclass(MySubDoc, AsyncDocument) + assert hasattr(MySubDoc, "_doc_type") + assert { + "properties": { + "created_at": {"type": "date"}, + "name": {"type": "keyword"}, + "title": {"type": "keyword"}, + "inner": {"type": "object", "properties": {"old_field": {"type": "text"}}}, + } + } == MySubDoc._doc_type.mapping.to_dict() + + +def test_child_class_can_override_parent() -> None: + class A(AsyncDocument): + o = field.Object(dynamic=False, properties={"a": field.Text()}) + + class B(A): + o = field.Object(dynamic="strict", properties={"b": field.Text()}) + + assert { + "properties": { + "o": { + "dynamic": "strict", + "properties": {"a": {"type": "text"}, "b": {"type": "text"}}, + "type": "object", + } + } + } == B._doc_type.mapping.to_dict() + + +def test_meta_fields_are_stored_in_meta_and_ignored_by_to_dict() -> None: + md = MySubDoc(meta={"id": 42}, name="My First doc!") + + md.meta.index = "my-index" + assert md.meta.index == "my-index" + assert md.meta.id == 42 + assert {"name": "My First doc!"} == md.to_dict() + assert {"id": 42, "index": "my-index"} == md.meta.to_dict() + + +def test_index_inheritance() -> None: + assert issubclass(MyMultiSubDoc, MySubDoc) + assert issubclass(MyMultiSubDoc, MyDoc2) + assert issubclass(MyMultiSubDoc, AsyncDocument) + assert hasattr(MyMultiSubDoc, "_doc_type") + assert hasattr(MyMultiSubDoc, "_index") + assert { + "properties": { + "created_at": {"type": "date"}, + "name": {"type": "keyword"}, + "title": {"type": "keyword"}, + "inner": {"type": "object", "properties": {"old_field": {"type": "text"}}}, + "extra": {"type": "long"}, + } + } == MyMultiSubDoc._doc_type.mapping.to_dict() + + +def test_meta_fields_can_be_set_directly_in_init() -> None: + p = object() + md = MyDoc(_id=p, title="Hello World!") + + assert md.meta.id is p + + +@pytest.mark.asyncio +async def test_save_no_index(async_mock_client: Any) -> None: + md = MyDoc() + with raises(ValidationException): + await md.save(using="mock") + + +@pytest.mark.asyncio +async def test_delete_no_index(async_mock_client: Any) -> None: + md = MyDoc() + with raises(ValidationException): + await md.delete(using="mock") + + +@pytest.mark.asyncio +async def test_update_no_fields() -> None: + md = MyDoc() + with raises(IllegalOperation): + await md.update() + + +def test_search_with_custom_alias_and_index() -> None: + search_object = MyDoc.search( + using="staging", index=["custom_index1", "custom_index2"] + ) + + assert 
search_object._using == "staging" + assert search_object._index == ["custom_index1", "custom_index2"] + + +def test_from_es_respects_underscored_non_meta_fields() -> None: + doc = { + "_index": "test-index", + "_id": "elasticsearch", + "_score": 12.0, + "fields": {"hello": "world", "_routing": "es", "_tags": ["search"]}, + "_source": { + "city": "Amsterdam", + "name": "Elasticsearch", + "_tagline": "You know, for search", + }, + } + + class Company(AsyncDocument): + class Index: + name = "test-company" + + c = Company.from_es(doc) + + assert c.meta.fields._tags == ["search"] + assert c.meta.fields._routing == "es" + assert c._tagline == "You know, for search" + + +def test_nested_and_object_inner_doc() -> None: + class MySubDocWithNested(MyDoc): + nested_inner = field.Nested(MyInner) + + props = MySubDocWithNested._doc_type.mapping.to_dict()["properties"] + assert props == { + "created_at": {"type": "date"}, + "inner": {"properties": {"old_field": {"type": "text"}}, "type": "object"}, + "name": {"type": "text"}, + "nested_inner": { + "properties": {"old_field": {"type": "text"}}, + "type": "nested", + }, + "title": {"type": "keyword"}, + } + + +def test_doc_with_type_hints() -> None: + class TypedInnerDoc(InnerDoc): + st: M[str] + dt: M[Optional[datetime]] + li: M[List[int]] + + class TypedDoc(AsyncDocument): + st: str + dt: Optional[datetime] + li: List[int] + ob: TypedInnerDoc + ns: List[TypedInnerDoc] + ip: Optional[str] = field.Ip() + k1: str = field.Keyword(required=True) + k2: M[str] = field.Keyword() + k3: str = mapped_field(field.Keyword(), default="foo") + k4: M[Optional[str]] = mapped_field(field.Keyword()) # type: ignore[misc] + s1: Secret = SecretField() + s2: M[Secret] = SecretField() + s3: Secret = mapped_field(SecretField()) # type: ignore[misc] + s4: M[Optional[Secret]] = mapped_field( + SecretField(), default_factory=lambda: "foo" + ) + i1: ClassVar + i2: ClassVar[int] + + props = TypedDoc._doc_type.mapping.to_dict()["properties"] + assert props == { + "st": {"type": "text"}, + "dt": {"type": "date"}, + "li": {"type": "integer"}, + "ob": { + "type": "object", + "properties": { + "st": {"type": "text"}, + "dt": {"type": "date"}, + "li": {"type": "integer"}, + }, + }, + "ns": { + "type": "nested", + "properties": { + "st": {"type": "text"}, + "dt": {"type": "date"}, + "li": {"type": "integer"}, + }, + }, + "ip": {"type": "ip"}, + "k1": {"type": "keyword"}, + "k2": {"type": "keyword"}, + "k3": {"type": "keyword"}, + "k4": {"type": "keyword"}, + "s1": {"type": "text"}, + "s2": {"type": "text"}, + "s3": {"type": "text"}, + "s4": {"type": "text"}, + } + + TypedDoc.i1 = "foo" + TypedDoc.i2 = 123 + + doc = TypedDoc() + assert doc.k3 == "foo" + assert doc.s4 == "foo" + with raises(ValidationException) as exc_info: + doc.full_clean() + assert set(exc_info.value.args[0].keys()) == { + "st", + "k1", + "k2", + "ob", + "s1", + "s2", + "s3", + } + + assert TypedDoc.i1 == "foo" + assert TypedDoc.i2 == 123 + + doc.st = "s" + doc.li = [1, 2, 3] + doc.k1 = "k1" + doc.k2 = "k2" + doc.ob.st = "s" + doc.ob.li = [1] + doc.s1 = "s1" + doc.s2 = "s2" + doc.s3 = "s3" + doc.full_clean() + + doc.ob = TypedInnerDoc(li=[1]) + with raises(ValidationException) as exc_info: + doc.full_clean() + assert set(exc_info.value.args[0].keys()) == {"ob"} + assert set(exc_info.value.args[0]["ob"][0].args[0].keys()) == {"st"} + + doc.ob.st = "s" + doc.ns.append(TypedInnerDoc(li=[1, 2])) + with raises(ValidationException) as exc_info: + doc.full_clean() + + doc.ns[0].st = "s" + doc.full_clean() + + doc.ip = "1.2.3.4" 
+ n = datetime.now() + doc.dt = n + assert doc.to_dict() == { + "st": "s", + "li": [1, 2, 3], + "dt": n, + "ob": { + "st": "s", + "li": [1], + }, + "ns": [ + { + "st": "s", + "li": [1, 2], + } + ], + "ip": "1.2.3.4", + "k1": "k1", + "k2": "k2", + "k3": "foo", + "s1": "s1", + "s2": "s2", + "s3": "s3", + "s4": "foo", + } + + s = TypedDoc.search().sort(TypedDoc.st, -TypedDoc.dt, +TypedDoc.ob.st) + s.aggs.bucket("terms_agg", "terms", field=TypedDoc.k1) + assert s.to_dict() == { + "aggs": {"terms_agg": {"terms": {"field": "k1"}}}, + "sort": ["st", {"dt": {"order": "desc"}}, "ob.st"], + } + + +@pytest.mark.skipif(sys.version_info < (3, 10), reason="requires Python 3.10") +def test_doc_with_pipe_type_hints() -> None: + with pytest.raises(TypeError): + + class BadlyTypedDoc(AsyncDocument): + s: str + f: str | int | None # type: ignore[syntax] + + class TypedDoc(AsyncDocument): + s: str + f1: str | None # type: ignore[syntax] + f2: M[int | None] # type: ignore[syntax] + f3: M[datetime | None] # type: ignore[syntax] + + props = TypedDoc._doc_type.mapping.to_dict()["properties"] + assert props == { + "s": {"type": "text"}, + "f1": {"type": "text"}, + "f2": {"type": "integer"}, + "f3": {"type": "date"}, + } + + doc = TypedDoc() + with raises(ValidationException) as exc_info: + doc.full_clean() + assert set(exc_info.value.args[0].keys()) == {"s"} + doc.s = "s" + doc.full_clean() + + +def test_instrumented_field() -> None: + class Child(InnerDoc): + st: M[str] + + class Doc(AsyncDocument): + st: str + ob: Child + ns: List[Child] + + doc = Doc( + st="foo", + ob=Child(st="bar"), + ns=[ + Child(st="baz"), + Child(st="qux"), + ], + ) + + assert type(doc.st) is str + assert doc.st == "foo" + + assert type(doc.ob) is Child + assert doc.ob.st == "bar" + + assert type(doc.ns) is utils.AttrList + assert doc.ns[0].st == "baz" + assert doc.ns[1].st == "qux" + assert type(doc.ns[0]) is Child + assert type(doc.ns[1]) is Child + + assert type(Doc.st) is InstrumentedField + assert str(Doc.st) == "st" + assert +Doc.st == "st" + assert -Doc.st == "-st" + assert Doc.st.to_dict() == {"type": "text"} + with raises(AttributeError): + Doc.st.something + + assert type(Doc.ob) is InstrumentedField + assert str(Doc.ob) == "ob" + assert str(Doc.ob.st) == "ob.st" + assert +Doc.ob.st == "ob.st" + assert -Doc.ob.st == "-ob.st" + assert Doc.ob.st.to_dict() == {"type": "text"} + with raises(AttributeError): + Doc.ob.something + with raises(AttributeError): + Doc.ob.st.something + + assert type(Doc.ns) is InstrumentedField + assert str(Doc.ns) == "ns" + assert str(Doc.ns.st) == "ns.st" + assert +Doc.ns.st == "ns.st" + assert -Doc.ns.st == "-ns.st" + assert Doc.ns.st.to_dict() == {"type": "text"} + with raises(AttributeError): + Doc.ns.something + with raises(AttributeError): + Doc.ns.st.something diff --git a/test_elasticsearch/test_dsl/_async/test_faceted_search.py b/test_elasticsearch/test_dsl/_async/test_faceted_search.py new file mode 100644 index 000000000..e3bd30850 --- /dev/null +++ b/test_elasticsearch/test_dsl/_async/test_faceted_search.py @@ -0,0 +1,201 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from datetime import datetime + +import pytest + +from elasticsearch.dsl.faceted_search import ( + AsyncFacetedSearch, + DateHistogramFacet, + TermsFacet, +) + + +class BlogSearch(AsyncFacetedSearch): + doc_types = ["user", "post"] + fields = [ + "title^5", + "body", + ] + + facets = { + "category": TermsFacet(field="category.raw"), + "tags": TermsFacet(field="tags"), + } + + +def test_query_is_created_properly() -> None: + bs = BlogSearch("python search") + s = bs.build_search() + + assert s._doc_type == ["user", "post"] + assert { + "aggs": { + "_filter_tags": { + "filter": {"match_all": {}}, + "aggs": {"tags": {"terms": {"field": "tags"}}}, + }, + "_filter_category": { + "filter": {"match_all": {}}, + "aggs": {"category": {"terms": {"field": "category.raw"}}}, + }, + }, + "query": { + "multi_match": {"fields": ["title^5", "body"], "query": "python search"} + }, + "highlight": {"fields": {"body": {}, "title": {}}}, + } == s.to_dict() + + +def test_query_is_created_properly_with_sort_tuple() -> None: + bs = BlogSearch("python search", sort=("category", "-title")) + s = bs.build_search() + + assert s._doc_type == ["user", "post"] + assert { + "aggs": { + "_filter_tags": { + "filter": {"match_all": {}}, + "aggs": {"tags": {"terms": {"field": "tags"}}}, + }, + "_filter_category": { + "filter": {"match_all": {}}, + "aggs": {"category": {"terms": {"field": "category.raw"}}}, + }, + }, + "query": { + "multi_match": {"fields": ["title^5", "body"], "query": "python search"} + }, + "highlight": {"fields": {"body": {}, "title": {}}}, + "sort": ["category", {"title": {"order": "desc"}}], + } == s.to_dict() + + +def test_filter_is_applied_to_search_but_not_relevant_facet() -> None: + bs = BlogSearch("python search", filters={"category": "elastic"}) + s = bs.build_search() + + assert { + "aggs": { + "_filter_tags": { + "filter": {"terms": {"category.raw": ["elastic"]}}, + "aggs": {"tags": {"terms": {"field": "tags"}}}, + }, + "_filter_category": { + "filter": {"match_all": {}}, + "aggs": {"category": {"terms": {"field": "category.raw"}}}, + }, + }, + "post_filter": {"terms": {"category.raw": ["elastic"]}}, + "query": { + "multi_match": {"fields": ["title^5", "body"], "query": "python search"} + }, + "highlight": {"fields": {"body": {}, "title": {}}}, + } == s.to_dict() + + +def test_filters_are_applied_to_search_ant_relevant_facets() -> None: + bs = BlogSearch( + "python search", filters={"category": "elastic", "tags": ["python", "django"]} + ) + s = bs.build_search() + + d = s.to_dict() + + # we need to test post_filter without relying on order + f = d["post_filter"]["bool"].pop("must") + assert len(f) == 2 + assert {"terms": {"category.raw": ["elastic"]}} in f + assert {"terms": {"tags": ["python", "django"]}} in f + + assert { + "aggs": { + "_filter_tags": { + "filter": {"terms": {"category.raw": ["elastic"]}}, + "aggs": {"tags": {"terms": {"field": "tags"}}}, + }, + "_filter_category": { + "filter": {"terms": {"tags": ["python", "django"]}}, + "aggs": {"category": {"terms": {"field": "category.raw"}}}, + }, + }, + "query": { + "multi_match": {"fields": ["title^5", "body"], "query": 
"python search"} + }, + "post_filter": {"bool": {}}, + "highlight": {"fields": {"body": {}, "title": {}}}, + } == d + + +def test_date_histogram_facet_with_1970_01_01_date() -> None: + dhf = DateHistogramFacet() + assert dhf.get_value({"key": None}) == datetime(1970, 1, 1, 0, 0) # type: ignore[arg-type] + assert dhf.get_value({"key": 0}) == datetime(1970, 1, 1, 0, 0) # type: ignore[arg-type] + + +@pytest.mark.parametrize( + ["interval_type", "interval"], + [ + ("interval", "year"), + ("calendar_interval", "year"), + ("interval", "month"), + ("calendar_interval", "month"), + ("interval", "week"), + ("calendar_interval", "week"), + ("interval", "day"), + ("calendar_interval", "day"), + ("fixed_interval", "day"), + ("interval", "hour"), + ("fixed_interval", "hour"), + ("interval", "1Y"), + ("calendar_interval", "1Y"), + ("interval", "1M"), + ("calendar_interval", "1M"), + ("interval", "1w"), + ("calendar_interval", "1w"), + ("interval", "1d"), + ("calendar_interval", "1d"), + ("fixed_interval", "1d"), + ("interval", "1h"), + ("fixed_interval", "1h"), + ], +) +def test_date_histogram_interval_types(interval_type: str, interval: str) -> None: + dhf = DateHistogramFacet(field="@timestamp", **{interval_type: interval}) + assert dhf.get_aggregation().to_dict() == { + "date_histogram": { + "field": "@timestamp", + interval_type: interval, + "min_doc_count": 0, + } + } + dhf.get_value_filter(datetime.now()) + + +def test_date_histogram_no_interval_keyerror() -> None: + dhf = DateHistogramFacet(field="@timestamp") + with pytest.raises(KeyError) as e: + dhf.get_value_filter(datetime.now()) + assert str(e.value) == "'interval'" + + +def test_params_added_to_search() -> None: + bs = BlogSearch("python search") + assert bs._s._params == {} + bs.params(routing="42") + assert bs._s._params == {"routing": "42"} diff --git a/test_elasticsearch/test_dsl/_async/test_index.py b/test_elasticsearch/test_dsl/_async/test_index.py new file mode 100644 index 000000000..624bab79a --- /dev/null +++ b/test_elasticsearch/test_dsl/_async/test_index.py @@ -0,0 +1,197 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import string +from random import choice +from typing import Any, Dict + +import pytest +from pytest import raises + +from elasticsearch.dsl import ( + AsyncDocument, + AsyncIndex, + AsyncIndexTemplate, + Date, + Text, + analyzer, +) + + +class Post(AsyncDocument): + title = Text() + published_from = Date() + + +def test_multiple_doc_types_will_combine_mappings() -> None: + class User(AsyncDocument): + username = Text() + + i = AsyncIndex("i") + i.document(Post) + i.document(User) + assert { + "mappings": { + "properties": { + "title": {"type": "text"}, + "username": {"type": "text"}, + "published_from": {"type": "date"}, + } + } + } == i.to_dict() + + +def test_search_is_limited_to_index_name() -> None: + i = AsyncIndex("my-index") + s = i.search() + + assert s._index == ["my-index"] + + +def test_cloned_index_has_copied_settings_and_using() -> None: + client = object() + i = AsyncIndex("my-index", using=client) # type: ignore[arg-type] + i.settings(number_of_shards=1) + + i2 = i.clone("my-other-index") + + assert "my-other-index" == i2._name + assert client is i2._using + assert i._settings == i2._settings + assert i._settings is not i2._settings + + +def test_cloned_index_has_analysis_attribute() -> None: + """ + Regression test for Issue #582 in which `AsyncIndex.clone()` was not copying + over the `_analysis` attribute. + """ + client = object() + i = AsyncIndex("my-index", using=client) # type: ignore[arg-type] + + random_analyzer_name = "".join(choice(string.ascii_letters) for _ in range(100)) + random_analyzer = analyzer( + random_analyzer_name, tokenizer="standard", filter="standard" + ) + + i.analyzer(random_analyzer) + + i2 = i.clone("my-clone-index") + + assert i.to_dict()["settings"]["analysis"] == i2.to_dict()["settings"]["analysis"] + + +def test_settings_are_saved() -> None: + i = AsyncIndex("i") + i.settings(number_of_replicas=0) + i.settings(number_of_shards=1) + + assert {"settings": {"number_of_shards": 1, "number_of_replicas": 0}} == i.to_dict() + + +def test_registered_doc_type_included_in_to_dict() -> None: + i = AsyncIndex("i", using="alias") + i.document(Post) + + assert { + "mappings": { + "properties": { + "title": {"type": "text"}, + "published_from": {"type": "date"}, + } + } + } == i.to_dict() + + +def test_registered_doc_type_included_in_search() -> None: + i = AsyncIndex("i", using="alias") + i.document(Post) + + s = i.search() + + assert s._doc_type == [Post] + + +def test_aliases_add_to_object() -> None: + random_alias = "".join(choice(string.ascii_letters) for _ in range(100)) + alias_dict: Dict[str, Any] = {random_alias: {}} + + index = AsyncIndex("i", using="alias") + index.aliases(**alias_dict) + + assert index._aliases == alias_dict + + +def test_aliases_returned_from_to_dict() -> None: + random_alias = "".join(choice(string.ascii_letters) for _ in range(100)) + alias_dict: Dict[str, Any] = {random_alias: {}} + + index = AsyncIndex("i", using="alias") + index.aliases(**alias_dict) + + assert index._aliases == index.to_dict()["aliases"] == alias_dict + + +def test_analyzers_added_to_object() -> None: + random_analyzer_name = "".join(choice(string.ascii_letters) for _ in range(100)) + random_analyzer = analyzer( + random_analyzer_name, tokenizer="standard", filter="standard" + ) + + index = AsyncIndex("i", using="alias") + index.analyzer(random_analyzer) + + assert index._analysis["analyzer"][random_analyzer_name] == { + "filter": ["standard"], + "type": "custom", + "tokenizer": "standard", + } + + +def test_analyzers_returned_from_to_dict() -> None: 
+ random_analyzer_name = "".join(choice(string.ascii_letters) for _ in range(100)) + random_analyzer = analyzer( + random_analyzer_name, tokenizer="standard", filter="standard" + ) + index = AsyncIndex("i", using="alias") + index.analyzer(random_analyzer) + + assert index.to_dict()["settings"]["analysis"]["analyzer"][ + random_analyzer_name + ] == {"filter": ["standard"], "type": "custom", "tokenizer": "standard"} + + +def test_conflicting_analyzer_raises_error() -> None: + i = AsyncIndex("i") + i.analyzer("my_analyzer", tokenizer="whitespace", filter=["lowercase", "stop"]) + + with raises(ValueError): + i.analyzer("my_analyzer", tokenizer="keyword", filter=["lowercase", "stop"]) + + +def test_index_template_can_have_order() -> None: + i = AsyncIndex("i-*") + it = i.as_template("i", order=2) + + assert {"index_patterns": ["i-*"], "order": 2} == it.to_dict() + + +@pytest.mark.asyncio +async def test_index_template_save_result(async_mock_client: Any) -> None: + it = AsyncIndexTemplate("test-template", "test-*") + + assert await it.save(using="mock") == await async_mock_client.indices.put_template() diff --git a/test_elasticsearch/test_dsl/_async/test_mapping.py b/test_elasticsearch/test_dsl/_async/test_mapping.py new file mode 100644 index 000000000..93da49fae --- /dev/null +++ b/test_elasticsearch/test_dsl/_async/test_mapping.py @@ -0,0 +1,222 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import json + +from elasticsearch.dsl import AsyncMapping, Keyword, Nested, Text, analysis + + +def test_mapping_can_has_fields() -> None: + m = AsyncMapping() + m.field("name", "text").field("tags", "keyword") + + assert { + "properties": {"name": {"type": "text"}, "tags": {"type": "keyword"}} + } == m.to_dict() + + +def test_mapping_update_is_recursive() -> None: + m1 = AsyncMapping() + m1.field("title", "text") + m1.field("author", "object") + m1.field("author", "object", properties={"name": {"type": "text"}}) + m1.meta("_all", enabled=False) + m1.meta("dynamic", False) + + m2 = AsyncMapping() + m2.field("published_from", "date") + m2.field("author", "object", properties={"email": {"type": "text"}}) + m2.field("title", "text") + m2.field("lang", "keyword") + m2.meta("_analyzer", path="lang") + + m1.update(m2, update_only=True) + + assert { + "_all": {"enabled": False}, + "_analyzer": {"path": "lang"}, + "dynamic": False, + "properties": { + "published_from": {"type": "date"}, + "title": {"type": "text"}, + "lang": {"type": "keyword"}, + "author": { + "type": "object", + "properties": {"name": {"type": "text"}, "email": {"type": "text"}}, + }, + }, + } == m1.to_dict() + + +def test_properties_can_iterate_over_all_the_fields() -> None: + m = AsyncMapping() + m.field("f1", "text", test_attr="f1", fields={"f2": Keyword(test_attr="f2")}) + m.field("f3", Nested(test_attr="f3", properties={"f4": Text(test_attr="f4")})) + + assert {"f1", "f2", "f3", "f4"} == { + f.test_attr for f in m.properties._collect_fields() + } + + +def test_mapping_can_collect_all_analyzers_and_normalizers() -> None: + a1 = analysis.analyzer( + "my_analyzer1", + tokenizer="keyword", + filter=[ + "lowercase", + analysis.token_filter("my_filter1", "stop", stopwords=["a", "b"]), + ], + ) + a2 = analysis.analyzer("english") + a3 = analysis.analyzer("unknown_custom") + a4 = analysis.analyzer( + "my_analyzer2", + tokenizer=analysis.tokenizer("trigram", "nGram", min_gram=3, max_gram=3), + filter=[analysis.token_filter("my_filter2", "stop", stopwords=["c", "d"])], + ) + a5 = analysis.analyzer("my_analyzer3", tokenizer="keyword") + n1 = analysis.normalizer("my_normalizer1", filter=["lowercase"]) + n2 = analysis.normalizer( + "my_normalizer2", + filter=[ + "my_filter1", + "my_filter2", + analysis.token_filter("my_filter3", "stop", stopwords=["e", "f"]), + ], + ) + n3 = analysis.normalizer("unknown_custom") + + m = AsyncMapping() + m.field( + "title", + "text", + analyzer=a1, + fields={"english": Text(analyzer=a2), "unknown": Keyword(search_analyzer=a3)}, + ) + m.field("comments", Nested(properties={"author": Text(analyzer=a4)})) + m.field("normalized_title", "keyword", normalizer=n1) + m.field("normalized_comment", "keyword", normalizer=n2) + m.field("unknown", "keyword", normalizer=n3) + m.meta("_all", analyzer=a5) + + assert { + "analyzer": { + "my_analyzer1": { + "filter": ["lowercase", "my_filter1"], + "tokenizer": "keyword", + "type": "custom", + }, + "my_analyzer2": { + "filter": ["my_filter2"], + "tokenizer": "trigram", + "type": "custom", + }, + "my_analyzer3": {"tokenizer": "keyword", "type": "custom"}, + }, + "normalizer": { + "my_normalizer1": {"filter": ["lowercase"], "type": "custom"}, + "my_normalizer2": { + "filter": ["my_filter1", "my_filter2", "my_filter3"], + "type": "custom", + }, + }, + "filter": { + "my_filter1": {"stopwords": ["a", "b"], "type": "stop"}, + "my_filter2": {"stopwords": ["c", "d"], "type": "stop"}, + "my_filter3": {"stopwords": ["e", "f"], "type": "stop"}, + }, + "tokenizer": {"trigram": 
{"max_gram": 3, "min_gram": 3, "type": "nGram"}}, + } == m._collect_analysis() + + assert json.loads(json.dumps(m.to_dict())) == m.to_dict() + + +def test_mapping_can_collect_multiple_analyzers() -> None: + a1 = analysis.analyzer( + "my_analyzer1", + tokenizer="keyword", + filter=[ + "lowercase", + analysis.token_filter("my_filter1", "stop", stopwords=["a", "b"]), + ], + ) + a2 = analysis.analyzer( + "my_analyzer2", + tokenizer=analysis.tokenizer("trigram", "nGram", min_gram=3, max_gram=3), + filter=[analysis.token_filter("my_filter2", "stop", stopwords=["c", "d"])], + ) + m = AsyncMapping() + m.field("title", "text", analyzer=a1, search_analyzer=a2) + m.field( + "text", + "text", + analyzer=a1, + fields={ + "english": Text(analyzer=a1), + "unknown": Keyword(analyzer=a1, search_analyzer=a2), + }, + ) + assert { + "analyzer": { + "my_analyzer1": { + "filter": ["lowercase", "my_filter1"], + "tokenizer": "keyword", + "type": "custom", + }, + "my_analyzer2": { + "filter": ["my_filter2"], + "tokenizer": "trigram", + "type": "custom", + }, + }, + "filter": { + "my_filter1": {"stopwords": ["a", "b"], "type": "stop"}, + "my_filter2": {"stopwords": ["c", "d"], "type": "stop"}, + }, + "tokenizer": {"trigram": {"max_gram": 3, "min_gram": 3, "type": "nGram"}}, + } == m._collect_analysis() + + +def test_even_non_custom_analyzers_can_have_params() -> None: + a1 = analysis.analyzer("whitespace", type="pattern", pattern=r"\\s+") + m = AsyncMapping() + m.field("title", "text", analyzer=a1) + + assert { + "analyzer": {"whitespace": {"type": "pattern", "pattern": r"\\s+"}} + } == m._collect_analysis() + + +def test_resolve_field_can_resolve_multifields() -> None: + m = AsyncMapping() + m.field("title", "text", fields={"keyword": Keyword()}) + + assert isinstance(m.resolve_field("title.keyword"), Keyword) + + +def test_resolve_nested() -> None: + m = AsyncMapping() + m.field("n1", "nested", properties={"n2": Nested(properties={"k1": Keyword()})}) + m.field("k2", "keyword") + + nested, field = m.resolve_nested("n1.n2.k1") + assert nested == ["n1", "n1.n2"] + assert isinstance(field, Keyword) + + nested, field = m.resolve_nested("k2") + assert nested == [] + assert isinstance(field, Keyword) diff --git a/test_elasticsearch/test_dsl/_async/test_search.py b/test_elasticsearch/test_dsl/_async/test_search.py new file mode 100644 index 000000000..a00ddf448 --- /dev/null +++ b/test_elasticsearch/test_dsl/_async/test_search.py @@ -0,0 +1,841 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +from copy import deepcopy +from typing import Any + +import pytest +from pytest import raises + +from elasticsearch.dsl import ( + AsyncEmptySearch, + AsyncSearch, + Document, + Q, + query, + types, + wrappers, +) +from elasticsearch.dsl.exceptions import IllegalOperation + + +def test_expand__to_dot_is_respected() -> None: + s = AsyncSearch().query("match", a__b=42, _expand__to_dot=False) + + assert {"query": {"match": {"a__b": 42}}} == s.to_dict() + + +@pytest.mark.asyncio +async def test_execute_uses_cache() -> None: + s = AsyncSearch() + r = object() + s._response = r # type: ignore[assignment] + + assert r is await s.execute() + + +@pytest.mark.asyncio +async def test_cache_can_be_ignored(async_mock_client: Any) -> None: + s = AsyncSearch(using="mock") + r = object() + s._response = r # type: ignore[assignment] + await s.execute(ignore_cache=True) + + async_mock_client.search.assert_awaited_once_with(index=None, body={}) + + +@pytest.mark.asyncio +async def test_iter_iterates_over_hits() -> None: + s = AsyncSearch() + s._response = [1, 2, 3] # type: ignore[assignment] + + assert [1, 2, 3] == [hit async for hit in s] + + +def test_cache_isnt_cloned() -> None: + s = AsyncSearch() + s._response = object() # type: ignore[assignment] + + assert not hasattr(s._clone(), "_response") + + +def test_search_starts_with_no_query() -> None: + s = AsyncSearch() + + assert s.query._proxied is None + + +def test_search_query_combines_query() -> None: + s = AsyncSearch() + + s2 = s.query("match", f=42) + assert s2.query._proxied == query.Match(f=42) + assert s.query._proxied is None + + s3 = s2.query("match", f=43) + assert s2.query._proxied == query.Match(f=42) + assert s3.query._proxied == query.Bool(must=[query.Match(f=42), query.Match(f=43)]) + + +def test_query_can_be_assigned_to() -> None: + s = AsyncSearch() + + q = Q("match", title="python") + s.query = q # type: ignore + + assert s.query._proxied is q + + +def test_query_can_be_wrapped() -> None: + s = AsyncSearch().query("match", title="python") + + s.query = Q("function_score", query=s.query, field_value_factor={"field": "rating"}) # type: ignore + + assert { + "query": { + "function_score": { + "functions": [{"field_value_factor": {"field": "rating"}}], + "query": {"match": {"title": "python"}}, + } + } + } == s.to_dict() + + +def test_using() -> None: + o = object() + o2 = object() + s = AsyncSearch(using=o) + assert s._using is o + s2 = s.using(o2) # type: ignore[arg-type] + assert s._using is o + assert s2._using is o2 + + +def test_methods_are_proxied_to_the_query() -> None: + s = AsyncSearch().query("match_all") + + assert s.query.to_dict() == {"match_all": {}} + + +def test_query_always_returns_search() -> None: + s = AsyncSearch() + + assert isinstance(s.query("match", f=42), AsyncSearch) + + +def test_source_copied_on_clone() -> None: + s = AsyncSearch().source(False) + assert s._clone()._source == s._source + assert s._clone()._source is False + + s2 = AsyncSearch().source([]) + assert s2._clone()._source == s2._source + assert s2._source == [] + + s3 = AsyncSearch().source(["some", "fields"]) + assert s3._clone()._source == s3._source + assert s3._clone()._source == ["some", "fields"] + + +def test_copy_clones() -> None: + from copy import copy + + s1 = AsyncSearch().source(["some", "fields"]) + s2 = copy(s1) + + assert s1 == s2 + assert s1 is not s2 + + +def test_aggs_allow_two_metric() -> None: + s = AsyncSearch() + + s.aggs.metric("a", "max", field="a").metric("b", "max", field="b") + + assert s.to_dict() == { + "aggs": 
{"a": {"max": {"field": "a"}}, "b": {"max": {"field": "b"}}} + } + + +def test_aggs_get_copied_on_change() -> None: + s = AsyncSearch().query("match_all") + s.aggs.bucket("per_tag", "terms", field="f").metric( + "max_score", "max", field="score" + ) + + s2 = s.query("match_all") + s2.aggs.bucket("per_month", "date_histogram", field="date", interval="month") + s3 = s2.query("match_all") + s3.aggs["per_month"].metric("max_score", "max", field="score") + s4 = s3._clone() + s4.aggs.metric("max_score", "max", field="score") + + d: Any = { + "query": {"match_all": {}}, + "aggs": { + "per_tag": { + "terms": {"field": "f"}, + "aggs": {"max_score": {"max": {"field": "score"}}}, + } + }, + } + + assert d == s.to_dict() + d["aggs"]["per_month"] = {"date_histogram": {"field": "date", "interval": "month"}} + assert d == s2.to_dict() + d["aggs"]["per_month"]["aggs"] = {"max_score": {"max": {"field": "score"}}} + assert d == s3.to_dict() + d["aggs"]["max_score"] = {"max": {"field": "score"}} + assert d == s4.to_dict() + + +def test_search_index() -> None: + s = AsyncSearch(index="i") + assert s._index == ["i"] + s = s.index("i2") + assert s._index == ["i", "i2"] + s = s.index("i3") + assert s._index == ["i", "i2", "i3"] + s = s.index() + assert s._index is None + s = AsyncSearch(index=("i", "i2")) + assert s._index == ["i", "i2"] + s = AsyncSearch(index=["i", "i2"]) + assert s._index == ["i", "i2"] + s = AsyncSearch() + s = s.index("i", "i2") + assert s._index == ["i", "i2"] + s2 = s.index("i3") + assert s._index == ["i", "i2"] + assert s2._index == ["i", "i2", "i3"] + s = AsyncSearch() + s = s.index(["i", "i2"], "i3") + assert s._index == ["i", "i2", "i3"] + s2 = s.index("i4") + assert s._index == ["i", "i2", "i3"] + assert s2._index == ["i", "i2", "i3", "i4"] + s2 = s.index(["i4"]) + assert s2._index == ["i", "i2", "i3", "i4"] + s2 = s.index(("i4", "i5")) + assert s2._index == ["i", "i2", "i3", "i4", "i5"] + + +def test_doc_type_document_class() -> None: + class MyDocument(Document): + pass + + s = AsyncSearch(doc_type=MyDocument) + assert s._doc_type == [MyDocument] + assert s._doc_type_map == {} + + s = AsyncSearch().doc_type(MyDocument) + assert s._doc_type == [MyDocument] + assert s._doc_type_map == {} + + +def test_knn() -> None: + s = AsyncSearch() + + with raises(TypeError): + s.knn() # type: ignore[call-arg] + with raises(TypeError): + s.knn("field") # type: ignore[call-arg] + with raises(TypeError): + s.knn("field", 5) # type: ignore[call-arg] + with raises(ValueError): + s.knn("field", 5, 100) + with raises(ValueError): + s.knn("field", 5, 100, query_vector=[1, 2, 3], query_vector_builder={}) + + s = s.knn("field", 5, 100, query_vector=[1, 2, 3]) + assert { + "knn": { + "field": "field", + "k": 5, + "num_candidates": 100, + "query_vector": [1, 2, 3], + } + } == s.to_dict() + + s = s.knn( + k=4, + num_candidates=40, + boost=0.8, + field="name", + query_vector_builder={ + "text_embedding": {"model_id": "foo", "model_text": "search text"} + }, + inner_hits={"size": 1}, + ) + assert { + "knn": [ + { + "field": "field", + "k": 5, + "num_candidates": 100, + "query_vector": [1, 2, 3], + }, + { + "field": "name", + "k": 4, + "num_candidates": 40, + "query_vector_builder": { + "text_embedding": {"model_id": "foo", "model_text": "search text"} + }, + "boost": 0.8, + "inner_hits": {"size": 1}, + }, + ] + } == s.to_dict() + + +def test_rank() -> None: + s = AsyncSearch() + s.rank(rrf=False) + assert {} == s.to_dict() + + s = s.rank(rrf=True) + assert {"rank": {"rrf": {}}} == s.to_dict() + + s = 
s.rank(rrf={"window_size": 50, "rank_constant": 20}) + assert {"rank": {"rrf": {"window_size": 50, "rank_constant": 20}}} == s.to_dict() + + +def test_sort() -> None: + s = AsyncSearch() + s = s.sort("fielda", "-fieldb") + + assert ["fielda", {"fieldb": {"order": "desc"}}] == s._sort + assert {"sort": ["fielda", {"fieldb": {"order": "desc"}}]} == s.to_dict() + + s = s.sort() + assert [] == s._sort + assert AsyncSearch().to_dict() == s.to_dict() + + +def test_sort_by_score() -> None: + s = AsyncSearch() + s = s.sort("_score") + assert {"sort": ["_score"]} == s.to_dict() + + s = AsyncSearch() + with raises(IllegalOperation): + s.sort("-_score") + + +def test_collapse() -> None: + s = AsyncSearch() + + inner_hits = {"name": "most_recent", "size": 5, "sort": [{"@timestamp": "desc"}]} + s = s.collapse("user.id", inner_hits=inner_hits, max_concurrent_group_searches=4) + + assert { + "field": "user.id", + "inner_hits": { + "name": "most_recent", + "size": 5, + "sort": [{"@timestamp": "desc"}], + }, + "max_concurrent_group_searches": 4, + } == s._collapse + assert { + "collapse": { + "field": "user.id", + "inner_hits": { + "name": "most_recent", + "size": 5, + "sort": [{"@timestamp": "desc"}], + }, + "max_concurrent_group_searches": 4, + } + } == s.to_dict() + + s = s.collapse() + assert {} == s._collapse + assert AsyncSearch().to_dict() == s.to_dict() + + +def test_slice() -> None: + s = AsyncSearch() + assert {"from": 3, "size": 7} == s[3:10].to_dict() + assert {"size": 5} == s[:5].to_dict() + assert {"from": 3} == s[3:].to_dict() + assert {"from": 0, "size": 0} == s[0:0].to_dict() + assert {"from": 20, "size": 0} == s[20:0].to_dict() + assert {"from": 10, "size": 5} == s[10:][:5].to_dict() + assert {"from": 10, "size": 0} == s[:5][10:].to_dict() + assert {"size": 10} == s[:10][:40].to_dict() + assert {"size": 10} == s[:40][:10].to_dict() + assert {"size": 40} == s[:40][:80].to_dict() + assert {"from": 12, "size": 0} == s[:5][10:][2:].to_dict() + assert {"from": 15, "size": 0} == s[10:][:5][5:].to_dict() + assert {} == s[:].to_dict() + with raises(ValueError): + s[-1:] + with raises(ValueError): + s[4:-1] + with raises(ValueError): + s[-3:-2] + + +def test_index() -> None: + s = AsyncSearch() + assert {"from": 3, "size": 1} == s[3].to_dict() + assert {"from": 3, "size": 1} == s[3][0].to_dict() + assert {"from": 8, "size": 0} == s[3][5].to_dict() + assert {"from": 4, "size": 1} == s[3:10][1].to_dict() + with raises(ValueError): + s[-3] + + +def test_search_to_dict() -> None: + s = AsyncSearch() + assert {} == s.to_dict() + + s = s.query("match", f=42) + assert {"query": {"match": {"f": 42}}} == s.to_dict() + + assert {"query": {"match": {"f": 42}}, "size": 10} == s.to_dict(size=10) + + s.aggs.bucket("per_tag", "terms", field="f").metric( + "max_score", "max", field="score" + ) + d = { + "aggs": { + "per_tag": { + "terms": {"field": "f"}, + "aggs": {"max_score": {"max": {"field": "score"}}}, + } + }, + "query": {"match": {"f": 42}}, + } + assert d == s.to_dict() + + s = AsyncSearch(extra={"size": 5}) + assert {"size": 5} == s.to_dict() + s = s.extra(from_=42) + assert {"size": 5, "from": 42} == s.to_dict() + + +def test_complex_example() -> None: + s = AsyncSearch() + s = ( + s.query("match", title="python") + .query(~Q("match", title="ruby")) + .filter(Q("term", category="meetup") | Q("term", category="conference")) + .collapse("user_id") + .post_filter("terms", tags=["prague", "czech"]) + .script_fields(more_attendees="doc['attendees'].value + 42") + ) + + s.aggs.bucket("per_country", "terms", 
field="country").metric( + "avg_attendees", "avg", field="attendees" + ) + + s.query.minimum_should_match = 2 + + s = s.highlight_options(order="score").highlight("title", "body", fragment_size=50) + + assert { + "query": { + "bool": { + "filter": [ + { + "bool": { + "should": [ + {"term": {"category": "meetup"}}, + {"term": {"category": "conference"}}, + ] + } + } + ], + "must": [{"match": {"title": "python"}}], + "must_not": [{"match": {"title": "ruby"}}], + "minimum_should_match": 2, + } + }, + "post_filter": {"terms": {"tags": ["prague", "czech"]}}, + "aggs": { + "per_country": { + "terms": {"field": "country"}, + "aggs": {"avg_attendees": {"avg": {"field": "attendees"}}}, + } + }, + "collapse": {"field": "user_id"}, + "highlight": { + "order": "score", + "fields": {"title": {"fragment_size": 50}, "body": {"fragment_size": 50}}, + }, + "script_fields": {"more_attendees": {"script": "doc['attendees'].value + 42"}}, + } == s.to_dict() + + +def test_reverse() -> None: + d = { + "query": { + "bool": { + "filter": [ + { + "bool": { + "should": [ + {"term": {"category": "meetup"}}, + {"term": {"category": "conference"}}, + ] + } + } + ], + "must": [ + { + "bool": { + "must": [{"match": {"title": "python"}}], + "must_not": [{"match": {"title": "ruby"}}], + "minimum_should_match": 2, + } + } + ], + } + }, + "post_filter": {"bool": {"must": [{"terms": {"tags": ["prague", "czech"]}}]}}, + "aggs": { + "per_country": { + "terms": {"field": "country"}, + "aggs": {"avg_attendees": {"avg": {"field": "attendees"}}}, + } + }, + "sort": ["title", {"category": {"order": "desc"}}, "_score"], + "size": 5, + "highlight": {"order": "score", "fields": {"title": {"fragment_size": 50}}}, + "suggest": { + "my-title-suggestions-1": { + "text": "devloping distibutd saerch engies", + "term": {"size": 3, "field": "title"}, + } + }, + "script_fields": {"more_attendees": {"script": "doc['attendees'].value + 42"}}, + } + + d2 = deepcopy(d) + + s = AsyncSearch.from_dict(d) + + # make sure we haven't modified anything in place + assert d == d2 + assert {"size": 5} == s._extra + assert d == s.to_dict() + + +def test_code_generated_classes() -> None: + s = AsyncSearch() + s = ( + s.query(query.Match("title", types.MatchQuery(query="python"))) + .query(~query.Match("title", types.MatchQuery(query="ruby"))) + .query( + query.Knn( + field="title", + query_vector=[1.0, 2.0, 3.0], + num_candidates=10, + k=3, + filter=query.Range("year", wrappers.Range(gt="2004")), + ) + ) + .filter( + query.Term("category", types.TermQuery(value="meetup")) + | query.Term("category", types.TermQuery(value="conference")) + ) + .collapse("user_id") + .post_filter(query.Terms(tags=["prague", "czech"])) + .script_fields(more_attendees="doc['attendees'].value + 42") + ) + assert { + "query": { + "bool": { + "filter": [ + { + "bool": { + "should": [ + {"term": {"category": {"value": "meetup"}}}, + {"term": {"category": {"value": "conference"}}}, + ] + } + } + ], + "must": [ + {"match": {"title": {"query": "python"}}}, + { + "knn": { + "field": "title", + "filter": [ + { + "range": { + "year": { + "gt": "2004", + }, + }, + }, + ], + "k": 3, + "num_candidates": 10, + "query_vector": [ + 1.0, + 2.0, + 3.0, + ], + }, + }, + ], + "must_not": [{"match": {"title": {"query": "ruby"}}}], + } + }, + "post_filter": {"terms": {"tags": ["prague", "czech"]}}, + "collapse": {"field": "user_id"}, + "script_fields": {"more_attendees": {"script": "doc['attendees'].value + 42"}}, + } == s.to_dict() + + +def test_from_dict_doesnt_need_query() -> None: + s = 
AsyncSearch.from_dict({"size": 5}) + + assert {"size": 5} == s.to_dict() + + +@pytest.mark.asyncio +async def test_params_being_passed_to_search(async_mock_client: Any) -> None: + s = AsyncSearch(using="mock") + s = s.params(routing="42") + await s.execute() + + async_mock_client.search.assert_awaited_once_with(index=None, body={}, routing="42") + + +def test_source() -> None: + assert {} == AsyncSearch().source().to_dict() + + assert { + "_source": {"includes": ["foo.bar.*"], "excludes": ["foo.one"]} + } == AsyncSearch().source(includes=["foo.bar.*"], excludes=("foo.one",)).to_dict() + + assert {"_source": False} == AsyncSearch().source(False).to_dict() + + assert {"_source": ["f1", "f2"]} == AsyncSearch().source( + includes=["foo.bar.*"], excludes=["foo.one"] + ).source(["f1", "f2"]).to_dict() + + +def test_source_on_clone() -> None: + assert { + "_source": {"includes": ["foo.bar.*"], "excludes": ["foo.one"]}, + "query": {"bool": {"filter": [{"term": {"title": "python"}}]}}, + } == AsyncSearch().source(includes=["foo.bar.*"]).source( + excludes=["foo.one"] + ).filter( + "term", title="python" + ).to_dict() + assert { + "_source": False, + "query": {"bool": {"filter": [{"term": {"title": "python"}}]}}, + } == AsyncSearch().source(False).filter("term", title="python").to_dict() + + +def test_source_on_clear() -> None: + assert ( + {} + == AsyncSearch() + .source(includes=["foo.bar.*"]) + .source(includes=None, excludes=None) + .to_dict() + ) + + +def test_suggest_accepts_global_text() -> None: + s = AsyncSearch.from_dict( + { + "suggest": { + "text": "the amsterdma meetpu", + "my-suggest-1": {"term": {"field": "title"}}, + "my-suggest-2": {"text": "other", "term": {"field": "body"}}, + } + } + ) + + assert { + "suggest": { + "my-suggest-1": { + "term": {"field": "title"}, + "text": "the amsterdma meetpu", + }, + "my-suggest-2": {"term": {"field": "body"}, "text": "other"}, + } + } == s.to_dict() + + +def test_suggest() -> None: + s = AsyncSearch() + s = s.suggest("my_suggestion", "pyhton", term={"field": "title"}) + + assert { + "suggest": {"my_suggestion": {"term": {"field": "title"}, "text": "pyhton"}} + } == s.to_dict() + + +def test_exclude() -> None: + s = AsyncSearch() + s = s.exclude("match", title="python") + + assert { + "query": { + "bool": { + "filter": [{"bool": {"must_not": [{"match": {"title": "python"}}]}}] + } + } + } == s.to_dict() + + +@pytest.mark.asyncio +async def test_delete_by_query(async_mock_client: Any) -> None: + s = AsyncSearch(using="mock", index="i").query("match", lang="java") + await s.delete() + + async_mock_client.delete_by_query.assert_awaited_once_with( + index=["i"], body={"query": {"match": {"lang": "java"}}} + ) + + +def test_update_from_dict() -> None: + s = AsyncSearch() + s.update_from_dict({"indices_boost": [{"important-documents": 2}]}) + s.update_from_dict({"_source": ["id", "name"]}) + s.update_from_dict({"collapse": {"field": "user_id"}}) + + assert { + "indices_boost": [{"important-documents": 2}], + "_source": ["id", "name"], + "collapse": {"field": "user_id"}, + } == s.to_dict() + + +def test_rescore_query_to_dict() -> None: + s = AsyncSearch(index="index-name") + + positive_query = Q( + "function_score", + query=Q("term", tags="a"), + script_score={"script": "_score * 1"}, + ) + + negative_query = Q( + "function_score", + query=Q("term", tags="b"), + script_score={"script": "_score * -100"}, + ) + + s = s.query(positive_query) + s = s.extra( + rescore={"window_size": 100, "query": {"rescore_query": negative_query}} + ) + assert s.to_dict() 
== { + "query": { + "function_score": { + "query": {"term": {"tags": "a"}}, + "functions": [{"script_score": {"script": "_score * 1"}}], + } + }, + "rescore": { + "window_size": 100, + "query": { + "rescore_query": { + "function_score": { + "query": {"term": {"tags": "b"}}, + "functions": [{"script_score": {"script": "_score * -100"}}], + } + } + }, + }, + } + + assert s.to_dict( + rescore={"window_size": 10, "query": {"rescore_query": positive_query}} + ) == { + "query": { + "function_score": { + "query": {"term": {"tags": "a"}}, + "functions": [{"script_score": {"script": "_score * 1"}}], + } + }, + "rescore": { + "window_size": 10, + "query": { + "rescore_query": { + "function_score": { + "query": {"term": {"tags": "a"}}, + "functions": [{"script_score": {"script": "_score * 1"}}], + } + } + }, + }, + } + + +@pytest.mark.asyncio +async def test_empty_search() -> None: + s = AsyncEmptySearch(index="index-name") + s = s.query("match", lang="java") + s.aggs.bucket("versions", "terms", field="version") + + assert await s.count() == 0 + assert [hit async for hit in s] == [] + assert [hit async for hit in s.scan()] == [] + await s.delete() # should not error + + +def test_suggest_completion() -> None: + s = AsyncSearch() + s = s.suggest("my_suggestion", "pyhton", completion={"field": "title"}) + + assert { + "suggest": { + "my_suggestion": {"completion": {"field": "title"}, "prefix": "pyhton"} + } + } == s.to_dict() + + +def test_suggest_regex_query() -> None: + s = AsyncSearch() + s = s.suggest("my_suggestion", regex="py[thon|py]", completion={"field": "title"}) + + assert { + "suggest": { + "my_suggestion": {"completion": {"field": "title"}, "regex": "py[thon|py]"} + } + } == s.to_dict() + + +def test_suggest_must_pass_text_or_regex() -> None: + s = AsyncSearch() + with raises(ValueError): + s.suggest("my_suggestion") + + +def test_suggest_can_only_pass_text_or_regex() -> None: + s = AsyncSearch() + with raises(ValueError): + s.suggest("my_suggestion", text="python", regex="py[hton|py]") + + +def test_suggest_regex_must_be_wtih_completion() -> None: + s = AsyncSearch() + with raises(ValueError): + s.suggest("my_suggestion", regex="py[thon|py]") diff --git a/test_elasticsearch/test_dsl/_async/test_update_by_query.py b/test_elasticsearch/test_dsl/_async/test_update_by_query.py new file mode 100644 index 000000000..9253623dc --- /dev/null +++ b/test_elasticsearch/test_dsl/_async/test_update_by_query.py @@ -0,0 +1,180 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +from copy import deepcopy +from typing import Any + +import pytest + +from elasticsearch.dsl import AsyncUpdateByQuery, Q +from elasticsearch.dsl.response import UpdateByQueryResponse +from elasticsearch.dsl.search_base import SearchBase + + +def test_ubq_starts_with_no_query() -> None: + ubq = AsyncUpdateByQuery() + + assert ubq.query._proxied is None + + +def test_ubq_to_dict() -> None: + ubq = AsyncUpdateByQuery() + assert {} == ubq.to_dict() + + ubq = ubq.query("match", f=42) + assert {"query": {"match": {"f": 42}}} == ubq.to_dict() + + assert {"query": {"match": {"f": 42}}, "size": 10} == ubq.to_dict(size=10) + + ubq = AsyncUpdateByQuery(extra={"size": 5}) + assert {"size": 5} == ubq.to_dict() + + ubq = AsyncUpdateByQuery(extra={"extra_q": Q("term", category="conference")}) + assert {"extra_q": {"term": {"category": "conference"}}} == ubq.to_dict() + + +def test_complex_example() -> None: + ubq = AsyncUpdateByQuery() + ubq = ( + ubq.query("match", title="python") + .query(~Q("match", title="ruby")) + .filter(Q("term", category="meetup") | Q("term", category="conference")) + .script( + source="ctx._source.likes += params.f", lang="painless", params={"f": 3} + ) + ) + + ubq.query.minimum_should_match = 2 + assert { + "query": { + "bool": { + "filter": [ + { + "bool": { + "should": [ + {"term": {"category": "meetup"}}, + {"term": {"category": "conference"}}, + ] + } + } + ], + "must": [{"match": {"title": "python"}}], + "must_not": [{"match": {"title": "ruby"}}], + "minimum_should_match": 2, + } + }, + "script": { + "source": "ctx._source.likes += params.f", + "lang": "painless", + "params": {"f": 3}, + }, + } == ubq.to_dict() + + +def test_exclude() -> None: + ubq = AsyncUpdateByQuery() + ubq = ubq.exclude("match", title="python") + + assert { + "query": { + "bool": { + "filter": [{"bool": {"must_not": [{"match": {"title": "python"}}]}}] + } + } + } == ubq.to_dict() + + +def test_reverse() -> None: + d = { + "query": { + "bool": { + "filter": [ + { + "bool": { + "should": [ + {"term": {"category": "meetup"}}, + {"term": {"category": "conference"}}, + ] + } + } + ], + "must": [ + { + "bool": { + "must": [{"match": {"title": "python"}}], + "must_not": [{"match": {"title": "ruby"}}], + "minimum_should_match": 2, + } + } + ], + } + }, + "script": { + "source": "ctx._source.likes += params.f", + "lang": "painless", + "params": {"f": 3}, + }, + } + + d2 = deepcopy(d) + + ubq = AsyncUpdateByQuery.from_dict(d) + + assert d == d2 + assert d == ubq.to_dict() + + +def test_from_dict_doesnt_need_query() -> None: + ubq = AsyncUpdateByQuery.from_dict({"script": {"source": "test"}}) + + assert {"script": {"source": "test"}} == ubq.to_dict() + + +@pytest.mark.asyncio +async def test_params_being_passed_to_search(async_mock_client: Any) -> None: + ubq = AsyncUpdateByQuery(using="mock", index="i") + ubq = ubq.params(routing="42") + await ubq.execute() + + async_mock_client.update_by_query.assert_called_once_with(index=["i"], routing="42") + + +def test_overwrite_script() -> None: + ubq = AsyncUpdateByQuery() + ubq = ubq.script( + source="ctx._source.likes += params.f", lang="painless", params={"f": 3} + ) + assert { + "script": { + "source": "ctx._source.likes += params.f", + "lang": "painless", + "params": {"f": 3}, + } + } == ubq.to_dict() + ubq = ubq.script(source="ctx._source.likes++") + assert {"script": {"source": "ctx._source.likes++"}} == ubq.to_dict() + + +def test_update_by_query_response_success() -> None: + ubqr = UpdateByQueryResponse(SearchBase(), {"timed_out": False, "failures": []}) + 
assert ubqr.success() + + ubqr = UpdateByQueryResponse(SearchBase(), {"timed_out": True, "failures": []}) + assert not ubqr.success() + + ubqr = UpdateByQueryResponse(SearchBase(), {"timed_out": False, "failures": [{}]}) + assert not ubqr.success() diff --git a/test_elasticsearch/test_dsl/_sync/__init__.py b/test_elasticsearch/test_dsl/_sync/__init__.py new file mode 100644 index 000000000..2a87d183f --- /dev/null +++ b/test_elasticsearch/test_dsl/_sync/__init__.py @@ -0,0 +1,16 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. diff --git a/test_elasticsearch/test_dsl/_sync/test_document.py b/test_elasticsearch/test_dsl/_sync/test_document.py new file mode 100644 index 000000000..05ad9d623 --- /dev/null +++ b/test_elasticsearch/test_dsl/_sync/test_document.py @@ -0,0 +1,883 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# this file creates several documents using bad or no types because +# these are still supported and should be kept functional in spite +# of not having appropriate type hints. For that reason the comment +# below disables many mypy checks that fails as a result of this. 
+# mypy: disable-error-code="assignment, index, arg-type, call-arg, operator, comparison-overlap, attr-defined" + +import codecs +import ipaddress +import pickle +import sys +from datetime import datetime +from hashlib import md5 +from typing import Any, ClassVar, Dict, List, Optional + +import pytest +from pytest import raises + +from elasticsearch.dsl import ( + Document, + Index, + InnerDoc, + M, + Mapping, + MetaField, + Range, + analyzer, + field, + mapped_field, + utils, +) +from elasticsearch.dsl.document_base import InstrumentedField +from elasticsearch.dsl.exceptions import IllegalOperation, ValidationException + + +class MyInner(InnerDoc): + old_field = field.Text() + + +class MyDoc(Document): + title = field.Keyword() + name = field.Text() + created_at = field.Date() + inner = field.Object(MyInner) + + +class MySubDoc(MyDoc): + name = field.Keyword() + + class Index: + name = "default-index" + + +class MyDoc2(Document): + extra = field.Long() + + +class MyMultiSubDoc(MyDoc2, MySubDoc): + pass + + +class Comment(InnerDoc): + title = field.Text() + tags = field.Keyword(multi=True) + + +class DocWithNested(Document): + comments = field.Nested(Comment) + + class Index: + name = "test-doc-with-nested" + + +class SimpleCommit(Document): + files = field.Text(multi=True) + + class Index: + name = "test-git" + + +class Secret(str): + pass + + +class SecretField(field.CustomField): + builtin_type = "text" + + def _serialize(self, data: Any) -> Any: + return codecs.encode(data, "rot_13") + + def _deserialize(self, data: Any) -> Any: + if isinstance(data, Secret): + return data + return Secret(codecs.decode(data, "rot_13")) + + +class SecretDoc(Document): + title = SecretField(index="no") + + class Index: + name = "test-secret-doc" + + +class NestedSecret(Document): + secrets = field.Nested(SecretDoc) + + class Index: + name = "test-nested-secret" + + +class OptionalObjectWithRequiredField(Document): + comments = field.Nested(properties={"title": field.Keyword(required=True)}) + + class Index: + name = "test-required" + + +class Host(Document): + ip = field.Ip() + + class Index: + name = "test-host" + + +def test_range_serializes_properly() -> None: + class D(Document): + lr: Range[int] = field.LongRange() + + d = D(lr=Range(lt=42)) + assert 40 in d.lr + assert 47 not in d.lr + assert {"lr": {"lt": 42}} == d.to_dict() + + d = D(lr={"lt": 42}) + assert {"lr": {"lt": 42}} == d.to_dict() + + +def test_range_deserializes_properly() -> None: + class D(InnerDoc): + lr = field.LongRange() + + d = D.from_es({"lr": {"lt": 42}}, True) + assert isinstance(d.lr, Range) + assert 40 in d.lr + assert 47 not in d.lr + + +def test_resolve_nested() -> None: + nested, field = NestedSecret._index.resolve_nested("secrets.title") + assert nested == ["secrets"] + assert field is NestedSecret._doc_type.mapping["secrets"]["title"] + + +def test_conflicting_mapping_raises_error_in_index_to_dict() -> None: + class A(Document): + name = field.Text() + + class B(Document): + name = field.Keyword() + + i = Index("i") + i.document(A) + i.document(B) + + with raises(ValueError): + i.to_dict() + + +def test_ip_address_serializes_properly() -> None: + host = Host(ip=ipaddress.IPv4Address("10.0.0.1")) + + assert {"ip": "10.0.0.1"} == host.to_dict() + + +def test_matches_uses_index() -> None: + assert SimpleCommit._matches({"_index": "test-git"}) + assert not SimpleCommit._matches({"_index": "not-test-git"}) + + +def test_matches_with_no_name_always_matches() -> None: + class D(Document): + pass + + assert D._matches({}) + 
assert D._matches({"_index": "whatever"}) + + +def test_matches_accepts_wildcards() -> None: + class MyDoc(Document): + class Index: + name = "my-*" + + assert MyDoc._matches({"_index": "my-index"}) + assert not MyDoc._matches({"_index": "not-my-index"}) + + +def test_assigning_attrlist_to_field() -> None: + sc = SimpleCommit() + l = ["README", "README.rst"] + sc.files = utils.AttrList(l) + + assert sc.to_dict()["files"] is l + + +def test_optional_inner_objects_are_not_validated_if_missing() -> None: + d = OptionalObjectWithRequiredField() + + d.full_clean() + + +def test_custom_field() -> None: + s = SecretDoc(title=Secret("Hello")) + + assert {"title": "Uryyb"} == s.to_dict() + assert s.title == "Hello" + + s = SecretDoc.from_es({"_source": {"title": "Uryyb"}}) + assert s.title == "Hello" + assert isinstance(s.title, Secret) + + +def test_custom_field_mapping() -> None: + assert { + "properties": {"title": {"index": "no", "type": "text"}} + } == SecretDoc._doc_type.mapping.to_dict() + + +def test_custom_field_in_nested() -> None: + s = NestedSecret() + s.secrets.append(SecretDoc(title=Secret("Hello"))) + + assert {"secrets": [{"title": "Uryyb"}]} == s.to_dict() + assert s.secrets[0].title == "Hello" + + +def test_multi_works_after_doc_has_been_saved() -> None: + c = SimpleCommit() + c.full_clean() + c.files.append("setup.py") + + assert c.to_dict() == {"files": ["setup.py"]} + + +def test_multi_works_in_nested_after_doc_has_been_serialized() -> None: + # Issue #359 + c = DocWithNested(comments=[Comment(title="First!")]) + + assert [] == c.comments[0].tags + assert {"comments": [{"title": "First!"}]} == c.to_dict() + assert [] == c.comments[0].tags + + +def test_null_value_for_object() -> None: + d = MyDoc(inner=None) + + assert d.inner is None + + +def test_inherited_doc_types_can_override_index() -> None: + class MyDocDifferentIndex(MySubDoc): + class Index: + name = "not-default-index" + settings = {"number_of_replicas": 0} + aliases: Dict[str, Any] = {"a": {}} + analyzers = [analyzer("my_analizer", tokenizer="keyword")] + + assert MyDocDifferentIndex._index._name == "not-default-index" + assert MyDocDifferentIndex()._get_index() == "not-default-index" + assert MyDocDifferentIndex._index.to_dict() == { + "aliases": {"a": {}}, + "mappings": { + "properties": { + "created_at": {"type": "date"}, + "inner": { + "type": "object", + "properties": {"old_field": {"type": "text"}}, + }, + "name": {"type": "keyword"}, + "title": {"type": "keyword"}, + } + }, + "settings": { + "analysis": { + "analyzer": {"my_analizer": {"tokenizer": "keyword", "type": "custom"}} + }, + "number_of_replicas": 0, + }, + } + + +def test_to_dict_with_meta() -> None: + d = MySubDoc(title="hello") + d.meta.routing = "some-parent" + + assert { + "_index": "default-index", + "_routing": "some-parent", + "_source": {"title": "hello"}, + } == d.to_dict(True) + + +def test_to_dict_with_meta_includes_custom_index() -> None: + d = MySubDoc(title="hello") + d.meta.index = "other-index" + + assert {"_index": "other-index", "_source": {"title": "hello"}} == d.to_dict(True) + + +def test_to_dict_without_skip_empty_will_include_empty_fields() -> None: + d = MySubDoc(tags=[], title=None, inner={}) + + assert {} == d.to_dict() + assert {"tags": [], "title": None, "inner": {}} == d.to_dict(skip_empty=False) + + +def test_attribute_can_be_removed() -> None: + d = MyDoc(title="hello") + + del d.title + assert "title" not in d._d_ + + +def test_doc_type_can_be_correctly_pickled() -> None: + d = DocWithNested( + title="Hello World!", 
comments=[Comment(title="hellp")], meta={"id": 42} + ) + s = pickle.dumps(d) + + d2 = pickle.loads(s) + + assert d2 == d + assert 42 == d2.meta.id + assert "Hello World!" == d2.title + assert [{"title": "hellp"}] == d2.comments + assert isinstance(d2.comments[0], Comment) + + +def test_meta_is_accessible_even_on_empty_doc() -> None: + d = MyDoc() + d.meta + + d = MyDoc(title="aaa") + d.meta + + +def test_meta_field_mapping() -> None: + class User(Document): + username = field.Text() + + class Meta: + all = MetaField(enabled=False) + _index = MetaField(enabled=True) + dynamic = MetaField("strict") + dynamic_templates = MetaField([42]) + + assert { + "properties": {"username": {"type": "text"}}, + "_all": {"enabled": False}, + "_index": {"enabled": True}, + "dynamic": "strict", + "dynamic_templates": [42], + } == User._doc_type.mapping.to_dict() + + +def test_multi_value_fields() -> None: + class Blog(Document): + tags = field.Keyword(multi=True) + + b = Blog() + assert [] == b.tags + b.tags.append("search") + b.tags.append("python") + assert ["search", "python"] == b.tags + + +def test_docs_with_properties() -> None: + class User(Document): + pwd_hash: str = field.Text() + + def check_password(self, pwd: bytes) -> bool: + return md5(pwd).hexdigest() == self.pwd_hash + + @property + def password(self) -> None: + raise AttributeError("readonly") + + @password.setter + def password(self, pwd: bytes) -> None: + self.pwd_hash = md5(pwd).hexdigest() + + u = User(pwd_hash=md5(b"secret").hexdigest()) + assert u.check_password(b"secret") + assert not u.check_password(b"not-secret") + + u.password = b"not-secret" + assert "password" not in u._d_ + assert not u.check_password(b"secret") + assert u.check_password(b"not-secret") + + with raises(AttributeError): + u.password + + +def test_nested_can_be_assigned_to() -> None: + d1 = DocWithNested(comments=[Comment(title="First!")]) + d2 = DocWithNested() + + d2.comments = d1.comments + assert isinstance(d1.comments[0], Comment) + assert d2.comments == [{"title": "First!"}] + assert {"comments": [{"title": "First!"}]} == d2.to_dict() + assert isinstance(d2.comments[0], Comment) + + +def test_nested_can_be_none() -> None: + d = DocWithNested(comments=None, title="Hello World!") + + assert {"title": "Hello World!"} == d.to_dict() + + +def test_nested_defaults_to_list_and_can_be_updated() -> None: + md = DocWithNested() + + assert [] == md.comments + + md.comments.append({"title": "hello World!"}) + assert {"comments": [{"title": "hello World!"}]} == md.to_dict() + + +def test_to_dict_is_recursive_and_can_cope_with_multi_values() -> None: + md = MyDoc(name=["a", "b", "c"]) + md.inner = [MyInner(old_field="of1"), MyInner(old_field="of2")] + + assert isinstance(md.inner[0], MyInner) + + assert { + "name": ["a", "b", "c"], + "inner": [{"old_field": "of1"}, {"old_field": "of2"}], + } == md.to_dict() + + +def test_to_dict_ignores_empty_collections() -> None: + md = MySubDoc(name="", address={}, count=0, valid=False, tags=[]) + + assert {"name": "", "count": 0, "valid": False} == md.to_dict() + + +def test_declarative_mapping_definition() -> None: + assert issubclass(MyDoc, Document) + assert hasattr(MyDoc, "_doc_type") + assert { + "properties": { + "created_at": {"type": "date"}, + "name": {"type": "text"}, + "title": {"type": "keyword"}, + "inner": {"type": "object", "properties": {"old_field": {"type": "text"}}}, + } + } == MyDoc._doc_type.mapping.to_dict() + + +def test_you_can_supply_own_mapping_instance() -> None: + class MyD(Document): + title = 
field.Text() + + class Meta: + mapping = Mapping() + mapping.meta("_all", enabled=False) + + assert { + "_all": {"enabled": False}, + "properties": {"title": {"type": "text"}}, + } == MyD._doc_type.mapping.to_dict() + + +def test_document_can_be_created_dynamically() -> None: + n = datetime.now() + md = MyDoc(title="hello") + md.name = "My Fancy Document!" + md.created_at = n + + inner = md.inner + # consistent returns + assert inner is md.inner + inner.old_field = "Already defined." + + md.inner.new_field = ["undefined", "field"] + + assert { + "title": "hello", + "name": "My Fancy Document!", + "created_at": n, + "inner": {"old_field": "Already defined.", "new_field": ["undefined", "field"]}, + } == md.to_dict() + + +def test_invalid_date_will_raise_exception() -> None: + md = MyDoc() + md.created_at = "not-a-date" + with raises(ValidationException): + md.full_clean() + + +def test_document_inheritance() -> None: + assert issubclass(MySubDoc, MyDoc) + assert issubclass(MySubDoc, Document) + assert hasattr(MySubDoc, "_doc_type") + assert { + "properties": { + "created_at": {"type": "date"}, + "name": {"type": "keyword"}, + "title": {"type": "keyword"}, + "inner": {"type": "object", "properties": {"old_field": {"type": "text"}}}, + } + } == MySubDoc._doc_type.mapping.to_dict() + + +def test_child_class_can_override_parent() -> None: + class A(Document): + o = field.Object(dynamic=False, properties={"a": field.Text()}) + + class B(A): + o = field.Object(dynamic="strict", properties={"b": field.Text()}) + + assert { + "properties": { + "o": { + "dynamic": "strict", + "properties": {"a": {"type": "text"}, "b": {"type": "text"}}, + "type": "object", + } + } + } == B._doc_type.mapping.to_dict() + + +def test_meta_fields_are_stored_in_meta_and_ignored_by_to_dict() -> None: + md = MySubDoc(meta={"id": 42}, name="My First doc!") + + md.meta.index = "my-index" + assert md.meta.index == "my-index" + assert md.meta.id == 42 + assert {"name": "My First doc!"} == md.to_dict() + assert {"id": 42, "index": "my-index"} == md.meta.to_dict() + + +def test_index_inheritance() -> None: + assert issubclass(MyMultiSubDoc, MySubDoc) + assert issubclass(MyMultiSubDoc, MyDoc2) + assert issubclass(MyMultiSubDoc, Document) + assert hasattr(MyMultiSubDoc, "_doc_type") + assert hasattr(MyMultiSubDoc, "_index") + assert { + "properties": { + "created_at": {"type": "date"}, + "name": {"type": "keyword"}, + "title": {"type": "keyword"}, + "inner": {"type": "object", "properties": {"old_field": {"type": "text"}}}, + "extra": {"type": "long"}, + } + } == MyMultiSubDoc._doc_type.mapping.to_dict() + + +def test_meta_fields_can_be_set_directly_in_init() -> None: + p = object() + md = MyDoc(_id=p, title="Hello World!") + + assert md.meta.id is p + + +@pytest.mark.sync +def test_save_no_index(mock_client: Any) -> None: + md = MyDoc() + with raises(ValidationException): + md.save(using="mock") + + +@pytest.mark.sync +def test_delete_no_index(mock_client: Any) -> None: + md = MyDoc() + with raises(ValidationException): + md.delete(using="mock") + + +@pytest.mark.sync +def test_update_no_fields() -> None: + md = MyDoc() + with raises(IllegalOperation): + md.update() + + +def test_search_with_custom_alias_and_index() -> None: + search_object = MyDoc.search( + using="staging", index=["custom_index1", "custom_index2"] + ) + + assert search_object._using == "staging" + assert search_object._index == ["custom_index1", "custom_index2"] + + +def test_from_es_respects_underscored_non_meta_fields() -> None: + doc = { + "_index": 
"test-index", + "_id": "elasticsearch", + "_score": 12.0, + "fields": {"hello": "world", "_routing": "es", "_tags": ["search"]}, + "_source": { + "city": "Amsterdam", + "name": "Elasticsearch", + "_tagline": "You know, for search", + }, + } + + class Company(Document): + class Index: + name = "test-company" + + c = Company.from_es(doc) + + assert c.meta.fields._tags == ["search"] + assert c.meta.fields._routing == "es" + assert c._tagline == "You know, for search" + + +def test_nested_and_object_inner_doc() -> None: + class MySubDocWithNested(MyDoc): + nested_inner = field.Nested(MyInner) + + props = MySubDocWithNested._doc_type.mapping.to_dict()["properties"] + assert props == { + "created_at": {"type": "date"}, + "inner": {"properties": {"old_field": {"type": "text"}}, "type": "object"}, + "name": {"type": "text"}, + "nested_inner": { + "properties": {"old_field": {"type": "text"}}, + "type": "nested", + }, + "title": {"type": "keyword"}, + } + + +def test_doc_with_type_hints() -> None: + class TypedInnerDoc(InnerDoc): + st: M[str] + dt: M[Optional[datetime]] + li: M[List[int]] + + class TypedDoc(Document): + st: str + dt: Optional[datetime] + li: List[int] + ob: TypedInnerDoc + ns: List[TypedInnerDoc] + ip: Optional[str] = field.Ip() + k1: str = field.Keyword(required=True) + k2: M[str] = field.Keyword() + k3: str = mapped_field(field.Keyword(), default="foo") + k4: M[Optional[str]] = mapped_field(field.Keyword()) # type: ignore[misc] + s1: Secret = SecretField() + s2: M[Secret] = SecretField() + s3: Secret = mapped_field(SecretField()) # type: ignore[misc] + s4: M[Optional[Secret]] = mapped_field( + SecretField(), default_factory=lambda: "foo" + ) + i1: ClassVar + i2: ClassVar[int] + + props = TypedDoc._doc_type.mapping.to_dict()["properties"] + assert props == { + "st": {"type": "text"}, + "dt": {"type": "date"}, + "li": {"type": "integer"}, + "ob": { + "type": "object", + "properties": { + "st": {"type": "text"}, + "dt": {"type": "date"}, + "li": {"type": "integer"}, + }, + }, + "ns": { + "type": "nested", + "properties": { + "st": {"type": "text"}, + "dt": {"type": "date"}, + "li": {"type": "integer"}, + }, + }, + "ip": {"type": "ip"}, + "k1": {"type": "keyword"}, + "k2": {"type": "keyword"}, + "k3": {"type": "keyword"}, + "k4": {"type": "keyword"}, + "s1": {"type": "text"}, + "s2": {"type": "text"}, + "s3": {"type": "text"}, + "s4": {"type": "text"}, + } + + TypedDoc.i1 = "foo" + TypedDoc.i2 = 123 + + doc = TypedDoc() + assert doc.k3 == "foo" + assert doc.s4 == "foo" + with raises(ValidationException) as exc_info: + doc.full_clean() + assert set(exc_info.value.args[0].keys()) == { + "st", + "k1", + "k2", + "ob", + "s1", + "s2", + "s3", + } + + assert TypedDoc.i1 == "foo" + assert TypedDoc.i2 == 123 + + doc.st = "s" + doc.li = [1, 2, 3] + doc.k1 = "k1" + doc.k2 = "k2" + doc.ob.st = "s" + doc.ob.li = [1] + doc.s1 = "s1" + doc.s2 = "s2" + doc.s3 = "s3" + doc.full_clean() + + doc.ob = TypedInnerDoc(li=[1]) + with raises(ValidationException) as exc_info: + doc.full_clean() + assert set(exc_info.value.args[0].keys()) == {"ob"} + assert set(exc_info.value.args[0]["ob"][0].args[0].keys()) == {"st"} + + doc.ob.st = "s" + doc.ns.append(TypedInnerDoc(li=[1, 2])) + with raises(ValidationException) as exc_info: + doc.full_clean() + + doc.ns[0].st = "s" + doc.full_clean() + + doc.ip = "1.2.3.4" + n = datetime.now() + doc.dt = n + assert doc.to_dict() == { + "st": "s", + "li": [1, 2, 3], + "dt": n, + "ob": { + "st": "s", + "li": [1], + }, + "ns": [ + { + "st": "s", + "li": [1, 2], + } + ], + 
"ip": "1.2.3.4", + "k1": "k1", + "k2": "k2", + "k3": "foo", + "s1": "s1", + "s2": "s2", + "s3": "s3", + "s4": "foo", + } + + s = TypedDoc.search().sort(TypedDoc.st, -TypedDoc.dt, +TypedDoc.ob.st) + s.aggs.bucket("terms_agg", "terms", field=TypedDoc.k1) + assert s.to_dict() == { + "aggs": {"terms_agg": {"terms": {"field": "k1"}}}, + "sort": ["st", {"dt": {"order": "desc"}}, "ob.st"], + } + + +@pytest.mark.skipif(sys.version_info < (3, 10), reason="requires Python 3.10") +def test_doc_with_pipe_type_hints() -> None: + with pytest.raises(TypeError): + + class BadlyTypedDoc(Document): + s: str + f: str | int | None # type: ignore[syntax] + + class TypedDoc(Document): + s: str + f1: str | None # type: ignore[syntax] + f2: M[int | None] # type: ignore[syntax] + f3: M[datetime | None] # type: ignore[syntax] + + props = TypedDoc._doc_type.mapping.to_dict()["properties"] + assert props == { + "s": {"type": "text"}, + "f1": {"type": "text"}, + "f2": {"type": "integer"}, + "f3": {"type": "date"}, + } + + doc = TypedDoc() + with raises(ValidationException) as exc_info: + doc.full_clean() + assert set(exc_info.value.args[0].keys()) == {"s"} + doc.s = "s" + doc.full_clean() + + +def test_instrumented_field() -> None: + class Child(InnerDoc): + st: M[str] + + class Doc(Document): + st: str + ob: Child + ns: List[Child] + + doc = Doc( + st="foo", + ob=Child(st="bar"), + ns=[ + Child(st="baz"), + Child(st="qux"), + ], + ) + + assert type(doc.st) is str + assert doc.st == "foo" + + assert type(doc.ob) is Child + assert doc.ob.st == "bar" + + assert type(doc.ns) is utils.AttrList + assert doc.ns[0].st == "baz" + assert doc.ns[1].st == "qux" + assert type(doc.ns[0]) is Child + assert type(doc.ns[1]) is Child + + assert type(Doc.st) is InstrumentedField + assert str(Doc.st) == "st" + assert +Doc.st == "st" + assert -Doc.st == "-st" + assert Doc.st.to_dict() == {"type": "text"} + with raises(AttributeError): + Doc.st.something + + assert type(Doc.ob) is InstrumentedField + assert str(Doc.ob) == "ob" + assert str(Doc.ob.st) == "ob.st" + assert +Doc.ob.st == "ob.st" + assert -Doc.ob.st == "-ob.st" + assert Doc.ob.st.to_dict() == {"type": "text"} + with raises(AttributeError): + Doc.ob.something + with raises(AttributeError): + Doc.ob.st.something + + assert type(Doc.ns) is InstrumentedField + assert str(Doc.ns) == "ns" + assert str(Doc.ns.st) == "ns.st" + assert +Doc.ns.st == "ns.st" + assert -Doc.ns.st == "-ns.st" + assert Doc.ns.st.to_dict() == {"type": "text"} + with raises(AttributeError): + Doc.ns.something + with raises(AttributeError): + Doc.ns.st.something diff --git a/test_elasticsearch/test_dsl/_sync/test_faceted_search.py b/test_elasticsearch/test_dsl/_sync/test_faceted_search.py new file mode 100644 index 000000000..33b17bd1e --- /dev/null +++ b/test_elasticsearch/test_dsl/_sync/test_faceted_search.py @@ -0,0 +1,201 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. + +from datetime import datetime + +import pytest + +from elasticsearch.dsl.faceted_search import ( + DateHistogramFacet, + FacetedSearch, + TermsFacet, +) + + +class BlogSearch(FacetedSearch): + doc_types = ["user", "post"] + fields = [ + "title^5", + "body", + ] + + facets = { + "category": TermsFacet(field="category.raw"), + "tags": TermsFacet(field="tags"), + } + + +def test_query_is_created_properly() -> None: + bs = BlogSearch("python search") + s = bs.build_search() + + assert s._doc_type == ["user", "post"] + assert { + "aggs": { + "_filter_tags": { + "filter": {"match_all": {}}, + "aggs": {"tags": {"terms": {"field": "tags"}}}, + }, + "_filter_category": { + "filter": {"match_all": {}}, + "aggs": {"category": {"terms": {"field": "category.raw"}}}, + }, + }, + "query": { + "multi_match": {"fields": ["title^5", "body"], "query": "python search"} + }, + "highlight": {"fields": {"body": {}, "title": {}}}, + } == s.to_dict() + + +def test_query_is_created_properly_with_sort_tuple() -> None: + bs = BlogSearch("python search", sort=("category", "-title")) + s = bs.build_search() + + assert s._doc_type == ["user", "post"] + assert { + "aggs": { + "_filter_tags": { + "filter": {"match_all": {}}, + "aggs": {"tags": {"terms": {"field": "tags"}}}, + }, + "_filter_category": { + "filter": {"match_all": {}}, + "aggs": {"category": {"terms": {"field": "category.raw"}}}, + }, + }, + "query": { + "multi_match": {"fields": ["title^5", "body"], "query": "python search"} + }, + "highlight": {"fields": {"body": {}, "title": {}}}, + "sort": ["category", {"title": {"order": "desc"}}], + } == s.to_dict() + + +def test_filter_is_applied_to_search_but_not_relevant_facet() -> None: + bs = BlogSearch("python search", filters={"category": "elastic"}) + s = bs.build_search() + + assert { + "aggs": { + "_filter_tags": { + "filter": {"terms": {"category.raw": ["elastic"]}}, + "aggs": {"tags": {"terms": {"field": "tags"}}}, + }, + "_filter_category": { + "filter": {"match_all": {}}, + "aggs": {"category": {"terms": {"field": "category.raw"}}}, + }, + }, + "post_filter": {"terms": {"category.raw": ["elastic"]}}, + "query": { + "multi_match": {"fields": ["title^5", "body"], "query": "python search"} + }, + "highlight": {"fields": {"body": {}, "title": {}}}, + } == s.to_dict() + + +def test_filters_are_applied_to_search_ant_relevant_facets() -> None: + bs = BlogSearch( + "python search", filters={"category": "elastic", "tags": ["python", "django"]} + ) + s = bs.build_search() + + d = s.to_dict() + + # we need to test post_filter without relying on order + f = d["post_filter"]["bool"].pop("must") + assert len(f) == 2 + assert {"terms": {"category.raw": ["elastic"]}} in f + assert {"terms": {"tags": ["python", "django"]}} in f + + assert { + "aggs": { + "_filter_tags": { + "filter": {"terms": {"category.raw": ["elastic"]}}, + "aggs": {"tags": {"terms": {"field": "tags"}}}, + }, + "_filter_category": { + "filter": {"terms": {"tags": ["python", "django"]}}, + "aggs": {"category": {"terms": {"field": "category.raw"}}}, + }, + }, + "query": { + "multi_match": {"fields": ["title^5", "body"], "query": "python search"} + }, + "post_filter": {"bool": {}}, + "highlight": {"fields": {"body": {}, "title": {}}}, + } == d + + +def test_date_histogram_facet_with_1970_01_01_date() -> None: + dhf = DateHistogramFacet() + assert dhf.get_value({"key": None}) == datetime(1970, 1, 1, 0, 0) # type: ignore[arg-type] + assert 
dhf.get_value({"key": 0}) == datetime(1970, 1, 1, 0, 0) # type: ignore[arg-type] + + +@pytest.mark.parametrize( + ["interval_type", "interval"], + [ + ("interval", "year"), + ("calendar_interval", "year"), + ("interval", "month"), + ("calendar_interval", "month"), + ("interval", "week"), + ("calendar_interval", "week"), + ("interval", "day"), + ("calendar_interval", "day"), + ("fixed_interval", "day"), + ("interval", "hour"), + ("fixed_interval", "hour"), + ("interval", "1Y"), + ("calendar_interval", "1Y"), + ("interval", "1M"), + ("calendar_interval", "1M"), + ("interval", "1w"), + ("calendar_interval", "1w"), + ("interval", "1d"), + ("calendar_interval", "1d"), + ("fixed_interval", "1d"), + ("interval", "1h"), + ("fixed_interval", "1h"), + ], +) +def test_date_histogram_interval_types(interval_type: str, interval: str) -> None: + dhf = DateHistogramFacet(field="@timestamp", **{interval_type: interval}) + assert dhf.get_aggregation().to_dict() == { + "date_histogram": { + "field": "@timestamp", + interval_type: interval, + "min_doc_count": 0, + } + } + dhf.get_value_filter(datetime.now()) + + +def test_date_histogram_no_interval_keyerror() -> None: + dhf = DateHistogramFacet(field="@timestamp") + with pytest.raises(KeyError) as e: + dhf.get_value_filter(datetime.now()) + assert str(e.value) == "'interval'" + + +def test_params_added_to_search() -> None: + bs = BlogSearch("python search") + assert bs._s._params == {} + bs.params(routing="42") + assert bs._s._params == {"routing": "42"} diff --git a/test_elasticsearch/test_dsl/_sync/test_index.py b/test_elasticsearch/test_dsl/_sync/test_index.py new file mode 100644 index 000000000..c6d1b7904 --- /dev/null +++ b/test_elasticsearch/test_dsl/_sync/test_index.py @@ -0,0 +1,190 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import string +from random import choice +from typing import Any, Dict + +import pytest +from pytest import raises + +from elasticsearch.dsl import Date, Document, Index, IndexTemplate, Text, analyzer + + +class Post(Document): + title = Text() + published_from = Date() + + +def test_multiple_doc_types_will_combine_mappings() -> None: + class User(Document): + username = Text() + + i = Index("i") + i.document(Post) + i.document(User) + assert { + "mappings": { + "properties": { + "title": {"type": "text"}, + "username": {"type": "text"}, + "published_from": {"type": "date"}, + } + } + } == i.to_dict() + + +def test_search_is_limited_to_index_name() -> None: + i = Index("my-index") + s = i.search() + + assert s._index == ["my-index"] + + +def test_cloned_index_has_copied_settings_and_using() -> None: + client = object() + i = Index("my-index", using=client) # type: ignore[arg-type] + i.settings(number_of_shards=1) + + i2 = i.clone("my-other-index") + + assert "my-other-index" == i2._name + assert client is i2._using + assert i._settings == i2._settings + assert i._settings is not i2._settings + + +def test_cloned_index_has_analysis_attribute() -> None: + """ + Regression test for Issue #582 in which `AsyncIndex.clone()` was not copying + over the `_analysis` attribute. + """ + client = object() + i = Index("my-index", using=client) # type: ignore[arg-type] + + random_analyzer_name = "".join(choice(string.ascii_letters) for _ in range(100)) + random_analyzer = analyzer( + random_analyzer_name, tokenizer="standard", filter="standard" + ) + + i.analyzer(random_analyzer) + + i2 = i.clone("my-clone-index") + + assert i.to_dict()["settings"]["analysis"] == i2.to_dict()["settings"]["analysis"] + + +def test_settings_are_saved() -> None: + i = Index("i") + i.settings(number_of_replicas=0) + i.settings(number_of_shards=1) + + assert {"settings": {"number_of_shards": 1, "number_of_replicas": 0}} == i.to_dict() + + +def test_registered_doc_type_included_in_to_dict() -> None: + i = Index("i", using="alias") + i.document(Post) + + assert { + "mappings": { + "properties": { + "title": {"type": "text"}, + "published_from": {"type": "date"}, + } + } + } == i.to_dict() + + +def test_registered_doc_type_included_in_search() -> None: + i = Index("i", using="alias") + i.document(Post) + + s = i.search() + + assert s._doc_type == [Post] + + +def test_aliases_add_to_object() -> None: + random_alias = "".join(choice(string.ascii_letters) for _ in range(100)) + alias_dict: Dict[str, Any] = {random_alias: {}} + + index = Index("i", using="alias") + index.aliases(**alias_dict) + + assert index._aliases == alias_dict + + +def test_aliases_returned_from_to_dict() -> None: + random_alias = "".join(choice(string.ascii_letters) for _ in range(100)) + alias_dict: Dict[str, Any] = {random_alias: {}} + + index = Index("i", using="alias") + index.aliases(**alias_dict) + + assert index._aliases == index.to_dict()["aliases"] == alias_dict + + +def test_analyzers_added_to_object() -> None: + random_analyzer_name = "".join(choice(string.ascii_letters) for _ in range(100)) + random_analyzer = analyzer( + random_analyzer_name, tokenizer="standard", filter="standard" + ) + + index = Index("i", using="alias") + index.analyzer(random_analyzer) + + assert index._analysis["analyzer"][random_analyzer_name] == { + "filter": ["standard"], + "type": "custom", + "tokenizer": "standard", + } + + +def test_analyzers_returned_from_to_dict() -> None: + random_analyzer_name = "".join(choice(string.ascii_letters) for _ in range(100)) + 
random_analyzer = analyzer( + random_analyzer_name, tokenizer="standard", filter="standard" + ) + index = Index("i", using="alias") + index.analyzer(random_analyzer) + + assert index.to_dict()["settings"]["analysis"]["analyzer"][ + random_analyzer_name + ] == {"filter": ["standard"], "type": "custom", "tokenizer": "standard"} + + +def test_conflicting_analyzer_raises_error() -> None: + i = Index("i") + i.analyzer("my_analyzer", tokenizer="whitespace", filter=["lowercase", "stop"]) + + with raises(ValueError): + i.analyzer("my_analyzer", tokenizer="keyword", filter=["lowercase", "stop"]) + + +def test_index_template_can_have_order() -> None: + i = Index("i-*") + it = i.as_template("i", order=2) + + assert {"index_patterns": ["i-*"], "order": 2} == it.to_dict() + + +@pytest.mark.sync +def test_index_template_save_result(mock_client: Any) -> None: + it = IndexTemplate("test-template", "test-*") + + assert it.save(using="mock") == mock_client.indices.put_template() diff --git a/test_elasticsearch/test_dsl/_sync/test_mapping.py b/test_elasticsearch/test_dsl/_sync/test_mapping.py new file mode 100644 index 000000000..0e63d2e05 --- /dev/null +++ b/test_elasticsearch/test_dsl/_sync/test_mapping.py @@ -0,0 +1,222 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
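+
+# The tests below cover the ``Mapping`` builder. The function below is a small
+# usage sketch only (its name is illustrative and unused by the tests): fields
+# are added fluently, multi-fields resolve via ``resolve_field``, and
+# ``to_dict`` renders the mapping body.
+def _mapping_usage_sketch() -> dict:
+    from elasticsearch.dsl import Keyword, Mapping
+
+    m = Mapping()
+    m.field("name", "text").field("tags", "keyword")  # field() is chainable
+    m.field("title", "text", fields={"keyword": Keyword()})  # multi-field
+    assert isinstance(m.resolve_field("title.keyword"), Keyword)
+    return m.to_dict()  # {"properties": {...}}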
+ +import json + +from elasticsearch.dsl import Keyword, Mapping, Nested, Text, analysis + + +def test_mapping_can_has_fields() -> None: + m = Mapping() + m.field("name", "text").field("tags", "keyword") + + assert { + "properties": {"name": {"type": "text"}, "tags": {"type": "keyword"}} + } == m.to_dict() + + +def test_mapping_update_is_recursive() -> None: + m1 = Mapping() + m1.field("title", "text") + m1.field("author", "object") + m1.field("author", "object", properties={"name": {"type": "text"}}) + m1.meta("_all", enabled=False) + m1.meta("dynamic", False) + + m2 = Mapping() + m2.field("published_from", "date") + m2.field("author", "object", properties={"email": {"type": "text"}}) + m2.field("title", "text") + m2.field("lang", "keyword") + m2.meta("_analyzer", path="lang") + + m1.update(m2, update_only=True) + + assert { + "_all": {"enabled": False}, + "_analyzer": {"path": "lang"}, + "dynamic": False, + "properties": { + "published_from": {"type": "date"}, + "title": {"type": "text"}, + "lang": {"type": "keyword"}, + "author": { + "type": "object", + "properties": {"name": {"type": "text"}, "email": {"type": "text"}}, + }, + }, + } == m1.to_dict() + + +def test_properties_can_iterate_over_all_the_fields() -> None: + m = Mapping() + m.field("f1", "text", test_attr="f1", fields={"f2": Keyword(test_attr="f2")}) + m.field("f3", Nested(test_attr="f3", properties={"f4": Text(test_attr="f4")})) + + assert {"f1", "f2", "f3", "f4"} == { + f.test_attr for f in m.properties._collect_fields() + } + + +def test_mapping_can_collect_all_analyzers_and_normalizers() -> None: + a1 = analysis.analyzer( + "my_analyzer1", + tokenizer="keyword", + filter=[ + "lowercase", + analysis.token_filter("my_filter1", "stop", stopwords=["a", "b"]), + ], + ) + a2 = analysis.analyzer("english") + a3 = analysis.analyzer("unknown_custom") + a4 = analysis.analyzer( + "my_analyzer2", + tokenizer=analysis.tokenizer("trigram", "nGram", min_gram=3, max_gram=3), + filter=[analysis.token_filter("my_filter2", "stop", stopwords=["c", "d"])], + ) + a5 = analysis.analyzer("my_analyzer3", tokenizer="keyword") + n1 = analysis.normalizer("my_normalizer1", filter=["lowercase"]) + n2 = analysis.normalizer( + "my_normalizer2", + filter=[ + "my_filter1", + "my_filter2", + analysis.token_filter("my_filter3", "stop", stopwords=["e", "f"]), + ], + ) + n3 = analysis.normalizer("unknown_custom") + + m = Mapping() + m.field( + "title", + "text", + analyzer=a1, + fields={"english": Text(analyzer=a2), "unknown": Keyword(search_analyzer=a3)}, + ) + m.field("comments", Nested(properties={"author": Text(analyzer=a4)})) + m.field("normalized_title", "keyword", normalizer=n1) + m.field("normalized_comment", "keyword", normalizer=n2) + m.field("unknown", "keyword", normalizer=n3) + m.meta("_all", analyzer=a5) + + assert { + "analyzer": { + "my_analyzer1": { + "filter": ["lowercase", "my_filter1"], + "tokenizer": "keyword", + "type": "custom", + }, + "my_analyzer2": { + "filter": ["my_filter2"], + "tokenizer": "trigram", + "type": "custom", + }, + "my_analyzer3": {"tokenizer": "keyword", "type": "custom"}, + }, + "normalizer": { + "my_normalizer1": {"filter": ["lowercase"], "type": "custom"}, + "my_normalizer2": { + "filter": ["my_filter1", "my_filter2", "my_filter3"], + "type": "custom", + }, + }, + "filter": { + "my_filter1": {"stopwords": ["a", "b"], "type": "stop"}, + "my_filter2": {"stopwords": ["c", "d"], "type": "stop"}, + "my_filter3": {"stopwords": ["e", "f"], "type": "stop"}, + }, + "tokenizer": {"trigram": {"max_gram": 3, "min_gram": 3, 
"type": "nGram"}}, + } == m._collect_analysis() + + assert json.loads(json.dumps(m.to_dict())) == m.to_dict() + + +def test_mapping_can_collect_multiple_analyzers() -> None: + a1 = analysis.analyzer( + "my_analyzer1", + tokenizer="keyword", + filter=[ + "lowercase", + analysis.token_filter("my_filter1", "stop", stopwords=["a", "b"]), + ], + ) + a2 = analysis.analyzer( + "my_analyzer2", + tokenizer=analysis.tokenizer("trigram", "nGram", min_gram=3, max_gram=3), + filter=[analysis.token_filter("my_filter2", "stop", stopwords=["c", "d"])], + ) + m = Mapping() + m.field("title", "text", analyzer=a1, search_analyzer=a2) + m.field( + "text", + "text", + analyzer=a1, + fields={ + "english": Text(analyzer=a1), + "unknown": Keyword(analyzer=a1, search_analyzer=a2), + }, + ) + assert { + "analyzer": { + "my_analyzer1": { + "filter": ["lowercase", "my_filter1"], + "tokenizer": "keyword", + "type": "custom", + }, + "my_analyzer2": { + "filter": ["my_filter2"], + "tokenizer": "trigram", + "type": "custom", + }, + }, + "filter": { + "my_filter1": {"stopwords": ["a", "b"], "type": "stop"}, + "my_filter2": {"stopwords": ["c", "d"], "type": "stop"}, + }, + "tokenizer": {"trigram": {"max_gram": 3, "min_gram": 3, "type": "nGram"}}, + } == m._collect_analysis() + + +def test_even_non_custom_analyzers_can_have_params() -> None: + a1 = analysis.analyzer("whitespace", type="pattern", pattern=r"\\s+") + m = Mapping() + m.field("title", "text", analyzer=a1) + + assert { + "analyzer": {"whitespace": {"type": "pattern", "pattern": r"\\s+"}} + } == m._collect_analysis() + + +def test_resolve_field_can_resolve_multifields() -> None: + m = Mapping() + m.field("title", "text", fields={"keyword": Keyword()}) + + assert isinstance(m.resolve_field("title.keyword"), Keyword) + + +def test_resolve_nested() -> None: + m = Mapping() + m.field("n1", "nested", properties={"n2": Nested(properties={"k1": Keyword()})}) + m.field("k2", "keyword") + + nested, field = m.resolve_nested("n1.n2.k1") + assert nested == ["n1", "n1.n2"] + assert isinstance(field, Keyword) + + nested, field = m.resolve_nested("k2") + assert nested == [] + assert isinstance(field, Keyword) diff --git a/test_elasticsearch/test_dsl/_sync/test_search.py b/test_elasticsearch/test_dsl/_sync/test_search.py new file mode 100644 index 000000000..04b0ad53e --- /dev/null +++ b/test_elasticsearch/test_dsl/_sync/test_search.py @@ -0,0 +1,831 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +from copy import deepcopy +from typing import Any + +import pytest +from pytest import raises + +from elasticsearch.dsl import Document, EmptySearch, Q, Search, query, types, wrappers +from elasticsearch.dsl.exceptions import IllegalOperation + + +def test_expand__to_dot_is_respected() -> None: + s = Search().query("match", a__b=42, _expand__to_dot=False) + + assert {"query": {"match": {"a__b": 42}}} == s.to_dict() + + +@pytest.mark.sync +def test_execute_uses_cache() -> None: + s = Search() + r = object() + s._response = r # type: ignore[assignment] + + assert r is s.execute() + + +@pytest.mark.sync +def test_cache_can_be_ignored(mock_client: Any) -> None: + s = Search(using="mock") + r = object() + s._response = r # type: ignore[assignment] + s.execute(ignore_cache=True) + + mock_client.search.assert_called_once_with(index=None, body={}) + + +@pytest.mark.sync +def test_iter_iterates_over_hits() -> None: + s = Search() + s._response = [1, 2, 3] # type: ignore[assignment] + + assert [1, 2, 3] == [hit for hit in s] + + +def test_cache_isnt_cloned() -> None: + s = Search() + s._response = object() # type: ignore[assignment] + + assert not hasattr(s._clone(), "_response") + + +def test_search_starts_with_no_query() -> None: + s = Search() + + assert s.query._proxied is None + + +def test_search_query_combines_query() -> None: + s = Search() + + s2 = s.query("match", f=42) + assert s2.query._proxied == query.Match(f=42) + assert s.query._proxied is None + + s3 = s2.query("match", f=43) + assert s2.query._proxied == query.Match(f=42) + assert s3.query._proxied == query.Bool(must=[query.Match(f=42), query.Match(f=43)]) + + +def test_query_can_be_assigned_to() -> None: + s = Search() + + q = Q("match", title="python") + s.query = q # type: ignore + + assert s.query._proxied is q + + +def test_query_can_be_wrapped() -> None: + s = Search().query("match", title="python") + + s.query = Q("function_score", query=s.query, field_value_factor={"field": "rating"}) # type: ignore + + assert { + "query": { + "function_score": { + "functions": [{"field_value_factor": {"field": "rating"}}], + "query": {"match": {"title": "python"}}, + } + } + } == s.to_dict() + + +def test_using() -> None: + o = object() + o2 = object() + s = Search(using=o) + assert s._using is o + s2 = s.using(o2) # type: ignore[arg-type] + assert s._using is o + assert s2._using is o2 + + +def test_methods_are_proxied_to_the_query() -> None: + s = Search().query("match_all") + + assert s.query.to_dict() == {"match_all": {}} + + +def test_query_always_returns_search() -> None: + s = Search() + + assert isinstance(s.query("match", f=42), Search) + + +def test_source_copied_on_clone() -> None: + s = Search().source(False) + assert s._clone()._source == s._source + assert s._clone()._source is False + + s2 = Search().source([]) + assert s2._clone()._source == s2._source + assert s2._source == [] + + s3 = Search().source(["some", "fields"]) + assert s3._clone()._source == s3._source + assert s3._clone()._source == ["some", "fields"] + + +def test_copy_clones() -> None: + from copy import copy + + s1 = Search().source(["some", "fields"]) + s2 = copy(s1) + + assert s1 == s2 + assert s1 is not s2 + + +def test_aggs_allow_two_metric() -> None: + s = Search() + + s.aggs.metric("a", "max", field="a").metric("b", "max", field="b") + + assert s.to_dict() == { + "aggs": {"a": {"max": {"field": "a"}}, "b": {"max": {"field": "b"}}} + } + + +def test_aggs_get_copied_on_change() -> None: + s = Search().query("match_all") + s.aggs.bucket("per_tag", 
"terms", field="f").metric( + "max_score", "max", field="score" + ) + + s2 = s.query("match_all") + s2.aggs.bucket("per_month", "date_histogram", field="date", interval="month") + s3 = s2.query("match_all") + s3.aggs["per_month"].metric("max_score", "max", field="score") + s4 = s3._clone() + s4.aggs.metric("max_score", "max", field="score") + + d: Any = { + "query": {"match_all": {}}, + "aggs": { + "per_tag": { + "terms": {"field": "f"}, + "aggs": {"max_score": {"max": {"field": "score"}}}, + } + }, + } + + assert d == s.to_dict() + d["aggs"]["per_month"] = {"date_histogram": {"field": "date", "interval": "month"}} + assert d == s2.to_dict() + d["aggs"]["per_month"]["aggs"] = {"max_score": {"max": {"field": "score"}}} + assert d == s3.to_dict() + d["aggs"]["max_score"] = {"max": {"field": "score"}} + assert d == s4.to_dict() + + +def test_search_index() -> None: + s = Search(index="i") + assert s._index == ["i"] + s = s.index("i2") + assert s._index == ["i", "i2"] + s = s.index("i3") + assert s._index == ["i", "i2", "i3"] + s = s.index() + assert s._index is None + s = Search(index=("i", "i2")) + assert s._index == ["i", "i2"] + s = Search(index=["i", "i2"]) + assert s._index == ["i", "i2"] + s = Search() + s = s.index("i", "i2") + assert s._index == ["i", "i2"] + s2 = s.index("i3") + assert s._index == ["i", "i2"] + assert s2._index == ["i", "i2", "i3"] + s = Search() + s = s.index(["i", "i2"], "i3") + assert s._index == ["i", "i2", "i3"] + s2 = s.index("i4") + assert s._index == ["i", "i2", "i3"] + assert s2._index == ["i", "i2", "i3", "i4"] + s2 = s.index(["i4"]) + assert s2._index == ["i", "i2", "i3", "i4"] + s2 = s.index(("i4", "i5")) + assert s2._index == ["i", "i2", "i3", "i4", "i5"] + + +def test_doc_type_document_class() -> None: + class MyDocument(Document): + pass + + s = Search(doc_type=MyDocument) + assert s._doc_type == [MyDocument] + assert s._doc_type_map == {} + + s = Search().doc_type(MyDocument) + assert s._doc_type == [MyDocument] + assert s._doc_type_map == {} + + +def test_knn() -> None: + s = Search() + + with raises(TypeError): + s.knn() # type: ignore[call-arg] + with raises(TypeError): + s.knn("field") # type: ignore[call-arg] + with raises(TypeError): + s.knn("field", 5) # type: ignore[call-arg] + with raises(ValueError): + s.knn("field", 5, 100) + with raises(ValueError): + s.knn("field", 5, 100, query_vector=[1, 2, 3], query_vector_builder={}) + + s = s.knn("field", 5, 100, query_vector=[1, 2, 3]) + assert { + "knn": { + "field": "field", + "k": 5, + "num_candidates": 100, + "query_vector": [1, 2, 3], + } + } == s.to_dict() + + s = s.knn( + k=4, + num_candidates=40, + boost=0.8, + field="name", + query_vector_builder={ + "text_embedding": {"model_id": "foo", "model_text": "search text"} + }, + inner_hits={"size": 1}, + ) + assert { + "knn": [ + { + "field": "field", + "k": 5, + "num_candidates": 100, + "query_vector": [1, 2, 3], + }, + { + "field": "name", + "k": 4, + "num_candidates": 40, + "query_vector_builder": { + "text_embedding": {"model_id": "foo", "model_text": "search text"} + }, + "boost": 0.8, + "inner_hits": {"size": 1}, + }, + ] + } == s.to_dict() + + +def test_rank() -> None: + s = Search() + s.rank(rrf=False) + assert {} == s.to_dict() + + s = s.rank(rrf=True) + assert {"rank": {"rrf": {}}} == s.to_dict() + + s = s.rank(rrf={"window_size": 50, "rank_constant": 20}) + assert {"rank": {"rrf": {"window_size": 50, "rank_constant": 20}}} == s.to_dict() + + +def test_sort() -> None: + s = Search() + s = s.sort("fielda", "-fieldb") + + assert 
["fielda", {"fieldb": {"order": "desc"}}] == s._sort + assert {"sort": ["fielda", {"fieldb": {"order": "desc"}}]} == s.to_dict() + + s = s.sort() + assert [] == s._sort + assert Search().to_dict() == s.to_dict() + + +def test_sort_by_score() -> None: + s = Search() + s = s.sort("_score") + assert {"sort": ["_score"]} == s.to_dict() + + s = Search() + with raises(IllegalOperation): + s.sort("-_score") + + +def test_collapse() -> None: + s = Search() + + inner_hits = {"name": "most_recent", "size": 5, "sort": [{"@timestamp": "desc"}]} + s = s.collapse("user.id", inner_hits=inner_hits, max_concurrent_group_searches=4) + + assert { + "field": "user.id", + "inner_hits": { + "name": "most_recent", + "size": 5, + "sort": [{"@timestamp": "desc"}], + }, + "max_concurrent_group_searches": 4, + } == s._collapse + assert { + "collapse": { + "field": "user.id", + "inner_hits": { + "name": "most_recent", + "size": 5, + "sort": [{"@timestamp": "desc"}], + }, + "max_concurrent_group_searches": 4, + } + } == s.to_dict() + + s = s.collapse() + assert {} == s._collapse + assert Search().to_dict() == s.to_dict() + + +def test_slice() -> None: + s = Search() + assert {"from": 3, "size": 7} == s[3:10].to_dict() + assert {"size": 5} == s[:5].to_dict() + assert {"from": 3} == s[3:].to_dict() + assert {"from": 0, "size": 0} == s[0:0].to_dict() + assert {"from": 20, "size": 0} == s[20:0].to_dict() + assert {"from": 10, "size": 5} == s[10:][:5].to_dict() + assert {"from": 10, "size": 0} == s[:5][10:].to_dict() + assert {"size": 10} == s[:10][:40].to_dict() + assert {"size": 10} == s[:40][:10].to_dict() + assert {"size": 40} == s[:40][:80].to_dict() + assert {"from": 12, "size": 0} == s[:5][10:][2:].to_dict() + assert {"from": 15, "size": 0} == s[10:][:5][5:].to_dict() + assert {} == s[:].to_dict() + with raises(ValueError): + s[-1:] + with raises(ValueError): + s[4:-1] + with raises(ValueError): + s[-3:-2] + + +def test_index() -> None: + s = Search() + assert {"from": 3, "size": 1} == s[3].to_dict() + assert {"from": 3, "size": 1} == s[3][0].to_dict() + assert {"from": 8, "size": 0} == s[3][5].to_dict() + assert {"from": 4, "size": 1} == s[3:10][1].to_dict() + with raises(ValueError): + s[-3] + + +def test_search_to_dict() -> None: + s = Search() + assert {} == s.to_dict() + + s = s.query("match", f=42) + assert {"query": {"match": {"f": 42}}} == s.to_dict() + + assert {"query": {"match": {"f": 42}}, "size": 10} == s.to_dict(size=10) + + s.aggs.bucket("per_tag", "terms", field="f").metric( + "max_score", "max", field="score" + ) + d = { + "aggs": { + "per_tag": { + "terms": {"field": "f"}, + "aggs": {"max_score": {"max": {"field": "score"}}}, + } + }, + "query": {"match": {"f": 42}}, + } + assert d == s.to_dict() + + s = Search(extra={"size": 5}) + assert {"size": 5} == s.to_dict() + s = s.extra(from_=42) + assert {"size": 5, "from": 42} == s.to_dict() + + +def test_complex_example() -> None: + s = Search() + s = ( + s.query("match", title="python") + .query(~Q("match", title="ruby")) + .filter(Q("term", category="meetup") | Q("term", category="conference")) + .collapse("user_id") + .post_filter("terms", tags=["prague", "czech"]) + .script_fields(more_attendees="doc['attendees'].value + 42") + ) + + s.aggs.bucket("per_country", "terms", field="country").metric( + "avg_attendees", "avg", field="attendees" + ) + + s.query.minimum_should_match = 2 + + s = s.highlight_options(order="score").highlight("title", "body", fragment_size=50) + + assert { + "query": { + "bool": { + "filter": [ + { + "bool": { + "should": [ + 
{"term": {"category": "meetup"}}, + {"term": {"category": "conference"}}, + ] + } + } + ], + "must": [{"match": {"title": "python"}}], + "must_not": [{"match": {"title": "ruby"}}], + "minimum_should_match": 2, + } + }, + "post_filter": {"terms": {"tags": ["prague", "czech"]}}, + "aggs": { + "per_country": { + "terms": {"field": "country"}, + "aggs": {"avg_attendees": {"avg": {"field": "attendees"}}}, + } + }, + "collapse": {"field": "user_id"}, + "highlight": { + "order": "score", + "fields": {"title": {"fragment_size": 50}, "body": {"fragment_size": 50}}, + }, + "script_fields": {"more_attendees": {"script": "doc['attendees'].value + 42"}}, + } == s.to_dict() + + +def test_reverse() -> None: + d = { + "query": { + "bool": { + "filter": [ + { + "bool": { + "should": [ + {"term": {"category": "meetup"}}, + {"term": {"category": "conference"}}, + ] + } + } + ], + "must": [ + { + "bool": { + "must": [{"match": {"title": "python"}}], + "must_not": [{"match": {"title": "ruby"}}], + "minimum_should_match": 2, + } + } + ], + } + }, + "post_filter": {"bool": {"must": [{"terms": {"tags": ["prague", "czech"]}}]}}, + "aggs": { + "per_country": { + "terms": {"field": "country"}, + "aggs": {"avg_attendees": {"avg": {"field": "attendees"}}}, + } + }, + "sort": ["title", {"category": {"order": "desc"}}, "_score"], + "size": 5, + "highlight": {"order": "score", "fields": {"title": {"fragment_size": 50}}}, + "suggest": { + "my-title-suggestions-1": { + "text": "devloping distibutd saerch engies", + "term": {"size": 3, "field": "title"}, + } + }, + "script_fields": {"more_attendees": {"script": "doc['attendees'].value + 42"}}, + } + + d2 = deepcopy(d) + + s = Search.from_dict(d) + + # make sure we haven't modified anything in place + assert d == d2 + assert {"size": 5} == s._extra + assert d == s.to_dict() + + +def test_code_generated_classes() -> None: + s = Search() + s = ( + s.query(query.Match("title", types.MatchQuery(query="python"))) + .query(~query.Match("title", types.MatchQuery(query="ruby"))) + .query( + query.Knn( + field="title", + query_vector=[1.0, 2.0, 3.0], + num_candidates=10, + k=3, + filter=query.Range("year", wrappers.Range(gt="2004")), + ) + ) + .filter( + query.Term("category", types.TermQuery(value="meetup")) + | query.Term("category", types.TermQuery(value="conference")) + ) + .collapse("user_id") + .post_filter(query.Terms(tags=["prague", "czech"])) + .script_fields(more_attendees="doc['attendees'].value + 42") + ) + assert { + "query": { + "bool": { + "filter": [ + { + "bool": { + "should": [ + {"term": {"category": {"value": "meetup"}}}, + {"term": {"category": {"value": "conference"}}}, + ] + } + } + ], + "must": [ + {"match": {"title": {"query": "python"}}}, + { + "knn": { + "field": "title", + "filter": [ + { + "range": { + "year": { + "gt": "2004", + }, + }, + }, + ], + "k": 3, + "num_candidates": 10, + "query_vector": [ + 1.0, + 2.0, + 3.0, + ], + }, + }, + ], + "must_not": [{"match": {"title": {"query": "ruby"}}}], + } + }, + "post_filter": {"terms": {"tags": ["prague", "czech"]}}, + "collapse": {"field": "user_id"}, + "script_fields": {"more_attendees": {"script": "doc['attendees'].value + 42"}}, + } == s.to_dict() + + +def test_from_dict_doesnt_need_query() -> None: + s = Search.from_dict({"size": 5}) + + assert {"size": 5} == s.to_dict() + + +@pytest.mark.sync +def test_params_being_passed_to_search(mock_client: Any) -> None: + s = Search(using="mock") + s = s.params(routing="42") + s.execute() + + mock_client.search.assert_called_once_with(index=None, body={}, 
routing="42") + + +def test_source() -> None: + assert {} == Search().source().to_dict() + + assert { + "_source": {"includes": ["foo.bar.*"], "excludes": ["foo.one"]} + } == Search().source(includes=["foo.bar.*"], excludes=("foo.one",)).to_dict() + + assert {"_source": False} == Search().source(False).to_dict() + + assert {"_source": ["f1", "f2"]} == Search().source( + includes=["foo.bar.*"], excludes=["foo.one"] + ).source(["f1", "f2"]).to_dict() + + +def test_source_on_clone() -> None: + assert { + "_source": {"includes": ["foo.bar.*"], "excludes": ["foo.one"]}, + "query": {"bool": {"filter": [{"term": {"title": "python"}}]}}, + } == Search().source(includes=["foo.bar.*"]).source(excludes=["foo.one"]).filter( + "term", title="python" + ).to_dict() + assert { + "_source": False, + "query": {"bool": {"filter": [{"term": {"title": "python"}}]}}, + } == Search().source(False).filter("term", title="python").to_dict() + + +def test_source_on_clear() -> None: + assert ( + {} + == Search() + .source(includes=["foo.bar.*"]) + .source(includes=None, excludes=None) + .to_dict() + ) + + +def test_suggest_accepts_global_text() -> None: + s = Search.from_dict( + { + "suggest": { + "text": "the amsterdma meetpu", + "my-suggest-1": {"term": {"field": "title"}}, + "my-suggest-2": {"text": "other", "term": {"field": "body"}}, + } + } + ) + + assert { + "suggest": { + "my-suggest-1": { + "term": {"field": "title"}, + "text": "the amsterdma meetpu", + }, + "my-suggest-2": {"term": {"field": "body"}, "text": "other"}, + } + } == s.to_dict() + + +def test_suggest() -> None: + s = Search() + s = s.suggest("my_suggestion", "pyhton", term={"field": "title"}) + + assert { + "suggest": {"my_suggestion": {"term": {"field": "title"}, "text": "pyhton"}} + } == s.to_dict() + + +def test_exclude() -> None: + s = Search() + s = s.exclude("match", title="python") + + assert { + "query": { + "bool": { + "filter": [{"bool": {"must_not": [{"match": {"title": "python"}}]}}] + } + } + } == s.to_dict() + + +@pytest.mark.sync +def test_delete_by_query(mock_client: Any) -> None: + s = Search(using="mock", index="i").query("match", lang="java") + s.delete() + + mock_client.delete_by_query.assert_called_once_with( + index=["i"], body={"query": {"match": {"lang": "java"}}} + ) + + +def test_update_from_dict() -> None: + s = Search() + s.update_from_dict({"indices_boost": [{"important-documents": 2}]}) + s.update_from_dict({"_source": ["id", "name"]}) + s.update_from_dict({"collapse": {"field": "user_id"}}) + + assert { + "indices_boost": [{"important-documents": 2}], + "_source": ["id", "name"], + "collapse": {"field": "user_id"}, + } == s.to_dict() + + +def test_rescore_query_to_dict() -> None: + s = Search(index="index-name") + + positive_query = Q( + "function_score", + query=Q("term", tags="a"), + script_score={"script": "_score * 1"}, + ) + + negative_query = Q( + "function_score", + query=Q("term", tags="b"), + script_score={"script": "_score * -100"}, + ) + + s = s.query(positive_query) + s = s.extra( + rescore={"window_size": 100, "query": {"rescore_query": negative_query}} + ) + assert s.to_dict() == { + "query": { + "function_score": { + "query": {"term": {"tags": "a"}}, + "functions": [{"script_score": {"script": "_score * 1"}}], + } + }, + "rescore": { + "window_size": 100, + "query": { + "rescore_query": { + "function_score": { + "query": {"term": {"tags": "b"}}, + "functions": [{"script_score": {"script": "_score * -100"}}], + } + } + }, + }, + } + + assert s.to_dict( + rescore={"window_size": 10, "query": 
{"rescore_query": positive_query}} + ) == { + "query": { + "function_score": { + "query": {"term": {"tags": "a"}}, + "functions": [{"script_score": {"script": "_score * 1"}}], + } + }, + "rescore": { + "window_size": 10, + "query": { + "rescore_query": { + "function_score": { + "query": {"term": {"tags": "a"}}, + "functions": [{"script_score": {"script": "_score * 1"}}], + } + } + }, + }, + } + + +@pytest.mark.sync +def test_empty_search() -> None: + s = EmptySearch(index="index-name") + s = s.query("match", lang="java") + s.aggs.bucket("versions", "terms", field="version") + + assert s.count() == 0 + assert [hit for hit in s] == [] + assert [hit for hit in s.scan()] == [] + s.delete() # should not error + + +def test_suggest_completion() -> None: + s = Search() + s = s.suggest("my_suggestion", "pyhton", completion={"field": "title"}) + + assert { + "suggest": { + "my_suggestion": {"completion": {"field": "title"}, "prefix": "pyhton"} + } + } == s.to_dict() + + +def test_suggest_regex_query() -> None: + s = Search() + s = s.suggest("my_suggestion", regex="py[thon|py]", completion={"field": "title"}) + + assert { + "suggest": { + "my_suggestion": {"completion": {"field": "title"}, "regex": "py[thon|py]"} + } + } == s.to_dict() + + +def test_suggest_must_pass_text_or_regex() -> None: + s = Search() + with raises(ValueError): + s.suggest("my_suggestion") + + +def test_suggest_can_only_pass_text_or_regex() -> None: + s = Search() + with raises(ValueError): + s.suggest("my_suggestion", text="python", regex="py[hton|py]") + + +def test_suggest_regex_must_be_wtih_completion() -> None: + s = Search() + with raises(ValueError): + s.suggest("my_suggestion", regex="py[thon|py]") diff --git a/test_elasticsearch/test_dsl/_sync/test_update_by_query.py b/test_elasticsearch/test_dsl/_sync/test_update_by_query.py new file mode 100644 index 000000000..390257ffb --- /dev/null +++ b/test_elasticsearch/test_dsl/_sync/test_update_by_query.py @@ -0,0 +1,180 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +from copy import deepcopy +from typing import Any + +import pytest + +from elasticsearch.dsl import Q, UpdateByQuery +from elasticsearch.dsl.response import UpdateByQueryResponse +from elasticsearch.dsl.search_base import SearchBase + + +def test_ubq_starts_with_no_query() -> None: + ubq = UpdateByQuery() + + assert ubq.query._proxied is None + + +def test_ubq_to_dict() -> None: + ubq = UpdateByQuery() + assert {} == ubq.to_dict() + + ubq = ubq.query("match", f=42) + assert {"query": {"match": {"f": 42}}} == ubq.to_dict() + + assert {"query": {"match": {"f": 42}}, "size": 10} == ubq.to_dict(size=10) + + ubq = UpdateByQuery(extra={"size": 5}) + assert {"size": 5} == ubq.to_dict() + + ubq = UpdateByQuery(extra={"extra_q": Q("term", category="conference")}) + assert {"extra_q": {"term": {"category": "conference"}}} == ubq.to_dict() + + +def test_complex_example() -> None: + ubq = UpdateByQuery() + ubq = ( + ubq.query("match", title="python") + .query(~Q("match", title="ruby")) + .filter(Q("term", category="meetup") | Q("term", category="conference")) + .script( + source="ctx._source.likes += params.f", lang="painless", params={"f": 3} + ) + ) + + ubq.query.minimum_should_match = 2 + assert { + "query": { + "bool": { + "filter": [ + { + "bool": { + "should": [ + {"term": {"category": "meetup"}}, + {"term": {"category": "conference"}}, + ] + } + } + ], + "must": [{"match": {"title": "python"}}], + "must_not": [{"match": {"title": "ruby"}}], + "minimum_should_match": 2, + } + }, + "script": { + "source": "ctx._source.likes += params.f", + "lang": "painless", + "params": {"f": 3}, + }, + } == ubq.to_dict() + + +def test_exclude() -> None: + ubq = UpdateByQuery() + ubq = ubq.exclude("match", title="python") + + assert { + "query": { + "bool": { + "filter": [{"bool": {"must_not": [{"match": {"title": "python"}}]}}] + } + } + } == ubq.to_dict() + + +def test_reverse() -> None: + d = { + "query": { + "bool": { + "filter": [ + { + "bool": { + "should": [ + {"term": {"category": "meetup"}}, + {"term": {"category": "conference"}}, + ] + } + } + ], + "must": [ + { + "bool": { + "must": [{"match": {"title": "python"}}], + "must_not": [{"match": {"title": "ruby"}}], + "minimum_should_match": 2, + } + } + ], + } + }, + "script": { + "source": "ctx._source.likes += params.f", + "lang": "painless", + "params": {"f": 3}, + }, + } + + d2 = deepcopy(d) + + ubq = UpdateByQuery.from_dict(d) + + assert d == d2 + assert d == ubq.to_dict() + + +def test_from_dict_doesnt_need_query() -> None: + ubq = UpdateByQuery.from_dict({"script": {"source": "test"}}) + + assert {"script": {"source": "test"}} == ubq.to_dict() + + +@pytest.mark.sync +def test_params_being_passed_to_search(mock_client: Any) -> None: + ubq = UpdateByQuery(using="mock", index="i") + ubq = ubq.params(routing="42") + ubq.execute() + + mock_client.update_by_query.assert_called_once_with(index=["i"], routing="42") + + +def test_overwrite_script() -> None: + ubq = UpdateByQuery() + ubq = ubq.script( + source="ctx._source.likes += params.f", lang="painless", params={"f": 3} + ) + assert { + "script": { + "source": "ctx._source.likes += params.f", + "lang": "painless", + "params": {"f": 3}, + } + } == ubq.to_dict() + ubq = ubq.script(source="ctx._source.likes++") + assert {"script": {"source": "ctx._source.likes++"}} == ubq.to_dict() + + +def test_update_by_query_response_success() -> None: + ubqr = UpdateByQueryResponse(SearchBase(), {"timed_out": False, "failures": []}) + assert ubqr.success() + + ubqr = UpdateByQueryResponse(SearchBase(), {"timed_out": 
True, "failures": []}) + assert not ubqr.success() + + ubqr = UpdateByQueryResponse(SearchBase(), {"timed_out": False, "failures": [{}]}) + assert not ubqr.success() diff --git a/test_elasticsearch/test_dsl/async_sleep.py b/test_elasticsearch/test_dsl/async_sleep.py new file mode 100644 index 000000000..ce5ced1c5 --- /dev/null +++ b/test_elasticsearch/test_dsl/async_sleep.py @@ -0,0 +1,24 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import asyncio +from typing import Union + + +async def sleep(secs: Union[int, float]) -> None: + """Tests can use this function to sleep.""" + await asyncio.sleep(secs) diff --git a/test_elasticsearch/test_dsl/conftest.py b/test_elasticsearch/test_dsl/conftest.py new file mode 100644 index 000000000..5dd83e54c --- /dev/null +++ b/test_elasticsearch/test_dsl/conftest.py @@ -0,0 +1,466 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ + +import asyncio +import os +import re +import time +from datetime import datetime +from typing import Any, AsyncGenerator, Dict, Generator, Tuple, cast +from unittest import SkipTest +from unittest.mock import AsyncMock, Mock + +import pytest_asyncio +from elastic_transport import ObjectApiResponse +from pytest import fixture, skip + +from elasticsearch import AsyncElasticsearch, Elasticsearch +from elasticsearch.dsl import Search +from elasticsearch.dsl.async_connections import add_connection as add_async_connection +from elasticsearch.dsl.async_connections import connections as async_connections +from elasticsearch.dsl.connections import add_connection, connections +from elasticsearch.exceptions import ConnectionError +from elasticsearch.helpers import bulk + +from ..utils import CA_CERTS, wipe_cluster +from .test_integration._async import test_document as async_document +from .test_integration._sync import test_document as sync_document +from .test_integration.test_data import ( + DATA, + FLAT_DATA, + TEST_GIT_DATA, + create_flat_git_index, + create_git_index, +) + + +def get_test_client( + elasticsearch_url, wait: bool = True, **kwargs: Any +) -> Elasticsearch: + # construct kwargs from the environment + kw: Dict[str, Any] = {"request_timeout": 30} + + if elasticsearch_url.startswith("https://"): + kw["ca_certs"] = CA_CERTS + + if "PYTHON_CONNECTION_CLASS" in os.environ: + kw["node_class"] = os.environ["PYTHON_CONNECTION_CLASS"] + + kw.update(kwargs) + client = Elasticsearch(elasticsearch_url, **kw) + + # wait for yellow status + for tries_left in range(100 if wait else 1, 0, -1): + try: + client.cluster.health(wait_for_status="yellow") + return client + except ConnectionError: + if wait and tries_left == 1: + raise + time.sleep(0.1) + + raise SkipTest("Elasticsearch failed to start.") + + +async def get_async_test_client( + elasticsearch_url, wait: bool = True, **kwargs: Any +) -> AsyncElasticsearch: + # construct kwargs from the environment + kw: Dict[str, Any] = {"request_timeout": 30} + + if elasticsearch_url.startswith("https://"): + kw["ca_certs"] = CA_CERTS + + kw.update(kwargs) + client = AsyncElasticsearch(elasticsearch_url, **kw) + + # wait for yellow status + for tries_left in range(100 if wait else 1, 0, -1): + try: + await client.cluster.health(wait_for_status="yellow") + return client + except ConnectionError: + if wait and tries_left == 1: + raise + await asyncio.sleep(0.1) + + await client.close() + raise SkipTest("Elasticsearch failed to start.") + + +def _get_version(version_string: str) -> Tuple[int, ...]: + if "." 
not in version_string: + return () + version = version_string.strip().split(".") + return tuple(int(v) if v.isdigit() else 999 for v in version) + + +@fixture +def client(elasticsearch_url) -> Elasticsearch: + try: + connection = get_test_client( + elasticsearch_url, wait="WAIT_FOR_ES" in os.environ + ) + add_connection("default", connection) + yield connection + wipe_cluster(connection) + connection.close() + except SkipTest: + skip() + + +@pytest_asyncio.fixture +async def async_client(elasticsearch_url) -> AsyncGenerator[AsyncElasticsearch, None]: + try: + connection = await get_async_test_client( + elasticsearch_url, wait="WAIT_FOR_ES" in os.environ + ) + add_async_connection("default", connection) + yield connection + wipe_cluster(connection) + await connection.close() + except SkipTest: + skip() + + +@fixture +def es_version(client: Elasticsearch) -> Generator[Tuple[int, ...], None, None]: + info = client.info() + yield tuple( + int(x) + for x in re.match(r"^([0-9.]+)", info["version"]["number"]).group(1).split(".") # type: ignore + ) + + +@fixture +def write_client(client: Elasticsearch) -> Generator[Elasticsearch, None, None]: + yield client + for index_name in client.indices.get(index="test-*", expand_wildcards="all"): + client.indices.delete(index=index_name) + client.options(ignore_status=404).indices.delete_template(name="test-template") + client.options(ignore_status=404).indices.delete_index_template( + name="test-template" + ) + + +@pytest_asyncio.fixture +async def async_write_client( + write_client: Elasticsearch, async_client: AsyncElasticsearch +) -> AsyncGenerator[AsyncElasticsearch, None]: + yield async_client + + +@fixture +def mock_client( + dummy_response: ObjectApiResponse[Any], +) -> Generator[Elasticsearch, None, None]: + client = Mock() + client.search.return_value = dummy_response + client.update_by_query.return_value = dummy_response + add_connection("mock", client) + + yield client + connections._conns = {} + connections._kwargs = {} + + +@fixture +def async_mock_client( + dummy_response: ObjectApiResponse[Any], +) -> Generator[Elasticsearch, None, None]: + client = Mock() + client.search = AsyncMock(return_value=dummy_response) + client.indices = AsyncMock() + client.update_by_query = AsyncMock() + client.delete_by_query = AsyncMock() + add_async_connection("mock", client) + + yield client + async_connections._conns = {} + async_connections._kwargs = {} + + +@fixture +def data_client(client: Elasticsearch) -> Generator[Elasticsearch, None, None]: + # create mappings + create_git_index(client, "git") + create_flat_git_index(client, "flat-git") + # load data + bulk(client, DATA, raise_on_error=True, refresh=True) + bulk(client, FLAT_DATA, raise_on_error=True, refresh=True) + yield client + client.options(ignore_status=404).indices.delete(index="git") + client.options(ignore_status=404).indices.delete(index="flat-git") + + +@pytest_asyncio.fixture +async def async_data_client( + data_client: Elasticsearch, async_client: AsyncElasticsearch +) -> AsyncGenerator[AsyncElasticsearch, None]: + yield async_client + + +@fixture +def dummy_response() -> ObjectApiResponse[Any]: + return ObjectApiResponse( + meta=None, + body={ + "_shards": {"failed": 0, "successful": 10, "total": 10}, + "hits": { + "hits": [ + { + "_index": "test-index", + "_type": "company", + "_id": "elasticsearch", + "_score": 12.0, + "_source": {"city": "Amsterdam", "name": "Elasticsearch"}, + }, + { + "_index": "test-index", + "_type": "employee", + "_id": "42", + "_score": 11.123, + "_routing": 
"elasticsearch", + "_source": { + "name": {"first": "Shay", "last": "Bannon"}, + "lang": "java", + "twitter": "kimchy", + }, + }, + { + "_index": "test-index", + "_type": "employee", + "_id": "47", + "_score": 1, + "_routing": "elasticsearch", + "_source": { + "name": {"first": "Honza", "last": "Král"}, + "lang": "python", + "twitter": "honzakral", + }, + }, + { + "_index": "test-index", + "_type": "employee", + "_id": "53", + "_score": 16.0, + "_routing": "elasticsearch", + }, + ], + "max_score": 12.0, + "total": 123, + }, + "timed_out": False, + "took": 123, + }, + ) + + +@fixture +def aggs_search() -> Search: + s = Search(index="flat-git") + s.aggs.bucket("popular_files", "terms", field="files", size=2).metric( + "line_stats", "stats", field="stats.lines" + ).metric("top_commits", "top_hits", size=2, _source=["stats.*", "committed_date"]) + s.aggs.bucket( + "per_month", "date_histogram", interval="month", field="info.committed_date" + ) + s.aggs.metric("sum_lines", "sum", field="stats.lines") + return s + + +@fixture +def aggs_data() -> Dict[str, Any]: + return { + "took": 4, + "timed_out": False, + "_shards": {"total": 1, "successful": 1, "failed": 0}, + "hits": {"total": 52, "hits": [], "max_score": 0.0}, + "aggregations": { + "sum_lines": {"value": 25052.0}, + "per_month": { + "buckets": [ + { + "doc_count": 38, + "key": 1393632000000, + "key_as_string": "2014-03-01T00:00:00.000Z", + }, + { + "doc_count": 11, + "key": 1396310400000, + "key_as_string": "2014-04-01T00:00:00.000Z", + }, + { + "doc_count": 3, + "key": 1398902400000, + "key_as_string": "2014-05-01T00:00:00.000Z", + }, + ] + }, + "popular_files": { + "buckets": [ + { + "key": "elasticsearch_dsl", + "line_stats": { + "count": 40, + "max": 228.0, + "min": 2.0, + "sum": 2151.0, + "avg": 53.775, + }, + "doc_count": 40, + "top_commits": { + "hits": { + "total": 40, + "hits": [ + { + "_id": "3ca6e1e73a071a705b4babd2f581c91a2a3e5037", + "_type": "doc", + "_source": { + "stats": { + "files": 4, + "deletions": 7, + "lines": 30, + "insertions": 23, + }, + "committed_date": "2014-05-02T13:47:19", + }, + "_score": 1.0, + "_index": "flat-git", + }, + { + "_id": "eb3e543323f189fd7b698e66295427204fff5755", + "_type": "doc", + "_source": { + "stats": { + "files": 1, + "deletions": 0, + "lines": 18, + "insertions": 18, + }, + "committed_date": "2014-05-01T13:32:14", + }, + "_score": 1.0, + "_index": "flat-git", + }, + ], + "max_score": 1.0, + } + }, + }, + { + "key": "test_elasticsearch_dsl", + "line_stats": { + "count": 35, + "max": 228.0, + "min": 2.0, + "sum": 1939.0, + "avg": 55.4, + }, + "doc_count": 35, + "top_commits": { + "hits": { + "total": 35, + "hits": [ + { + "_id": "3ca6e1e73a071a705b4babd2f581c91a2a3e5037", + "_type": "doc", + "_source": { + "stats": { + "files": 4, + "deletions": 7, + "lines": 30, + "insertions": 23, + }, + "committed_date": "2014-05-02T13:47:19", + }, + "_score": 1.0, + "_index": "flat-git", + }, + { + "_id": "dd15b6ba17dd9ba16363a51f85b31f66f1fb1157", + "_type": "doc", + "_source": { + "stats": { + "files": 3, + "deletions": 18, + "lines": 62, + "insertions": 44, + }, + "committed_date": "2014-05-01T13:30:44", + }, + "_score": 1.0, + "_index": "flat-git", + }, + ], + "max_score": 1.0, + } + }, + }, + ], + "doc_count_error_upper_bound": 0, + "sum_other_doc_count": 120, + }, + }, + } + + +def make_pr(pr_module: Any) -> Any: + return pr_module.PullRequest( + _id=42, + comments=[ + pr_module.Comment( + content="Hello World!", + author=pr_module.User(name="honzakral"), + created_at=datetime(2018, 1, 9, 10, 17, 
3, 21184), + history=[ + pr_module.History( + timestamp=datetime(2012, 1, 1), + diff="-Ahoj Svete!\n+Hello World!", + ) + ], + ), + ], + created_at=datetime(2018, 1, 9, 9, 17, 3, 21184), + ) + + +@fixture +def pull_request(write_client: Elasticsearch) -> sync_document.PullRequest: + sync_document.PullRequest.init() + pr = cast(sync_document.PullRequest, make_pr(sync_document)) + pr.save(refresh=True) + return pr + + +@pytest_asyncio.fixture +async def async_pull_request( + async_write_client: AsyncElasticsearch, +) -> async_document.PullRequest: + await async_document.PullRequest.init() + pr = cast(async_document.PullRequest, make_pr(async_document)) + await pr.save(refresh=True) + return pr + + +@fixture +def setup_ubq_tests(client: Elasticsearch) -> str: + index = "test-git" + create_git_index(client, index) + bulk(client, TEST_GIT_DATA, raise_on_error=True, refresh=True) + return index diff --git a/test_elasticsearch/test_dsl/sleep.py b/test_elasticsearch/test_dsl/sleep.py new file mode 100644 index 000000000..83009566e --- /dev/null +++ b/test_elasticsearch/test_dsl/sleep.py @@ -0,0 +1,24 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import time +from typing import Union + + +def sleep(secs: Union[int, float]) -> None: + """Tests can use this function to sleep.""" + time.sleep(secs) diff --git a/test_elasticsearch/test_dsl/test_aggs.py b/test_elasticsearch/test_dsl/test_aggs.py new file mode 100644 index 000000000..f1dc10aa5 --- /dev/null +++ b/test_elasticsearch/test_dsl/test_aggs.py @@ -0,0 +1,530 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
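+
+# Unit tests for the aggregation DSL: constructing aggregations via aggs.A and
+# the typed aggregation classes, nesting buckets/metrics/pipelines, and
+# verifying the to_dict() serialization they produce.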
+ +from pytest import raises + +from elasticsearch.dsl import aggs, query, types + + +def test_repr() -> None: + max_score = aggs.Max(field="score") + a = aggs.A("terms", field="tags", aggs={"max_score": max_score}) + + assert "Terms(aggs={'max_score': Max(field='score')}, field='tags')" == repr(a) + + +def test_meta() -> None: + max_score = aggs.Max(field="score") + a = aggs.A( + "terms", field="tags", aggs={"max_score": max_score}, meta={"some": "metadata"} + ) + + assert { + "terms": {"field": "tags"}, + "aggs": {"max_score": {"max": {"field": "score"}}}, + "meta": {"some": "metadata"}, + } == a.to_dict() + + +def test_meta_from_dict() -> None: + max_score = aggs.Max(field="score") + a = aggs.A( + "terms", field="tags", aggs={"max_score": max_score}, meta={"some": "metadata"} + ) + + assert aggs.A(a.to_dict()) == a + + +def test_A_creates_proper_agg() -> None: + a = aggs.A("terms", field="tags") + + assert isinstance(a, aggs.Terms) + assert a._params == {"field": "tags"} + + +def test_A_handles_nested_aggs_properly() -> None: + max_score = aggs.Max(field="score") + a = aggs.A("terms", field="tags", aggs={"max_score": max_score}) + + assert isinstance(a, aggs.Terms) + assert a._params == {"field": "tags", "aggs": {"max_score": max_score}} + + +def test_A_passes_aggs_through() -> None: + a = aggs.A("terms", field="tags") + assert aggs.A(a) is a + + +def test_A_from_dict() -> None: + d = { + "terms": {"field": "tags"}, + "aggs": {"per_author": {"terms": {"field": "author.raw"}}}, + } + a = aggs.A(d) + + assert isinstance(a, aggs.Terms) + assert a._params == { + "field": "tags", + "aggs": {"per_author": aggs.A("terms", field="author.raw")}, + } + assert a["per_author"] == aggs.A("terms", field="author.raw") + assert a.aggs.per_author == aggs.A("terms", field="author.raw") # type: ignore[attr-defined] + + +def test_A_fails_with_incorrect_dict() -> None: + correct_d = { + "terms": {"field": "tags"}, + "aggs": {"per_author": {"terms": {"field": "author.raw"}}}, + } + + with raises(Exception): + aggs.A(correct_d, field="f") + + d = correct_d.copy() + del d["terms"] + with raises(Exception): + aggs.A(d) + + d = correct_d.copy() + d["xx"] = {} + with raises(Exception): + aggs.A(d) + + +def test_A_fails_with_agg_and_params() -> None: + a = aggs.A("terms", field="tags") + + with raises(Exception): + aggs.A(a, field="score") + + +def test_buckets_are_nestable() -> None: + a = aggs.Terms(field="tags") + b = a.bucket("per_author", "terms", field="author.raw") + + assert isinstance(b, aggs.Terms) + assert b._params == {"field": "author.raw"} + assert a.aggs == {"per_author": b} + + +def test_metric_inside_buckets() -> None: + a = aggs.Terms(field="tags") + b = a.metric("max_score", "max", field="score") + + # returns bucket so it's chainable + assert a is b + assert a.aggs["max_score"] == aggs.Max(field="score") + + +def test_buckets_equals_counts_subaggs() -> None: + a = aggs.Terms(field="tags") + a.bucket("per_author", "terms", field="author.raw") + b = aggs.Terms(field="tags") + + assert a != b + + +def test_buckets_to_dict() -> None: + a = aggs.Terms(field="tags") + a.bucket("per_author", "terms", field="author.raw") + + assert { + "terms": {"field": "tags"}, + "aggs": {"per_author": {"terms": {"field": "author.raw"}}}, + } == a.to_dict() + + a = aggs.Terms(field="tags") + a.metric("max_score", "max", field="score") + + assert { + "terms": {"field": "tags"}, + "aggs": {"max_score": {"max": {"field": "score"}}}, + } == a.to_dict() + + +def test_nested_buckets_are_reachable_as_getitem() -> None: + a 
= aggs.Terms(field="tags") + b = a.bucket("per_author", "terms", field="author.raw") + + assert a["per_author"] is not b + assert a["per_author"] == b + + +def test_nested_buckets_are_settable_as_getitem() -> None: + a = aggs.Terms(field="tags") + b = a["per_author"] = aggs.A("terms", field="author.raw") + + assert a.aggs["per_author"] is b + + +def test_filter_can_be_instantiated_using_positional_args() -> None: + a = aggs.Filter(query.Q("term", f=42)) + + assert {"filter": {"term": {"f": 42}}} == a.to_dict() + + assert a == aggs.A("filter", query.Q("term", f=42)) + + +def test_filter_aggregation_as_nested_agg() -> None: + a = aggs.Terms(field="tags") + a.bucket("filtered", "filter", query.Q("term", f=42)) + + assert { + "terms": {"field": "tags"}, + "aggs": {"filtered": {"filter": {"term": {"f": 42}}}}, + } == a.to_dict() + + +def test_filter_aggregation_with_nested_aggs() -> None: + a = aggs.Filter(query.Q("term", f=42)) + a.bucket("testing", "terms", field="tags") + + assert { + "filter": {"term": {"f": 42}}, + "aggs": {"testing": {"terms": {"field": "tags"}}}, + } == a.to_dict() + + +def test_filters_correctly_identifies_the_hash() -> None: + a = aggs.A( + "filters", + filters={ + "group_a": {"term": {"group": "a"}}, + "group_b": {"term": {"group": "b"}}, + }, + ) + + assert { + "filters": { + "filters": { + "group_a": {"term": {"group": "a"}}, + "group_b": {"term": {"group": "b"}}, + } + } + } == a.to_dict() + assert a.filters.group_a == query.Q("term", group="a") + + +def test_bucket_sort_agg() -> None: + # test the dictionary (type ignored) and fully typed alterantives + bucket_sort_agg = aggs.BucketSort(sort=[{"total_sales": {"order": "desc"}}], size=3) # type: ignore + assert bucket_sort_agg.to_dict() == { + "bucket_sort": {"sort": [{"total_sales": {"order": "desc"}}], "size": 3} + } + bucket_sort_agg = aggs.BucketSort( + sort=[types.SortOptions("total_sales", types.FieldSort(order="desc"))], size=3 + ) + assert bucket_sort_agg.to_dict() == { + "bucket_sort": {"sort": [{"total_sales": {"order": "desc"}}], "size": 3} + } + + a = aggs.DateHistogram(field="date", interval="month") + a.bucket("total_sales", "sum", field="price") + a.bucket( + "sales_bucket_sort", + "bucket_sort", + sort=[{"total_sales": {"order": "desc"}}], + size=3, + ) + assert { + "date_histogram": {"field": "date", "interval": "month"}, + "aggs": { + "total_sales": {"sum": {"field": "price"}}, + "sales_bucket_sort": { + "bucket_sort": {"sort": [{"total_sales": {"order": "desc"}}], "size": 3} + }, + }, + } == a.to_dict() + + +def test_bucket_sort_agg_only_trnunc() -> None: + # test the dictionary (type ignored) and fully typed alterantives + bucket_sort_agg = aggs.BucketSort(**{"from": 1, "size": 1, "_expand__to_dot": False}) # type: ignore + assert bucket_sort_agg.to_dict() == {"bucket_sort": {"from": 1, "size": 1}} + bucket_sort_agg = aggs.BucketSort(from_=1, size=1, _expand__to_dot=False) + assert bucket_sort_agg.to_dict() == {"bucket_sort": {"from": 1, "size": 1}} + + a = aggs.DateHistogram(field="date", interval="month") + a.bucket("bucket_truncate", "bucket_sort", **{"from": 1, "size": 1}) + assert { + "date_histogram": {"field": "date", "interval": "month"}, + "aggs": {"bucket_truncate": {"bucket_sort": {"from": 1, "size": 1}}}, + } == a.to_dict() + + +def test_geohash_grid_aggregation() -> None: + # test the dictionary (type ignored) and fully typed alterantives + a = aggs.GeohashGrid(**{"field": "centroid", "precision": 3}) # type: ignore + assert {"geohash_grid": {"field": "centroid", "precision": 3}} == 
a.to_dict() + a = aggs.GeohashGrid(field="centroid", precision=3) + assert {"geohash_grid": {"field": "centroid", "precision": 3}} == a.to_dict() + + +def test_geohex_grid_aggregation() -> None: + # test the dictionary (type ignored) and fully typed alterantives + a = aggs.GeohexGrid(**{"field": "centroid", "precision": 3}) # type: ignore + assert {"geohex_grid": {"field": "centroid", "precision": 3}} == a.to_dict() + a = aggs.GeohexGrid(field="centroid", precision=3) + assert {"geohex_grid": {"field": "centroid", "precision": 3}} == a.to_dict() + + +def test_geotile_grid_aggregation() -> None: + # test the dictionary (type ignored) and fully typed alterantives + a = aggs.GeotileGrid(**{"field": "centroid", "precision": 3}) # type: ignore + assert {"geotile_grid": {"field": "centroid", "precision": 3}} == a.to_dict() + a = aggs.GeotileGrid(field="centroid", precision=3) + assert {"geotile_grid": {"field": "centroid", "precision": 3}} == a.to_dict() + + +def test_boxplot_aggregation() -> None: + a = aggs.Boxplot(field="load_time") + + assert {"boxplot": {"field": "load_time"}} == a.to_dict() + + +def test_rare_terms_aggregation() -> None: + a = aggs.RareTerms(field="the-field") + a.bucket("total_sales", "sum", field="price") + a.bucket( + "sales_bucket_sort", + "bucket_sort", + sort=[{"total_sales": {"order": "desc"}}], + size=3, + ) + + assert { + "aggs": { + "sales_bucket_sort": { + "bucket_sort": {"size": 3, "sort": [{"total_sales": {"order": "desc"}}]} + }, + "total_sales": {"sum": {"field": "price"}}, + }, + "rare_terms": {"field": "the-field"}, + } == a.to_dict() + + +def test_variable_width_histogram_aggregation() -> None: + a = aggs.VariableWidthHistogram(field="price", buckets=2) + assert {"variable_width_histogram": {"buckets": 2, "field": "price"}} == a.to_dict() + + +def test_ip_prefix_aggregation() -> None: + # test the dictionary (type ignored) and fully typed alterantives + a = aggs.IPPrefix(**{"field": "ipv4", "prefix_length": 24}) # type: ignore + assert {"ip_prefix": {"field": "ipv4", "prefix_length": 24}} == a.to_dict() + a = aggs.IPPrefix(field="ipv4", prefix_length=24) + assert {"ip_prefix": {"field": "ipv4", "prefix_length": 24}} == a.to_dict() + + +def test_ip_prefix_aggregation_extra() -> None: + a = aggs.IPPrefix(field="ipv6", prefix_length=64, is_ipv6=True) + + assert { + "ip_prefix": { + "field": "ipv6", + "prefix_length": 64, + "is_ipv6": True, + }, + } == a.to_dict() + + +def test_multi_terms_aggregation() -> None: + a = aggs.MultiTerms(terms=[{"field": "tags"}, {"field": "author.row"}]) + assert { + "multi_terms": { + "terms": [ + {"field": "tags"}, + {"field": "author.row"}, + ] + } + } == a.to_dict() + a = aggs.MultiTerms( + terms=[ + types.MultiTermLookup(field="tags"), + types.MultiTermLookup(field="author.row"), + ] + ) + assert { + "multi_terms": { + "terms": [ + {"field": "tags"}, + {"field": "author.row"}, + ] + } + } == a.to_dict() + + +def test_categorize_text_aggregation() -> None: + a = aggs.CategorizeText( + field="tags", + categorization_filters=["\\w+\\_\\d{3}"], + max_matched_tokens=2, + similarity_threshold=30, + ) + assert { + "categorize_text": { + "field": "tags", + "categorization_filters": ["\\w+\\_\\d{3}"], + "max_matched_tokens": 2, + "similarity_threshold": 30, + } + } == a.to_dict() + + +def test_median_absolute_deviation_aggregation() -> None: + a = aggs.MedianAbsoluteDeviation(field="rating") + + assert {"median_absolute_deviation": {"field": "rating"}} == a.to_dict() + + +def test_t_test_aggregation() -> None: + a = aggs.TTest( + 
a={"field": "startup_time_before"}, + b={"field": "startup_time_after"}, + type="paired", + ) + + assert { + "t_test": { + "a": {"field": "startup_time_before"}, + "b": {"field": "startup_time_after"}, + "type": "paired", + } + } == a.to_dict() + + +def test_geo_line_aggregation() -> None: + a = aggs.GeoLine(point={"field": "centroid"}, sort={"field": "date"}) + + assert { + "geo_line": { + "point": {"field": "centroid"}, + "sort": {"field": "date"}, + }, + } == a.to_dict() + + +def test_inference_aggregation() -> None: + a = aggs.Inference(model_id="model-id", buckets_path={"agg_name": "agg_name"}) + assert { + "inference": {"buckets_path": {"agg_name": "agg_name"}, "model_id": "model-id"} + } == a.to_dict() + + +def test_matrix_stats_aggregation() -> None: + a = aggs.MatrixStats(fields=["poverty", "income"]) + + assert {"matrix_stats": {"fields": ["poverty", "income"]}} == a.to_dict() + + +def test_moving_percentiles_aggregation() -> None: + a = aggs.DateHistogram() + a.bucket("the_percentile", "percentiles", field="price", percents=[1.0, 99.0]) + a.pipeline( + "the_movperc", "moving_percentiles", buckets_path="the_percentile", window=10 + ) + + assert { + "aggs": { + "the_movperc": { + "moving_percentiles": {"buckets_path": "the_percentile", "window": 10} + }, + "the_percentile": { + "percentiles": {"field": "price", "percents": [1.0, 99.0]} + }, + }, + "date_histogram": {}, + } == a.to_dict() + + +def test_normalize_aggregation() -> None: + a = aggs.Normalize(buckets_path="normalized", method="percent_of_sum") + assert { + "normalize": {"buckets_path": "normalized", "method": "percent_of_sum"} + } == a.to_dict() + + +def test_random_sampler_aggregation() -> None: + a = aggs.RandomSampler(probability=0.1).metric( + "price_percentiles", + "percentiles", + field="price", + ) + + assert { + "random_sampler": { + "probability": 0.1, + }, + "aggs": { + "price_percentiles": { + "percentiles": {"field": "price"}, + }, + }, + } == a.to_dict() + + +def test_adjancecy_matrix_aggregation() -> None: + a = aggs.AdjacencyMatrix(filters={"grpA": {"terms": {"accounts": ["hillary", "sidney"]}}, "grpB": {"terms": {"accounts": ["donald", "mitt"]}}, "grpC": {"terms": {"accounts": ["vladimir", "nigel"]}}}) # type: ignore + assert { + "adjacency_matrix": { + "filters": { + "grpA": {"terms": {"accounts": ["hillary", "sidney"]}}, + "grpB": {"terms": {"accounts": ["donald", "mitt"]}}, + "grpC": {"terms": {"accounts": ["vladimir", "nigel"]}}, + } + } + } == a.to_dict() + a = aggs.AdjacencyMatrix( + filters={ + "grpA": query.Terms(accounts=["hillary", "sidney"]), + "grpB": query.Terms(accounts=["donald", "mitt"]), + "grpC": query.Terms(accounts=["vladimir", "nigel"]), + } + ) + assert { + "adjacency_matrix": { + "filters": { + "grpA": {"terms": {"accounts": ["hillary", "sidney"]}}, + "grpB": {"terms": {"accounts": ["donald", "mitt"]}}, + "grpC": {"terms": {"accounts": ["vladimir", "nigel"]}}, + } + } + } == a.to_dict() + + +def test_top_metrics_aggregation() -> None: + # test the dictionary (type ignored) and fully typed alterantives + a = aggs.TopMetrics(metrics={"field": "m"}, sort={"s": "desc"}) # type: ignore + assert { + "top_metrics": {"metrics": {"field": "m"}, "sort": {"s": "desc"}} + } == a.to_dict() + a = aggs.TopMetrics( + metrics=types.TopMetricsValue(field="m"), + sort=types.SortOptions("s", types.FieldSort(order="desc")), + ) + assert { + "top_metrics": {"metrics": {"field": "m"}, "sort": {"s": {"order": "desc"}}} + } == a.to_dict() + + +def test_bucket_agg_with_filter() -> None: + b = 
aggs.Filter(query.Terms(something=[1, 2, 3])) + + a = aggs.Terms(field="some_field", size=100) + a.bucket("b", b) + + assert a.aggs["b"] == a["b"] # a['b'] threw exception before patch #1902 diff --git a/test_elasticsearch/test_dsl/test_analysis.py b/test_elasticsearch/test_dsl/test_analysis.py new file mode 100644 index 000000000..47a08672d --- /dev/null +++ b/test_elasticsearch/test_dsl/test_analysis.py @@ -0,0 +1,216 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from pytest import raises + +from elasticsearch.dsl import analysis + + +def test_analyzer_serializes_as_name() -> None: + a = analysis.analyzer("my_analyzer") + + assert "my_analyzer" == a.to_dict() # type: ignore + + +def test_analyzer_has_definition() -> None: + a = analysis.CustomAnalyzer( + "my_analyzer", tokenizer="keyword", filter=["lowercase"] + ) + + assert { + "type": "custom", + "tokenizer": "keyword", + "filter": ["lowercase"], + } == a.get_definition() + + +def test_simple_multiplexer_filter() -> None: + a = analysis.analyzer( + "my_analyzer", + tokenizer="keyword", + filter=[ + analysis.token_filter( + "my_multi", "multiplexer", filters=["lowercase", "lowercase, stop"] + ) + ], + ) + + assert { + "analyzer": { + "my_analyzer": { + "filter": ["my_multi"], + "tokenizer": "keyword", + "type": "custom", + } + }, + "filter": { + "my_multi": { + "filters": ["lowercase", "lowercase, stop"], + "type": "multiplexer", + } + }, + } == a.get_analysis_definition() + + +def test_multiplexer_with_custom_filter() -> None: + a = analysis.analyzer( + "my_analyzer", + tokenizer="keyword", + filter=[ + analysis.token_filter( + "my_multi", + "multiplexer", + filters=[ + [analysis.token_filter("en", "snowball", language="English")], + "lowercase, stop", + ], + ) + ], + ) + + assert { + "analyzer": { + "my_analyzer": { + "filter": ["my_multi"], + "tokenizer": "keyword", + "type": "custom", + } + }, + "filter": { + "en": {"type": "snowball", "language": "English"}, + "my_multi": {"filters": ["en", "lowercase, stop"], "type": "multiplexer"}, + }, + } == a.get_analysis_definition() + + +def test_conditional_token_filter() -> None: + a = analysis.analyzer( + "my_cond", + tokenizer=analysis.tokenizer("keyword"), + filter=[ + analysis.token_filter( + "testing", + "condition", + script={"source": "return true"}, + filter=[ + "lowercase", + analysis.token_filter("en", "snowball", language="English"), + ], + ), + "stop", + ], + ) + + assert { + "analyzer": { + "my_cond": { + "filter": ["testing", "stop"], + "tokenizer": "keyword", + "type": "custom", + } + }, + "filter": { + "en": {"language": "English", "type": "snowball"}, + "testing": { + "script": {"source": "return true"}, + "filter": ["lowercase", "en"], + "type": "condition", + }, + }, + } == a.get_analysis_definition() + + +def 
test_conflicting_nested_filters_cause_error() -> None: + a = analysis.analyzer( + "my_cond", + tokenizer=analysis.tokenizer("keyword"), + filter=[ + analysis.token_filter("en", "stemmer", language="english"), + analysis.token_filter( + "testing", + "condition", + script={"source": "return true"}, + filter=[ + "lowercase", + analysis.token_filter("en", "snowball", language="English"), + ], + ), + ], + ) + + with raises(ValueError): + a.get_analysis_definition() + + +def test_normalizer_serializes_as_name() -> None: + n = analysis.normalizer("my_normalizer") + + assert "my_normalizer" == n.to_dict() # type: ignore + + +def test_normalizer_has_definition() -> None: + n = analysis.CustomNormalizer( + "my_normalizer", filter=["lowercase", "asciifolding"], char_filter=["quote"] + ) + + assert { + "type": "custom", + "filter": ["lowercase", "asciifolding"], + "char_filter": ["quote"], + } == n.get_definition() + + +def test_tokenizer() -> None: + t = analysis.tokenizer("trigram", "nGram", min_gram=3, max_gram=3) + + assert t.to_dict() == "trigram" # type: ignore + assert {"type": "nGram", "min_gram": 3, "max_gram": 3} == t.get_definition() + + +def test_custom_analyzer_can_collect_custom_items() -> None: + trigram = analysis.tokenizer("trigram", "nGram", min_gram=3, max_gram=3) + my_stop = analysis.token_filter("my_stop", "stop", stopwords=["a", "b"]) + umlauts = analysis.char_filter("umlauts", "pattern_replace", mappings=["ü=>ue"]) + a = analysis.analyzer( + "my_analyzer", + tokenizer=trigram, + filter=["lowercase", my_stop], + char_filter=["html_strip", umlauts], + ) + + assert a.to_dict() == "my_analyzer" # type: ignore + assert { + "analyzer": { + "my_analyzer": { + "type": "custom", + "tokenizer": "trigram", + "filter": ["lowercase", "my_stop"], + "char_filter": ["html_strip", "umlauts"], + } + }, + "tokenizer": {"trigram": trigram.get_definition()}, + "filter": {"my_stop": my_stop.get_definition()}, + "char_filter": {"umlauts": umlauts.get_definition()}, + } == a.get_analysis_definition() + + +def test_stemmer_analyzer_can_pass_name() -> None: + t = analysis.token_filter( + "my_english_filter", name="minimal_english", type="stemmer" + ) + assert t.to_dict() == "my_english_filter" # type: ignore + assert {"type": "stemmer", "name": "minimal_english"} == t.get_definition() diff --git a/test_elasticsearch/test_dsl/test_connections.py b/test_elasticsearch/test_dsl/test_connections.py new file mode 100644 index 000000000..dcaa59a98 --- /dev/null +++ b/test_elasticsearch/test_dsl/test_connections.py @@ -0,0 +1,143 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
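+
+# Unit tests for the connection registry: default and named connections, lazy
+# creation from configure(), connection removal, the JSON serializer wiring,
+# and the elasticsearch-dsl-py user-agent header.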
+ +from typing import Any, List + +from pytest import raises + +from elasticsearch import Elasticsearch +from elasticsearch.dsl import connections, serializer + + +class DummyElasticsearch: + def __init__(self, *args: Any, hosts: List[str], **kwargs: Any): + self.hosts = hosts + + +def test_default_connection_is_returned_by_default() -> None: + c = connections.Connections[object](elasticsearch_class=object) + + con, con2 = object(), object() + c.add_connection("default", con) + + c.add_connection("not-default", con2) + + assert c.get_connection() is con + + +def test_get_connection_created_connection_if_needed() -> None: + c = connections.Connections[DummyElasticsearch]( + elasticsearch_class=DummyElasticsearch + ) + c.configure( + default={"hosts": ["https://es.com:9200"]}, + local={"hosts": ["https://localhost:9200"]}, + ) + + default = c.get_connection() + local = c.get_connection("local") + + assert isinstance(default, DummyElasticsearch) + assert isinstance(local, DummyElasticsearch) + + assert default.hosts == ["https://es.com:9200"] + assert local.hosts == ["https://localhost:9200"] + + +def test_configure_preserves_unchanged_connections() -> None: + c = connections.Connections[DummyElasticsearch]( + elasticsearch_class=DummyElasticsearch + ) + + c.configure( + default={"hosts": ["https://es.com:9200"]}, + local={"hosts": ["https://localhost:9200"]}, + ) + default = c.get_connection() + local = c.get_connection("local") + + c.configure( + default={"hosts": ["https://not-es.com:9200"]}, + local={"hosts": ["https://localhost:9200"]}, + ) + new_default = c.get_connection() + new_local = c.get_connection("local") + + assert new_local is local + assert new_default is not default + + +def test_remove_connection_removes_both_conn_and_conf() -> None: + c = connections.Connections[object](elasticsearch_class=DummyElasticsearch) + + c.configure( + default={"hosts": ["https://es.com:9200"]}, + local={"hosts": ["https://localhost:9200"]}, + ) + c.add_connection("local2", object()) + + c.remove_connection("default") + c.get_connection("local2") + c.remove_connection("local2") + + with raises(Exception): + c.get_connection("local2") + c.get_connection("default") + + +def test_create_connection_constructs_client() -> None: + c = connections.Connections[DummyElasticsearch]( + elasticsearch_class=DummyElasticsearch + ) + c.create_connection("testing", hosts=["https://es.com:9200"]) + + con = c.get_connection("testing") + assert con.hosts == ["https://es.com:9200"] + + +def test_create_connection_adds_our_serializer() -> None: + c = connections.Connections[Elasticsearch](elasticsearch_class=Elasticsearch) + c.create_connection("testing", hosts=["https://es.com:9200"]) + + c_serializers = c.get_connection("testing").transport.serializers + assert c_serializers.serializers["application/json"] is serializer.serializer + + +def test_connection_has_correct_user_agent() -> None: + c = connections.Connections[Elasticsearch](elasticsearch_class=Elasticsearch) + + c.create_connection("testing", hosts=["https://es.com:9200"]) + assert ( + c.get_connection("testing") + ._headers["user-agent"] + .startswith("elasticsearch-dsl-py/") + ) + + my_client = Elasticsearch(hosts=["http://localhost:9200"]) + my_client = my_client.options(headers={"user-agent": "my-user-agent/1.0"}) + c.add_connection("default", my_client) + assert c.get_connection()._headers["user-agent"].startswith("elasticsearch-dsl-py/") + + my_client = Elasticsearch(hosts=["http://localhost:9200"]) + assert ( + c.get_connection(my_client) + 
._headers["user-agent"] + .startswith("elasticsearch-dsl-py/") + ) + + not_a_client = object() + assert c.get_connection(not_a_client) == not_a_client # type: ignore[arg-type] diff --git a/test_elasticsearch/test_dsl/test_field.py b/test_elasticsearch/test_dsl/test_field.py new file mode 100644 index 000000000..423936ae3 --- /dev/null +++ b/test_elasticsearch/test_dsl/test_field.py @@ -0,0 +1,234 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import base64 +import ipaddress +from datetime import date, datetime, time +from typing import cast + +import pytest +from dateutil import tz + +from elasticsearch.dsl import InnerDoc, Range, ValidationException, field + + +def test_date_range_deserialization() -> None: + data = {"lt": "2018-01-01T00:30:10"} + + r = field.DateRange().deserialize(data) + + assert isinstance(r, Range) + assert r.lt == datetime(2018, 1, 1, 0, 30, 10) + + +def test_boolean_deserialization() -> None: + bf = field.Boolean() + + assert not bf.deserialize("false") + assert not bf.deserialize(False) + assert not bf.deserialize("") + assert not bf.deserialize(0) + + assert bf.deserialize(True) + assert bf.deserialize("true") + assert bf.deserialize(1) + + +def test_datetime_deserialization() -> None: + f = field.Date() + dt = datetime.now() + assert dt == f._deserialize(dt.isoformat()) + + d = date.today() + assert datetime.combine(d, time()) == f._deserialize(d.isoformat()) + + +def test_date_deserialization() -> None: + f = field.Date(format="yyyy-MM-dd") + d = date.today() + assert d == f._deserialize(d.isoformat()) + + dt = datetime.now() + assert dt.date() == f._deserialize(dt.isoformat()) + + +def test_date_field_can_have_default_tz() -> None: + f = field.Date(default_timezone="UTC") + now = datetime.now() + + now_with_tz = cast(datetime, f._deserialize(now)) + + assert now_with_tz.tzinfo == tz.gettz("UTC") + assert now.isoformat() + "+00:00" == now_with_tz.isoformat() + + now_with_tz = cast(datetime, f._deserialize(now.isoformat())) + + assert now_with_tz.tzinfo == tz.gettz("UTC") + assert now.isoformat() + "+00:00" == now_with_tz.isoformat() + + +def test_custom_field_car_wrap_other_field() -> None: + class MyField(field.CustomField): + @property + def builtin_type(self) -> field.Text: + return field.Text(**self._params) + + assert {"type": "text", "index": "not_analyzed"} == MyField( + index="not_analyzed" + ).to_dict() + + +def test_field_from_dict() -> None: + f = field.construct_field({"type": "text", "index": "not_analyzed"}) + + assert isinstance(f, field.Text) + assert {"type": "text", "index": "not_analyzed"} == f.to_dict() + + +def test_multi_fields_are_accepted_and_parsed() -> None: + f = field.construct_field( + "text", + fields={"raw": {"type": "keyword"}, "eng": field.Text(analyzer="english")}, + ) + + assert isinstance(f, 
field.Text) + assert { + "type": "text", + "fields": { + "raw": {"type": "keyword"}, + "eng": {"type": "text", "analyzer": "english"}, + }, + } == f.to_dict() + + +def test_nested_provides_direct_access_to_its_fields() -> None: + f = field.Nested(properties={"name": {"type": "text", "index": "not_analyzed"}}) + + assert "name" in f + assert f["name"] == field.Text(index="not_analyzed") + + +def test_field_supports_multiple_analyzers() -> None: + f = field.Text(analyzer="snowball", search_analyzer="keyword") + assert { + "analyzer": "snowball", + "search_analyzer": "keyword", + "type": "text", + } == f.to_dict() + + +def test_multifield_supports_multiple_analyzers() -> None: + f = field.Text( + fields={ + "f1": field.Text(search_analyzer="keyword", analyzer="snowball"), + "f2": field.Text(analyzer="keyword"), + } + ) + assert { + "fields": { + "f1": { + "analyzer": "snowball", + "search_analyzer": "keyword", + "type": "text", + }, + "f2": {"analyzer": "keyword", "type": "text"}, + }, + "type": "text", + } == f.to_dict() + + +def test_scaled_float() -> None: + with pytest.raises(TypeError): + field.ScaledFloat() # type: ignore + f = field.ScaledFloat(123) + assert f.to_dict() == {"scaling_factor": 123, "type": "scaled_float"} + + +def test_ipaddress() -> None: + f = field.Ip() + assert f.deserialize("127.0.0.1") == ipaddress.ip_address("127.0.0.1") + assert f.deserialize("::1") == ipaddress.ip_address("::1") + assert f.serialize(f.deserialize("::1")) == "::1" + assert f.deserialize(None) is None + with pytest.raises(ValueError): + assert f.deserialize("not_an_ipaddress") + + +def test_float() -> None: + f = field.Float() + assert f.deserialize("42") == 42.0 + assert f.deserialize(None) is None + with pytest.raises(ValueError): + assert f.deserialize("not_a_float") + + +def test_integer() -> None: + f = field.Integer() + assert f.deserialize("42") == 42 + assert f.deserialize(None) is None + with pytest.raises(ValueError): + assert f.deserialize("not_an_integer") + + +def test_binary() -> None: + f = field.Binary() + assert f.deserialize(base64.b64encode(b"42")) == b"42" + assert f.deserialize(f.serialize(b"42")) == b"42" + assert f.deserialize(None) is None + + +def test_constant_keyword() -> None: + f = field.ConstantKeyword() + assert f.to_dict() == {"type": "constant_keyword"} + + +def test_rank_features() -> None: + f = field.RankFeatures() + assert f.to_dict() == {"type": "rank_features"} + + +def test_object_dynamic_values() -> None: + f = field.Object(dynamic=True) + assert f.to_dict()["dynamic"] is True + f = field.Object(dynamic=False) + assert f.to_dict()["dynamic"] is False + f = field.Object(dynamic="strict") + assert f.to_dict()["dynamic"] == "strict" + + +def test_object_disabled() -> None: + f = field.Object(enabled=False) + assert f.to_dict() == {"type": "object", "enabled": False} + + +def test_object_constructor() -> None: + expected = {"type": "object", "properties": {"inner_int": {"type": "integer"}}} + + class Inner(InnerDoc): + inner_int = field.Integer() + + obj_from_doc = field.Object(doc_class=Inner) + assert obj_from_doc.to_dict() == expected + + obj_from_props = field.Object(properties={"inner_int": field.Integer()}) + assert obj_from_props.to_dict() == expected + + with pytest.raises(ValidationException): + field.Object(doc_class=Inner, properties={"inner_int": field.Integer()}) + + with pytest.raises(ValidationException): + field.Object(doc_class=Inner, dynamic=False) diff --git a/test_elasticsearch/test_dsl/test_integration/__init__.py 
b/test_elasticsearch/test_dsl/test_integration/__init__.py new file mode 100644 index 000000000..2a87d183f --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/__init__.py @@ -0,0 +1,16 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. diff --git a/test_elasticsearch/test_dsl/test_integration/_async/__init__.py b/test_elasticsearch/test_dsl/test_integration/_async/__init__.py new file mode 100644 index 000000000..2a87d183f --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/_async/__init__.py @@ -0,0 +1,16 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. diff --git a/test_elasticsearch/test_dsl/test_integration/_async/test_analysis.py b/test_elasticsearch/test_dsl/test_integration/_async/test_analysis.py new file mode 100644 index 000000000..00598d4d5 --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/_async/test_analysis.py @@ -0,0 +1,54 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
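+
+# Integration tests that simulate analyzers against a running cluster via
+# analyzer.async_simulate(), covering built-in and custom tokenizers/filters.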
+ +import pytest + +from elasticsearch import AsyncElasticsearch +from elasticsearch.dsl import analyzer, token_filter, tokenizer + + +@pytest.mark.asyncio +async def test_simulate_with_just__builtin_tokenizer( + async_client: AsyncElasticsearch, +) -> None: + a = analyzer("my-analyzer", tokenizer="keyword") + tokens = (await a.async_simulate("Hello World!", using=async_client)).tokens + + assert len(tokens) == 1 + assert tokens[0].token == "Hello World!" + + +@pytest.mark.asyncio +async def test_simulate_complex(async_client: AsyncElasticsearch) -> None: + a = analyzer( + "my-analyzer", + tokenizer=tokenizer("split_words", "simple_pattern_split", pattern=":"), + filter=["lowercase", token_filter("no-ifs", "stop", stopwords=["if"])], + ) + + tokens = (await a.async_simulate("if:this:works", using=async_client)).tokens + + assert len(tokens) == 2 + assert ["this", "works"] == [t.token for t in tokens] + + +@pytest.mark.asyncio +async def test_simulate_builtin(async_client: AsyncElasticsearch) -> None: + a = analyzer("my-analyzer", "english") + tokens = (await a.async_simulate("fixes running")).tokens + + assert ["fix", "run"] == [t.token for t in tokens] diff --git a/test_elasticsearch/test_dsl/test_integration/_async/test_document.py b/test_elasticsearch/test_dsl/test_integration/_async/test_document.py new file mode 100644 index 000000000..e72955a0a --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/_async/test_document.py @@ -0,0 +1,852 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# this file creates several documents using bad or no types because +# these are still supported and should be kept functional in spite +# of not having appropriate type hints. For that reason the comment +# below disables many mypy checks that fails as a result of this. 
+# mypy: disable-error-code="assignment, index, arg-type, call-arg, operator, comparison-overlap, attr-defined" + +from datetime import datetime +from ipaddress import ip_address +from typing import TYPE_CHECKING, Any, AsyncIterator, Dict, List, Tuple, Union + +import pytest +from pytest import raises +from pytz import timezone + +from elasticsearch import AsyncElasticsearch, ConflictError, NotFoundError +from elasticsearch.dsl import ( + AsyncDocument, + AsyncSearch, + Binary, + Boolean, + Date, + DenseVector, + Double, + InnerDoc, + Ip, + Keyword, + Long, + Mapping, + MetaField, + Nested, + Object, + Q, + RankFeatures, + Text, + analyzer, + mapped_field, +) +from elasticsearch.dsl.utils import AttrList +from elasticsearch.helpers.errors import BulkIndexError + +snowball = analyzer("my_snow", tokenizer="standard", filter=["lowercase", "snowball"]) + + +class User(InnerDoc): + name = Text(fields={"raw": Keyword()}) + + +class Wiki(AsyncDocument): + owner = Object(User) + views = Long() + ranked = RankFeatures() + + class Index: + name = "test-wiki" + + +class Repository(AsyncDocument): + owner = Object(User) + created_at = Date() + description = Text(analyzer=snowball) + tags = Keyword() + + @classmethod + def search(cls) -> AsyncSearch["Repository"]: # type: ignore[override] + return super().search().filter("term", commit_repo="repo") + + class Index: + name = "git" + + +class Commit(AsyncDocument): + committed_date = Date() + authored_date = Date() + description = Text(analyzer=snowball) + + class Index: + name = "flat-git" + + class Meta: + mapping = Mapping() + + +class History(InnerDoc): + timestamp = Date() + diff = Text() + + +class Comment(InnerDoc): + content = Text() + created_at = Date() + author = Object(User) + history = Nested(History) + + class Meta: + dynamic = MetaField(False) + + +class PullRequest(AsyncDocument): + comments = Nested(Comment) + created_at = Date() + + class Index: + name = "test-prs" + + +class SerializationDoc(AsyncDocument): + i = Long() + b = Boolean() + d = Double() + bin = Binary() + ip = Ip() + + class Index: + name = "test-serialization" + + +class Tags(AsyncDocument): + tags = Keyword(multi=True) + + class Index: + name = "tags" + + +@pytest.mark.asyncio +async def test_serialization(async_write_client: AsyncElasticsearch) -> None: + await SerializationDoc.init() + await async_write_client.index( + index="test-serialization", + id=42, + body={ + "i": [1, 2, "3", None], + "b": [True, False, "true", "false", None], + "d": [0.1, "-0.1", None], + "bin": ["SGVsbG8gV29ybGQ=", None], + "ip": ["::1", "127.0.0.1", None], + }, + ) + sd = await SerializationDoc.get(id=42) + assert sd is not None + + assert sd.i == [1, 2, 3, None] + assert sd.b == [True, False, True, False, None] + assert sd.d == [0.1, -0.1, None] + assert sd.bin == [b"Hello World", None] + assert sd.ip == [ip_address("::1"), ip_address("127.0.0.1"), None] + + assert sd.to_dict() == { + "b": [True, False, True, False, None], + "bin": ["SGVsbG8gV29ybGQ=", None], + "d": [0.1, -0.1, None], + "i": [1, 2, 3, None], + "ip": ["::1", "127.0.0.1", None], + } + + +@pytest.mark.asyncio +async def test_nested_inner_hits_are_wrapped_properly(async_pull_request: Any) -> None: + history_query = Q( + "nested", + path="comments.history", + inner_hits={}, + query=Q("match", comments__history__diff="ahoj"), + ) + s = PullRequest.search().query( + "nested", inner_hits={}, path="comments", query=history_query + ) + + response = await s.execute() + pr = response.hits[0] + assert isinstance(pr, PullRequest) + 
assert isinstance(pr.comments[0], Comment) + assert isinstance(pr.comments[0].history[0], History) + + comment = pr.meta.inner_hits.comments.hits[0] + assert isinstance(comment, Comment) + assert comment.author.name == "honzakral" + assert isinstance(comment.history[0], History) + + history = comment.meta.inner_hits["comments.history"].hits[0] + assert isinstance(history, History) + assert history.timestamp == datetime(2012, 1, 1) + assert "score" in history.meta + + +@pytest.mark.asyncio +async def test_nested_inner_hits_are_deserialized_properly( + async_pull_request: Any, +) -> None: + s = PullRequest.search().query( + "nested", + inner_hits={}, + path="comments", + query=Q("match", comments__content="hello"), + ) + + response = await s.execute() + pr = response.hits[0] + assert isinstance(pr.created_at, datetime) + assert isinstance(pr.comments[0], Comment) + assert isinstance(pr.comments[0].created_at, datetime) + + +@pytest.mark.asyncio +async def test_nested_top_hits_are_wrapped_properly(async_pull_request: Any) -> None: + s = PullRequest.search() + s.aggs.bucket("comments", "nested", path="comments").metric( + "hits", "top_hits", size=1 + ) + + r = await s.execute() + + print(r._d_) + assert isinstance(r.aggregations.comments.hits.hits[0], Comment) + + +@pytest.mark.asyncio +async def test_update_object_field(async_write_client: AsyncElasticsearch) -> None: + await Wiki.init() + w = Wiki( + owner=User(name="Honza Kral"), + _id="elasticsearch-py", + ranked={"test1": 0.1, "topic2": 0.2}, + ) + await w.save() + + assert "updated" == await w.update(owner=[{"name": "Honza"}, User(name="Nick")]) + assert w.owner[0].name == "Honza" + assert w.owner[1].name == "Nick" + + w = await Wiki.get(id="elasticsearch-py") + assert w.owner[0].name == "Honza" + assert w.owner[1].name == "Nick" + + assert w.ranked == {"test1": 0.1, "topic2": 0.2} + + +@pytest.mark.asyncio +async def test_update_script(async_write_client: AsyncElasticsearch) -> None: + await Wiki.init() + w = Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42) + await w.save() + + await w.update(script="ctx._source.views += params.inc", inc=5) + w = await Wiki.get(id="elasticsearch-py") + assert w.views == 47 + + +@pytest.mark.asyncio +async def test_update_script_with_dict(async_write_client: AsyncElasticsearch) -> None: + await Wiki.init() + w = Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42) + await w.save() + + await w.update( + script={ + "source": "ctx._source.views += params.inc1 + params.inc2", + "params": {"inc1": 2}, + "lang": "painless", + }, + inc2=3, + ) + w = await Wiki.get(id="elasticsearch-py") + assert w.views == 47 + + +@pytest.mark.asyncio +async def test_update_retry_on_conflict(async_write_client: AsyncElasticsearch) -> None: + await Wiki.init() + w = Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42) + await w.save() + + w1 = await Wiki.get(id="elasticsearch-py") + w2 = await Wiki.get(id="elasticsearch-py") + assert w1 is not None + assert w2 is not None + + await w1.update( + script="ctx._source.views += params.inc", inc=5, retry_on_conflict=1 + ) + await w2.update( + script="ctx._source.views += params.inc", inc=5, retry_on_conflict=1 + ) + + w = await Wiki.get(id="elasticsearch-py") + assert w.views == 52 + + +@pytest.mark.asyncio +@pytest.mark.parametrize("retry_on_conflict", [None, 0]) +async def test_update_conflicting_version( + async_write_client: AsyncElasticsearch, retry_on_conflict: bool +) -> None: + await Wiki.init() + w = 
Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42) + await w.save() + + w1 = await Wiki.get(id="elasticsearch-py") + w2 = await Wiki.get(id="elasticsearch-py") + assert w1 is not None + assert w2 is not None + + await w1.update(script="ctx._source.views += params.inc", inc=5) + + with raises(ConflictError): + await w2.update( + script="ctx._source.views += params.inc", + inc=5, + retry_on_conflict=retry_on_conflict, + ) + + +@pytest.mark.asyncio +async def test_save_and_update_return_doc_meta( + async_write_client: AsyncElasticsearch, +) -> None: + await Wiki.init() + w = Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42) + resp = await w.save(return_doc_meta=True) + assert resp["_index"] == "test-wiki" + assert resp["result"] == "created" + assert set(resp.keys()) == { + "_id", + "_index", + "_primary_term", + "_seq_no", + "_shards", + "_version", + "result", + } + + resp = await w.update( + script="ctx._source.views += params.inc", inc=5, return_doc_meta=True + ) + assert resp["_index"] == "test-wiki" + assert resp["result"] == "updated" + assert set(resp.keys()) == { + "_id", + "_index", + "_primary_term", + "_seq_no", + "_shards", + "_version", + "result", + } + + +@pytest.mark.asyncio +async def test_init(async_write_client: AsyncElasticsearch) -> None: + await Repository.init(index="test-git") + + assert await async_write_client.indices.exists(index="test-git") + + +@pytest.mark.asyncio +async def test_get_raises_404_on_index_missing( + async_data_client: AsyncElasticsearch, +) -> None: + with raises(NotFoundError): + await Repository.get("elasticsearch-dsl-php", index="not-there") + + +@pytest.mark.asyncio +async def test_get_raises_404_on_non_existent_id( + async_data_client: AsyncElasticsearch, +) -> None: + with raises(NotFoundError): + await Repository.get("elasticsearch-dsl-php") + + +@pytest.mark.asyncio +async def test_get_returns_none_if_404_ignored( + async_data_client: AsyncElasticsearch, +) -> None: + assert None is await Repository.get( + "elasticsearch-dsl-php", using=async_data_client.options(ignore_status=404) + ) + + +@pytest.mark.asyncio +async def test_get_returns_none_if_404_ignored_and_index_doesnt_exist( + async_data_client: AsyncElasticsearch, +) -> None: + assert None is await Repository.get( + "42", index="not-there", using=async_data_client.options(ignore_status=404) + ) + + +@pytest.mark.asyncio +async def test_get(async_data_client: AsyncElasticsearch) -> None: + elasticsearch_repo = await Repository.get("elasticsearch-dsl-py") + + assert isinstance(elasticsearch_repo, Repository) + assert elasticsearch_repo.owner.name == "elasticsearch" + assert datetime(2014, 3, 3) == elasticsearch_repo.created_at + + +@pytest.mark.asyncio +async def test_exists_return_true(async_data_client: AsyncElasticsearch) -> None: + assert await Repository.exists("elasticsearch-dsl-py") + + +@pytest.mark.asyncio +async def test_exists_false(async_data_client: AsyncElasticsearch) -> None: + assert not await Repository.exists("elasticsearch-dsl-php") + + +@pytest.mark.asyncio +async def test_get_with_tz_date(async_data_client: AsyncElasticsearch) -> None: + first_commit = await Commit.get( + id="3ca6e1e73a071a705b4babd2f581c91a2a3e5037", routing="elasticsearch-dsl-py" + ) + assert first_commit is not None + + tzinfo = timezone("Europe/Prague") + assert ( + tzinfo.localize(datetime(2014, 5, 2, 13, 47, 19, 123000)) + == first_commit.authored_date + ) + + +@pytest.mark.asyncio +async def test_save_with_tz_date(async_data_client: AsyncElasticsearch) 
-> None: + tzinfo = timezone("Europe/Prague") + first_commit = await Commit.get( + id="3ca6e1e73a071a705b4babd2f581c91a2a3e5037", routing="elasticsearch-dsl-py" + ) + assert first_commit is not None + + first_commit.committed_date = tzinfo.localize( + datetime(2014, 5, 2, 13, 47, 19, 123456) + ) + await first_commit.save() + + first_commit = await Commit.get( + id="3ca6e1e73a071a705b4babd2f581c91a2a3e5037", routing="elasticsearch-dsl-py" + ) + assert first_commit is not None + + assert ( + tzinfo.localize(datetime(2014, 5, 2, 13, 47, 19, 123456)) + == first_commit.committed_date + ) + + +COMMIT_DOCS_WITH_MISSING = [ + {"_id": "0"}, # Missing + {"_id": "3ca6e1e73a071a705b4babd2f581c91a2a3e5037"}, # Existing + {"_id": "f"}, # Missing + {"_id": "eb3e543323f189fd7b698e66295427204fff5755"}, # Existing +] + + +@pytest.mark.asyncio +async def test_mget(async_data_client: AsyncElasticsearch) -> None: + commits = await Commit.mget(COMMIT_DOCS_WITH_MISSING) + assert commits[0] is None + assert commits[1] is not None + assert commits[1].meta.id == "3ca6e1e73a071a705b4babd2f581c91a2a3e5037" + assert commits[2] is None + assert commits[3] is not None + assert commits[3].meta.id == "eb3e543323f189fd7b698e66295427204fff5755" + + +@pytest.mark.asyncio +async def test_mget_raises_exception_when_missing_param_is_invalid( + async_data_client: AsyncElasticsearch, +) -> None: + with raises(ValueError): + await Commit.mget(COMMIT_DOCS_WITH_MISSING, missing="raj") + + +@pytest.mark.asyncio +async def test_mget_raises_404_when_missing_param_is_raise( + async_data_client: AsyncElasticsearch, +) -> None: + with raises(NotFoundError): + await Commit.mget(COMMIT_DOCS_WITH_MISSING, missing="raise") + + +@pytest.mark.asyncio +async def test_mget_ignores_missing_docs_when_missing_param_is_skip( + async_data_client: AsyncElasticsearch, +) -> None: + commits = await Commit.mget(COMMIT_DOCS_WITH_MISSING, missing="skip") + assert commits[0] is not None + assert commits[0].meta.id == "3ca6e1e73a071a705b4babd2f581c91a2a3e5037" + assert commits[1] is not None + assert commits[1].meta.id == "eb3e543323f189fd7b698e66295427204fff5755" + + +@pytest.mark.asyncio +async def test_update_works_from_search_response( + async_data_client: AsyncElasticsearch, +) -> None: + elasticsearch_repo = (await Repository.search().execute())[0] + + await elasticsearch_repo.update(owner={"other_name": "elastic"}) + assert "elastic" == elasticsearch_repo.owner.other_name + + new_version = await Repository.get("elasticsearch-dsl-py") + assert new_version is not None + assert "elastic" == new_version.owner.other_name + assert "elasticsearch" == new_version.owner.name + + +@pytest.mark.asyncio +async def test_update(async_data_client: AsyncElasticsearch) -> None: + elasticsearch_repo = await Repository.get("elasticsearch-dsl-py") + assert elasticsearch_repo is not None + v = elasticsearch_repo.meta.version + + old_seq_no = elasticsearch_repo.meta.seq_no + await elasticsearch_repo.update( + owner={"new_name": "elastic"}, new_field="testing-update" + ) + + assert "elastic" == elasticsearch_repo.owner.new_name + assert "testing-update" == elasticsearch_repo.new_field + + # assert version has been updated + assert elasticsearch_repo.meta.version == v + 1 + + new_version = await Repository.get("elasticsearch-dsl-py") + assert new_version is not None + assert "testing-update" == new_version.new_field + assert "elastic" == new_version.owner.new_name + assert "elasticsearch" == new_version.owner.name + assert "seq_no" in new_version.meta + assert 
new_version.meta.seq_no != old_seq_no + assert "primary_term" in new_version.meta + + +@pytest.mark.asyncio +async def test_save_updates_existing_doc(async_data_client: AsyncElasticsearch) -> None: + elasticsearch_repo = await Repository.get("elasticsearch-dsl-py") + assert elasticsearch_repo is not None + + elasticsearch_repo.new_field = "testing-save" + old_seq_no = elasticsearch_repo.meta.seq_no + assert "updated" == await elasticsearch_repo.save() + + new_repo = await async_data_client.get(index="git", id="elasticsearch-dsl-py") + assert "testing-save" == new_repo["_source"]["new_field"] + assert new_repo["_seq_no"] != old_seq_no + assert new_repo["_seq_no"] == elasticsearch_repo.meta.seq_no + + +@pytest.mark.asyncio +async def test_update_empty_field(async_client: AsyncElasticsearch) -> None: + await Tags._index.delete(ignore_unavailable=True) + await Tags.init() + d = Tags(id="123", tags=["a", "b"]) + await d.save(refresh=True) + await d.update(tags=[], refresh=True) + assert d.tags == [] + + r = await Tags.search().execute() + assert r.hits[0].tags == [] + + +@pytest.mark.asyncio +async def test_save_automatically_uses_seq_no_and_primary_term( + async_data_client: AsyncElasticsearch, +) -> None: + elasticsearch_repo = await Repository.get("elasticsearch-dsl-py") + assert elasticsearch_repo is not None + elasticsearch_repo.meta.seq_no += 1 + + with raises(ConflictError): + await elasticsearch_repo.save() + + +@pytest.mark.asyncio +async def test_delete_automatically_uses_seq_no_and_primary_term( + async_data_client: AsyncElasticsearch, +) -> None: + elasticsearch_repo = await Repository.get("elasticsearch-dsl-py") + assert elasticsearch_repo is not None + elasticsearch_repo.meta.seq_no += 1 + + with raises(ConflictError): + await elasticsearch_repo.delete() + + +def assert_doc_equals(expected: Any, actual: Any) -> None: + for f in expected: + assert f in actual + assert actual[f] == expected[f] + + +@pytest.mark.asyncio +async def test_can_save_to_different_index( + async_write_client: AsyncElasticsearch, +) -> None: + test_repo = Repository(description="testing", meta={"id": 42}) + assert await test_repo.save(index="test-document") + + assert_doc_equals( + { + "found": True, + "_index": "test-document", + "_id": "42", + "_source": {"description": "testing"}, + }, + await async_write_client.get(index="test-document", id=42), + ) + + +@pytest.mark.asyncio +async def test_save_without_skip_empty_will_include_empty_fields( + async_write_client: AsyncElasticsearch, +) -> None: + test_repo = Repository(field_1=[], field_2=None, field_3={}, meta={"id": 42}) + assert await test_repo.save(index="test-document", skip_empty=False) + + assert_doc_equals( + { + "found": True, + "_index": "test-document", + "_id": "42", + "_source": {"field_1": [], "field_2": None, "field_3": {}}, + }, + await async_write_client.get(index="test-document", id=42), + ) + + +@pytest.mark.asyncio +async def test_delete(async_write_client: AsyncElasticsearch) -> None: + await async_write_client.create( + index="test-document", + id="elasticsearch-dsl-py", + body={ + "organization": "elasticsearch", + "created_at": "2014-03-03", + "owner": {"name": "elasticsearch"}, + }, + ) + + test_repo = Repository(meta={"id": "elasticsearch-dsl-py"}) + test_repo.meta.index = "test-document" + await test_repo.delete() + + assert not await async_write_client.exists( + index="test-document", + id="elasticsearch-dsl-py", + ) + + +@pytest.mark.asyncio +async def test_search(async_data_client: AsyncElasticsearch) -> None: + assert await 
Repository.search().count() == 1 + + +@pytest.mark.asyncio +async def test_search_returns_proper_doc_classes( + async_data_client: AsyncElasticsearch, +) -> None: + result = await Repository.search().execute() + + elasticsearch_repo = result.hits[0] + + assert isinstance(elasticsearch_repo, Repository) + assert elasticsearch_repo.owner.name == "elasticsearch" + + +@pytest.mark.asyncio +async def test_refresh_mapping(async_data_client: AsyncElasticsearch) -> None: + class Commit(AsyncDocument): + class Index: + name = "git" + + await Commit._index.load_mappings() + + assert "stats" in Commit._index._mapping + assert "committer" in Commit._index._mapping + assert "description" in Commit._index._mapping + assert "committed_date" in Commit._index._mapping + assert isinstance(Commit._index._mapping["committed_date"], Date) + + +@pytest.mark.asyncio +async def test_highlight_in_meta(async_data_client: AsyncElasticsearch) -> None: + commit = ( + await Commit.search() + .query("match", description="inverting") + .highlight("description") + .execute() + )[0] + + assert isinstance(commit, Commit) + assert "description" in commit.meta.highlight + assert isinstance(commit.meta.highlight["description"], AttrList) + assert len(commit.meta.highlight["description"]) > 0 + + +@pytest.mark.asyncio +async def test_bulk(async_data_client: AsyncElasticsearch) -> None: + class Address(InnerDoc): + street: str + active: bool + + class Doc(AsyncDocument): + if TYPE_CHECKING: + _id: int + name: str + age: int + languages: List[str] = mapped_field(Keyword()) + addresses: List[Address] + + class Index: + name = "bulk-index" + + await Doc._index.delete(ignore_unavailable=True) + await Doc.init() + + async def gen1() -> AsyncIterator[Union[Doc, Dict[str, Any]]]: + yield Doc( + name="Joe", + age=33, + languages=["en", "fr"], + addresses=[ + Address(street="123 Main St", active=True), + Address(street="321 Park Dr.", active=False), + ], + ) + yield Doc(name="Susan", age=20, languages=["en"]) + yield {"_op_type": "create", "_id": "45", "_source": Doc(name="Sarah", age=45)} + + await Doc.bulk(gen1(), refresh=True) + docs = list(await Doc.search().execute()) + assert len(docs) == 3 + assert docs[0].to_dict() == { + "name": "Joe", + "age": 33, + "languages": [ + "en", + "fr", + ], + "addresses": [ + { + "active": True, + "street": "123 Main St", + }, + { + "active": False, + "street": "321 Park Dr.", + }, + ], + } + assert docs[1].to_dict() == { + "name": "Susan", + "age": 20, + "languages": ["en"], + } + assert docs[2].to_dict() == { + "name": "Sarah", + "age": 45, + } + assert docs[2].meta.id == "45" + + async def gen2() -> AsyncIterator[Union[Doc, Dict[str, Any]]]: + yield {"_op_type": "create", "_id": "45", "_source": Doc(name="Sarah", age=45)} + + # a "create" action with an existing id should fail + with raises(BulkIndexError): + await Doc.bulk(gen2(), refresh=True) + + async def gen3() -> AsyncIterator[Union[Doc, Dict[str, Any]]]: + yield Doc(_id="45", name="Sarah", age=45, languages=["es"]) + yield {"_op_type": "delete", "_id": docs[1].meta.id} + + await Doc.bulk(gen3(), refresh=True) + with raises(NotFoundError): + await Doc.get(docs[1].meta.id) + doc = await Doc.get("45") + assert doc is not None + assert (doc).to_dict() == { + "name": "Sarah", + "age": 45, + "languages": ["es"], + } + + +@pytest.mark.asyncio +async def test_legacy_dense_vector( + async_client: AsyncElasticsearch, es_version: Tuple[int, ...] 
+) -> None: + if es_version >= (8, 16): + pytest.skip("this test is a legacy version for Elasticsearch 8.15 or older") + + class Doc(AsyncDocument): + float_vector: List[float] = mapped_field(DenseVector(dims=3)) + + class Index: + name = "vectors" + + await Doc._index.delete(ignore_unavailable=True) + await Doc.init() + + doc = Doc(float_vector=[1.0, 1.2, 2.3]) + await doc.save(refresh=True) + + docs = await Doc.search().execute() + assert len(docs) == 1 + assert docs[0].float_vector == doc.float_vector + + +@pytest.mark.asyncio +async def test_dense_vector( + async_client: AsyncElasticsearch, es_version: Tuple[int, ...] +) -> None: + if es_version < (8, 16): + pytest.skip("this test requires Elasticsearch 8.16 or newer") + + class Doc(AsyncDocument): + float_vector: List[float] = mapped_field(DenseVector()) + byte_vector: List[int] = mapped_field(DenseVector(element_type="byte")) + bit_vector: str = mapped_field(DenseVector(element_type="bit")) + + class Index: + name = "vectors" + + await Doc._index.delete(ignore_unavailable=True) + await Doc.init() + + doc = Doc( + float_vector=[1.0, 1.2, 2.3], byte_vector=[12, 23, 34, 45], bit_vector="12abf0" + ) + await doc.save(refresh=True) + + docs = await Doc.search().execute() + assert len(docs) == 1 + assert docs[0].float_vector == doc.float_vector + assert docs[0].byte_vector == doc.byte_vector + assert docs[0].bit_vector == doc.bit_vector diff --git a/test_elasticsearch/test_dsl/test_integration/_async/test_faceted_search.py b/test_elasticsearch/test_dsl/test_integration/_async/test_faceted_search.py new file mode 100644 index 000000000..bb0fd9257 --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/_async/test_faceted_search.py @@ -0,0 +1,305 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +from datetime import datetime +from typing import Tuple, Type + +import pytest + +from elasticsearch import AsyncElasticsearch +from elasticsearch.dsl import A, AsyncDocument, AsyncSearch, Boolean, Date, Keyword +from elasticsearch.dsl.faceted_search import ( + AsyncFacetedSearch, + DateHistogramFacet, + NestedFacet, + RangeFacet, + TermsFacet, +) + +from .test_document import PullRequest + + +class Repos(AsyncDocument): + is_public = Boolean() + created_at = Date() + + class Index: + name = "git" + + +class Commit(AsyncDocument): + files = Keyword() + committed_date = Date() + + class Index: + name = "git" + + +class MetricSearch(AsyncFacetedSearch): + index = "git" + doc_types = [Commit] + + facets = { + "files": TermsFacet(field="files", metric=A("max", field="committed_date")), + } + + +@pytest.fixture +def commit_search_cls(es_version: Tuple[int, ...]) -> Type[AsyncFacetedSearch]: + if es_version >= (7, 2): + interval_kwargs = {"fixed_interval": "1d"} + else: + interval_kwargs = {"interval": "day"} + + class CommitSearch(AsyncFacetedSearch): + index = "flat-git" + fields = ( + "description", + "files", + ) + + facets = { + "files": TermsFacet(field="files"), + "frequency": DateHistogramFacet( + field="authored_date", min_doc_count=1, **interval_kwargs + ), + "deletions": RangeFacet( + field="stats.deletions", + ranges=[("ok", (None, 1)), ("good", (1, 5)), ("better", (5, None))], + ), + } + + return CommitSearch + + +@pytest.fixture +def repo_search_cls(es_version: Tuple[int, ...]) -> Type[AsyncFacetedSearch]: + interval_type = "calendar_interval" if es_version >= (7, 2) else "interval" + + class RepoSearch(AsyncFacetedSearch): + index = "git" + doc_types = [Repos] + facets = { + "public": TermsFacet(field="is_public"), + "created": DateHistogramFacet( + field="created_at", **{interval_type: "month"} + ), + } + + def search(self) -> AsyncSearch: + s = super().search() + return s.filter("term", commit_repo="repo") + + return RepoSearch + + +@pytest.fixture +def pr_search_cls(es_version: Tuple[int, ...]) -> Type[AsyncFacetedSearch]: + interval_type = "calendar_interval" if es_version >= (7, 2) else "interval" + + class PRSearch(AsyncFacetedSearch): + index = "test-prs" + doc_types = [PullRequest] + facets = { + "comments": NestedFacet( + "comments", + DateHistogramFacet( + field="comments.created_at", **{interval_type: "month"} + ), + ) + } + + return PRSearch + + +@pytest.mark.asyncio +async def test_facet_with_custom_metric(async_data_client: AsyncElasticsearch) -> None: + ms = MetricSearch() + r = await ms.execute() + + dates = [f[1] for f in r.facets.files] + assert dates == list(sorted(dates, reverse=True)) + assert dates[0] == 1399038439000 + + +@pytest.mark.asyncio +async def test_nested_facet( + async_pull_request: PullRequest, pr_search_cls: Type[AsyncFacetedSearch] +) -> None: + prs = pr_search_cls() + r = await prs.execute() + + assert r.hits.total.value == 1 # type: ignore[attr-defined] + assert [(datetime(2018, 1, 1, 0, 0), 1, False)] == r.facets.comments + + +@pytest.mark.asyncio +async def test_nested_facet_with_filter( + async_pull_request: PullRequest, pr_search_cls: Type[AsyncFacetedSearch] +) -> None: + prs = pr_search_cls(filters={"comments": datetime(2018, 1, 1, 0, 0)}) + r = await prs.execute() + + assert r.hits.total.value == 1 # type: ignore[attr-defined] + assert [(datetime(2018, 1, 1, 0, 0), 1, True)] == r.facets.comments + + prs = pr_search_cls(filters={"comments": datetime(2018, 2, 1, 0, 0)}) + r = await prs.execute() + assert not r.hits + + 
+@pytest.mark.asyncio +async def test_datehistogram_facet( + async_data_client: AsyncElasticsearch, repo_search_cls: Type[AsyncFacetedSearch] +) -> None: + rs = repo_search_cls() + r = await rs.execute() + + assert r.hits.total.value == 1 # type: ignore[attr-defined] + assert [(datetime(2014, 3, 1, 0, 0), 1, False)] == r.facets.created + + +@pytest.mark.asyncio +async def test_boolean_facet( + async_data_client: AsyncElasticsearch, repo_search_cls: Type[AsyncFacetedSearch] +) -> None: + rs = repo_search_cls() + r = await rs.execute() + + assert r.hits.total.value == 1 # type: ignore[attr-defined] + assert [(True, 1, False)] == r.facets.public + value, count, selected = r.facets.public[0] + assert value is True + + +@pytest.mark.asyncio +async def test_empty_search_finds_everything( + async_data_client: AsyncElasticsearch, + es_version: Tuple[int, ...], + commit_search_cls: Type[AsyncFacetedSearch], +) -> None: + cs = commit_search_cls() + r = await cs.execute() + + assert r.hits.total.value == 52 # type: ignore[attr-defined] + assert [ + ("elasticsearch_dsl", 40, False), + ("test_elasticsearch_dsl", 35, False), + ("elasticsearch_dsl/query.py", 19, False), + ("test_elasticsearch_dsl/test_search.py", 15, False), + ("elasticsearch_dsl/utils.py", 14, False), + ("test_elasticsearch_dsl/test_query.py", 13, False), + ("elasticsearch_dsl/search.py", 12, False), + ("elasticsearch_dsl/aggs.py", 11, False), + ("test_elasticsearch_dsl/test_result.py", 5, False), + ("elasticsearch_dsl/result.py", 3, False), + ] == r.facets.files + + assert [ + (datetime(2014, 3, 3, 0, 0), 2, False), + (datetime(2014, 3, 4, 0, 0), 1, False), + (datetime(2014, 3, 5, 0, 0), 3, False), + (datetime(2014, 3, 6, 0, 0), 3, False), + (datetime(2014, 3, 7, 0, 0), 9, False), + (datetime(2014, 3, 10, 0, 0), 2, False), + (datetime(2014, 3, 15, 0, 0), 4, False), + (datetime(2014, 3, 21, 0, 0), 2, False), + (datetime(2014, 3, 23, 0, 0), 2, False), + (datetime(2014, 3, 24, 0, 0), 10, False), + (datetime(2014, 4, 20, 0, 0), 2, False), + (datetime(2014, 4, 22, 0, 0), 2, False), + (datetime(2014, 4, 25, 0, 0), 3, False), + (datetime(2014, 4, 26, 0, 0), 2, False), + (datetime(2014, 4, 27, 0, 0), 2, False), + (datetime(2014, 5, 1, 0, 0), 2, False), + (datetime(2014, 5, 2, 0, 0), 1, False), + ] == r.facets.frequency + + assert [ + ("ok", 19, False), + ("good", 14, False), + ("better", 19, False), + ] == r.facets.deletions + + +@pytest.mark.asyncio +async def test_term_filters_are_shown_as_selected_and_data_is_filtered( + async_data_client: AsyncElasticsearch, commit_search_cls: Type[AsyncFacetedSearch] +) -> None: + cs = commit_search_cls(filters={"files": "test_elasticsearch_dsl"}) + + r = await cs.execute() + + assert 35 == r.hits.total.value # type: ignore[attr-defined] + assert [ + ("elasticsearch_dsl", 40, False), + ("test_elasticsearch_dsl", 35, True), # selected + ("elasticsearch_dsl/query.py", 19, False), + ("test_elasticsearch_dsl/test_search.py", 15, False), + ("elasticsearch_dsl/utils.py", 14, False), + ("test_elasticsearch_dsl/test_query.py", 13, False), + ("elasticsearch_dsl/search.py", 12, False), + ("elasticsearch_dsl/aggs.py", 11, False), + ("test_elasticsearch_dsl/test_result.py", 5, False), + ("elasticsearch_dsl/result.py", 3, False), + ] == r.facets.files + + assert [ + (datetime(2014, 3, 3, 0, 0), 1, False), + (datetime(2014, 3, 5, 0, 0), 2, False), + (datetime(2014, 3, 6, 0, 0), 3, False), + (datetime(2014, 3, 7, 0, 0), 6, False), + (datetime(2014, 3, 10, 0, 0), 1, False), + (datetime(2014, 3, 15, 0, 0), 3, False), + 
(datetime(2014, 3, 21, 0, 0), 2, False), + (datetime(2014, 3, 23, 0, 0), 1, False), + (datetime(2014, 3, 24, 0, 0), 7, False), + (datetime(2014, 4, 20, 0, 0), 1, False), + (datetime(2014, 4, 25, 0, 0), 3, False), + (datetime(2014, 4, 26, 0, 0), 2, False), + (datetime(2014, 4, 27, 0, 0), 1, False), + (datetime(2014, 5, 1, 0, 0), 1, False), + (datetime(2014, 5, 2, 0, 0), 1, False), + ] == r.facets.frequency + + assert [ + ("ok", 12, False), + ("good", 10, False), + ("better", 13, False), + ] == r.facets.deletions + + +@pytest.mark.asyncio +async def test_range_filters_are_shown_as_selected_and_data_is_filtered( + async_data_client: AsyncElasticsearch, commit_search_cls: Type[AsyncFacetedSearch] +) -> None: + cs = commit_search_cls(filters={"deletions": "better"}) + + r = await cs.execute() + + assert 19 == r.hits.total.value # type: ignore[attr-defined] + + +@pytest.mark.asyncio +async def test_pagination( + async_data_client: AsyncElasticsearch, commit_search_cls: Type[AsyncFacetedSearch] +) -> None: + cs = commit_search_cls() + cs = cs[0:20] + + assert 52 == await cs.count() + assert 20 == len(await cs.execute()) diff --git a/test_elasticsearch/test_dsl/test_integration/_async/test_index.py b/test_elasticsearch/test_dsl/test_integration/_async/test_index.py new file mode 100644 index 000000000..e150d1e59 --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/_async/test_index.py @@ -0,0 +1,162 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import pytest + +from elasticsearch import AsyncElasticsearch +from elasticsearch.dsl import ( + AsyncComposableIndexTemplate, + AsyncDocument, + AsyncIndex, + AsyncIndexTemplate, + Date, + Text, + analysis, +) + + +class Post(AsyncDocument): + title = Text(analyzer=analysis.analyzer("my_analyzer", tokenizer="keyword")) + published_from = Date() + + +@pytest.mark.asyncio +async def test_index_template_works(async_write_client: AsyncElasticsearch) -> None: + it = AsyncIndexTemplate("test-template", "test-legacy-*") + it.document(Post) + it.settings(number_of_replicas=0, number_of_shards=1) + await it.save() + + i = AsyncIndex("test-legacy-blog") + await i.create() + + assert { + "test-legacy-blog": { + "mappings": { + "properties": { + "title": {"type": "text", "analyzer": "my_analyzer"}, + "published_from": {"type": "date"}, + } + } + } + } == await async_write_client.indices.get_mapping(index="test-legacy-blog") + + +@pytest.mark.asyncio +async def test_composable_index_template_works( + async_write_client: AsyncElasticsearch, +) -> None: + it = AsyncComposableIndexTemplate("test-template", "test-*") + it.document(Post) + it.settings(number_of_replicas=0, number_of_shards=1) + await it.save() + + i = AsyncIndex("test-blog") + await i.create() + + assert { + "test-blog": { + "mappings": { + "properties": { + "title": {"type": "text", "analyzer": "my_analyzer"}, + "published_from": {"type": "date"}, + } + } + } + } == await async_write_client.indices.get_mapping(index="test-blog") + + +@pytest.mark.asyncio +async def test_index_can_be_saved_even_with_settings( + async_write_client: AsyncElasticsearch, +) -> None: + i = AsyncIndex("test-blog", using=async_write_client) + i.settings(number_of_shards=3, number_of_replicas=0) + await i.save() + i.settings(number_of_replicas=1) + await i.save() + + assert ( + "1" + == (await i.get_settings())["test-blog"]["settings"]["index"][ + "number_of_replicas" + ] + ) + + +@pytest.mark.asyncio +async def test_index_exists(async_data_client: AsyncElasticsearch) -> None: + assert await AsyncIndex("git").exists() + assert not await AsyncIndex("not-there").exists() + + +@pytest.mark.asyncio +async def test_index_can_be_created_with_settings_and_mappings( + async_write_client: AsyncElasticsearch, +) -> None: + i = AsyncIndex("test-blog", using=async_write_client) + i.document(Post) + i.settings(number_of_replicas=0, number_of_shards=1) + await i.create() + + assert { + "test-blog": { + "mappings": { + "properties": { + "title": {"type": "text", "analyzer": "my_analyzer"}, + "published_from": {"type": "date"}, + } + } + } + } == await async_write_client.indices.get_mapping(index="test-blog") + + settings = await async_write_client.indices.get_settings(index="test-blog") + assert settings["test-blog"]["settings"]["index"]["number_of_replicas"] == "0" + assert settings["test-blog"]["settings"]["index"]["number_of_shards"] == "1" + assert settings["test-blog"]["settings"]["index"]["analysis"] == { + "analyzer": {"my_analyzer": {"type": "custom", "tokenizer": "keyword"}} + } + + +@pytest.mark.asyncio +async def test_delete(async_write_client: AsyncElasticsearch) -> None: + await async_write_client.indices.create( + index="test-index", + body={"settings": {"number_of_replicas": 0, "number_of_shards": 1}}, + ) + + i = AsyncIndex("test-index", using=async_write_client) + await i.delete() + assert not await async_write_client.indices.exists(index="test-index") + + +@pytest.mark.asyncio +async def test_multiple_indices_with_same_doc_type_work( + async_write_client: 
AsyncElasticsearch, +) -> None: + i1 = AsyncIndex("test-index-1", using=async_write_client) + i2 = AsyncIndex("test-index-2", using=async_write_client) + + for i in (i1, i2): + i.document(Post) + await i.create() + + for j in ("test-index-1", "test-index-2"): + settings = await async_write_client.indices.get_settings(index=j) + assert settings[j]["settings"]["index"]["analysis"] == { + "analyzer": {"my_analyzer": {"type": "custom", "tokenizer": "keyword"}} + } diff --git a/test_elasticsearch/test_dsl/test_integration/_async/test_mapping.py b/test_elasticsearch/test_dsl/test_integration/_async/test_mapping.py new file mode 100644 index 000000000..f370c89c4 --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/_async/test_mapping.py @@ -0,0 +1,171 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import pytest +from pytest import raises + +from elasticsearch import AsyncElasticsearch +from elasticsearch.dsl import AsyncMapping, analysis, exceptions + + +@pytest.mark.asyncio +async def test_mapping_saved_into_es(async_write_client: AsyncElasticsearch) -> None: + m = AsyncMapping() + m.field( + "name", "text", analyzer=analysis.analyzer("my_analyzer", tokenizer="keyword") + ) + m.field("tags", "keyword") + await m.save("test-mapping", using=async_write_client) + + assert { + "test-mapping": { + "mappings": { + "properties": { + "name": {"type": "text", "analyzer": "my_analyzer"}, + "tags": {"type": "keyword"}, + } + } + } + } == await async_write_client.indices.get_mapping(index="test-mapping") + + +@pytest.mark.asyncio +async def test_mapping_saved_into_es_when_index_already_exists_closed( + async_write_client: AsyncElasticsearch, +) -> None: + m = AsyncMapping() + m.field( + "name", "text", analyzer=analysis.analyzer("my_analyzer", tokenizer="keyword") + ) + await async_write_client.indices.create(index="test-mapping") + + with raises(exceptions.IllegalOperation): + await m.save("test-mapping", using=async_write_client) + + await async_write_client.cluster.health( + index="test-mapping", wait_for_status="yellow" + ) + await async_write_client.indices.close(index="test-mapping") + await m.save("test-mapping", using=async_write_client) + + assert { + "test-mapping": { + "mappings": { + "properties": {"name": {"type": "text", "analyzer": "my_analyzer"}} + } + } + } == await async_write_client.indices.get_mapping(index="test-mapping") + + +@pytest.mark.asyncio +async def test_mapping_saved_into_es_when_index_already_exists_with_analysis( + async_write_client: AsyncElasticsearch, +) -> None: + m = AsyncMapping() + analyzer = analysis.analyzer("my_analyzer", tokenizer="keyword") + m.field("name", "text", analyzer=analyzer) + + new_analysis = analyzer.get_analysis_definition() + new_analysis["analyzer"]["other_analyzer"] = { + "type": "custom", + 
"tokenizer": "whitespace", + } + await async_write_client.indices.create( + index="test-mapping", body={"settings": {"analysis": new_analysis}} + ) + + m.field("title", "text", analyzer=analyzer) + await m.save("test-mapping", using=async_write_client) + + assert { + "test-mapping": { + "mappings": { + "properties": { + "name": {"type": "text", "analyzer": "my_analyzer"}, + "title": {"type": "text", "analyzer": "my_analyzer"}, + } + } + } + } == await async_write_client.indices.get_mapping(index="test-mapping") + + +@pytest.mark.asyncio +async def test_mapping_gets_updated_from_es( + async_write_client: AsyncElasticsearch, +) -> None: + await async_write_client.indices.create( + index="test-mapping", + body={ + "settings": {"number_of_shards": 1, "number_of_replicas": 0}, + "mappings": { + "date_detection": False, + "properties": { + "title": { + "type": "text", + "analyzer": "snowball", + "fields": {"raw": {"type": "keyword"}}, + }, + "created_at": {"type": "date"}, + "comments": { + "type": "nested", + "properties": { + "created": {"type": "date"}, + "author": { + "type": "text", + "analyzer": "snowball", + "fields": {"raw": {"type": "keyword"}}, + }, + }, + }, + }, + }, + }, + ) + + m = await AsyncMapping.from_es("test-mapping", using=async_write_client) + + assert ["comments", "created_at", "title"] == list( + sorted(m.properties.properties._d_.keys()) # type: ignore[attr-defined] + ) + assert { + "date_detection": False, + "properties": { + "comments": { + "type": "nested", + "properties": { + "created": {"type": "date"}, + "author": { + "analyzer": "snowball", + "fields": {"raw": {"type": "keyword"}}, + "type": "text", + }, + }, + }, + "created_at": {"type": "date"}, + "title": { + "analyzer": "snowball", + "fields": {"raw": {"type": "keyword"}}, + "type": "text", + }, + }, + } == m.to_dict() + + # test same with alias + await async_write_client.indices.put_alias(index="test-mapping", name="test-alias") + + m2 = await AsyncMapping.from_es("test-alias", using=async_write_client) + assert m2.to_dict() == m.to_dict() diff --git a/test_elasticsearch/test_dsl/test_integration/_async/test_search.py b/test_elasticsearch/test_dsl/test_integration/_async/test_search.py new file mode 100644 index 000000000..a63f6746a --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/_async/test_search.py @@ -0,0 +1,304 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ + +import pytest +from pytest import raises + +from elasticsearch import ApiError, AsyncElasticsearch +from elasticsearch.dsl import ( + AsyncDocument, + AsyncMultiSearch, + AsyncSearch, + Date, + Keyword, + Q, + Text, +) +from elasticsearch.dsl.response import aggs + +from ..test_data import FLAT_DATA + + +class Repository(AsyncDocument): + created_at = Date() + description = Text(analyzer="snowball") + tags = Keyword() + + @classmethod + def search(cls) -> AsyncSearch["Repository"]: # type: ignore[override] + return super().search().filter("term", commit_repo="repo") + + class Index: + name = "git" + + +class Commit(AsyncDocument): + class Index: + name = "flat-git" + + +@pytest.mark.asyncio +async def test_filters_aggregation_buckets_are_accessible( + async_data_client: AsyncElasticsearch, +) -> None: + has_tests_query = Q("term", files="test_elasticsearch_dsl") + s = Commit.search()[0:0] + s.aggs.bucket("top_authors", "terms", field="author.name.raw").bucket( + "has_tests", "filters", filters={"yes": has_tests_query, "no": ~has_tests_query} + ).metric("lines", "stats", field="stats.lines") + + response = await s.execute() + + assert isinstance( + response.aggregations.top_authors.buckets[0].has_tests.buckets.yes, aggs.Bucket + ) + assert ( + 35 + == response.aggregations.top_authors.buckets[0].has_tests.buckets.yes.doc_count + ) + assert ( + 228 + == response.aggregations.top_authors.buckets[0].has_tests.buckets.yes.lines.max + ) + + +@pytest.mark.asyncio +async def test_top_hits_are_wrapped_in_response( + async_data_client: AsyncElasticsearch, +) -> None: + s = Commit.search()[0:0] + s.aggs.bucket("top_authors", "terms", field="author.name.raw").metric( + "top_commits", "top_hits", size=5 + ) + response = await s.execute() + + top_commits = response.aggregations.top_authors.buckets[0].top_commits + assert isinstance(top_commits, aggs.TopHitsData) + assert 5 == len(top_commits) + + hits = [h for h in top_commits] + assert 5 == len(hits) + assert isinstance(hits[0], Commit) + + +@pytest.mark.asyncio +async def test_inner_hits_are_wrapped_in_response( + async_data_client: AsyncElasticsearch, +) -> None: + s = AsyncSearch(index="git")[0:1].query( + "has_parent", parent_type="repo", inner_hits={}, query=Q("match_all") + ) + response = await s.execute() + + commit = response.hits[0] + assert isinstance(commit.meta.inner_hits.repo, response.__class__) + assert repr(commit.meta.inner_hits.repo[0]).startswith( + "<Hit(git/elasticsearch-dsl-py): " + ) + + +@pytest.mark.asyncio +async def test_inner_hits_are_serialized_to_dict( + async_data_client: AsyncElasticsearch, +) -> None: + s = AsyncSearch(index="git")[0:1].query( + "has_parent", parent_type="repo", inner_hits={}, query=Q("match_all") + ) + response = await s.execute() + d = response.to_dict(recursive=True) + assert isinstance(d, dict) + assert isinstance(d["hits"]["hits"][0]["inner_hits"]["repo"], dict) + + # iterating over the results changes the format of the internal AttrDict + for hit in response: + pass + + d = response.to_dict(recursive=True) + assert isinstance(d, dict) + assert isinstance(d["hits"]["hits"][0]["inner_hits"]["repo"], dict) + + +@pytest.mark.asyncio +async def test_scan_respects_doc_types(async_data_client: AsyncElasticsearch) -> None: + repos = [repo async for repo in Repository.search().scan()] + + assert 1 == len(repos) + assert isinstance(repos[0], Repository) + assert repos[0].organization == "elasticsearch" + + +@pytest.mark.asyncio +async def test_scan_iterates_through_all_docs( + async_data_client: AsyncElasticsearch, +) -> None: + s = AsyncSearch(index="flat-git") + + commits = [commit async for commit in s.scan()] + + assert 52 == len(commits) + assert 
{d["_id"] for d in FLAT_DATA} == {c.meta.id for c in commits} + + +@pytest.mark.asyncio +async def test_search_after(async_data_client: AsyncElasticsearch) -> None: + page_size = 7 + s = AsyncSearch(index="flat-git")[:page_size].sort("authored_date") + commits = [] + while True: + r = await s.execute() + commits += r.hits + if len(r.hits) < page_size: + break + s = s.search_after() + + assert 52 == len(commits) + assert {d["_id"] for d in FLAT_DATA} == {c.meta.id for c in commits} + + +@pytest.mark.asyncio +async def test_search_after_no_search(async_data_client: AsyncElasticsearch) -> None: + s = AsyncSearch(index="flat-git") + with raises( + ValueError, match="A search must be executed before using search_after" + ): + s.search_after() + await s.count() + with raises( + ValueError, match="A search must be executed before using search_after" + ): + s.search_after() + + +@pytest.mark.asyncio +async def test_search_after_no_sort(async_data_client: AsyncElasticsearch) -> None: + s = AsyncSearch(index="flat-git") + r = await s.execute() + with raises( + ValueError, match="Cannot use search_after when results are not sorted" + ): + r.search_after() + + +@pytest.mark.asyncio +async def test_search_after_no_results(async_data_client: AsyncElasticsearch) -> None: + s = AsyncSearch(index="flat-git")[:100].sort("authored_date") + r = await s.execute() + assert 52 == len(r.hits) + s = s.search_after() + r = await s.execute() + assert 0 == len(r.hits) + with raises( + ValueError, match="Cannot use search_after when there are no search results" + ): + r.search_after() + + +@pytest.mark.asyncio +async def test_point_in_time(async_data_client: AsyncElasticsearch) -> None: + page_size = 7 + commits = [] + async with AsyncSearch(index="flat-git")[:page_size].point_in_time( + keep_alive="30s" + ) as s: + pit_id = s._extra["pit"]["id"] + while True: + r = await s.execute() + commits += r.hits + if len(r.hits) < page_size: + break + s = s.search_after() + assert pit_id == s._extra["pit"]["id"] + assert "30s" == s._extra["pit"]["keep_alive"] + + assert 52 == len(commits) + assert {d["_id"] for d in FLAT_DATA} == {c.meta.id for c in commits} + + +@pytest.mark.asyncio +async def test_iterate(async_data_client: AsyncElasticsearch) -> None: + s = AsyncSearch(index="flat-git") + + commits = [commit async for commit in s.iterate()] + + assert 52 == len(commits) + assert {d["_id"] for d in FLAT_DATA} == {c.meta.id for c in commits} + + +@pytest.mark.asyncio +async def test_response_is_cached(async_data_client: AsyncElasticsearch) -> None: + s = Repository.search() + repos = [repo async for repo in s] + + assert hasattr(s, "_response") + assert s._response.hits == repos + + +@pytest.mark.asyncio +async def test_multi_search(async_data_client: AsyncElasticsearch) -> None: + s1 = Repository.search() + s2 = AsyncSearch[Repository](index="flat-git") + + ms = AsyncMultiSearch[Repository]() + ms = ms.add(s1).add(s2) + + r1, r2 = await ms.execute() + + assert 1 == len(r1) + assert isinstance(r1[0], Repository) + assert r1._search is s1 + + assert 52 == r2.hits.total.value # type: ignore[attr-defined] + assert r2._search is s2 + + +@pytest.mark.asyncio +async def test_multi_missing(async_data_client: AsyncElasticsearch) -> None: + s1 = Repository.search() + s2 = AsyncSearch[Repository](index="flat-git") + s3 = AsyncSearch[Repository](index="does_not_exist") + + ms = AsyncMultiSearch[Repository]() + ms = ms.add(s1).add(s2).add(s3) + + with raises(ApiError): + await ms.execute() + + r1, r2, r3 = await 
ms.execute(raise_on_error=False) + + assert 1 == len(r1) + assert isinstance(r1[0], Repository) + assert r1._search is s1 + + assert 52 == r2.hits.total.value # type: ignore[attr-defined] + assert r2._search is s2 + + assert r3 is None + + +@pytest.mark.asyncio +async def test_raw_subfield_can_be_used_in_aggs( + async_data_client: AsyncElasticsearch, +) -> None: + s = AsyncSearch(index="git")[0:0] + s.aggs.bucket("authors", "terms", field="author.name.raw", size=1) + + r = await s.execute() + + authors = r.aggregations.authors + assert 1 == len(authors) + assert {"key": "Honza Král", "doc_count": 52} == authors[0] diff --git a/test_elasticsearch/test_dsl/test_integration/_async/test_update_by_query.py b/test_elasticsearch/test_dsl/test_integration/_async/test_update_by_query.py new file mode 100644 index 000000000..b051d284a --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/_async/test_update_by_query.py @@ -0,0 +1,85 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import pytest + +from elasticsearch import AsyncElasticsearch +from elasticsearch.dsl import AsyncUpdateByQuery +from elasticsearch.dsl.search import Q + + +@pytest.mark.asyncio +async def test_update_by_query_no_script( + async_write_client: AsyncElasticsearch, setup_ubq_tests: str +) -> None: + index = setup_ubq_tests + + ubq = ( + AsyncUpdateByQuery(using=async_write_client) + .index(index) + .filter(~Q("exists", field="is_public")) + ) + response = await ubq.execute() + + assert response.total == 52 + assert response["took"] > 0 + assert not response.timed_out + assert response.updated == 52 + assert response.deleted == 0 + assert response.took > 0 + assert response.success() + + +@pytest.mark.asyncio +async def test_update_by_query_with_script( + async_write_client: AsyncElasticsearch, setup_ubq_tests: str +) -> None: + index = setup_ubq_tests + + ubq = ( + AsyncUpdateByQuery(using=async_write_client) + .index(index) + .filter(~Q("exists", field="parent_shas")) + .script(source="ctx._source.is_public = false") + ) + ubq = ubq.params(conflicts="proceed") + + response = await ubq.execute() + assert response.total == 2 + assert response.updated == 2 + assert response.version_conflicts == 0 + + +@pytest.mark.asyncio +async def test_delete_by_query_with_script( + async_write_client: AsyncElasticsearch, setup_ubq_tests: str +) -> None: + index = setup_ubq_tests + + ubq = ( + AsyncUpdateByQuery(using=async_write_client) + .index(index) + .filter(Q("match", parent_shas="1dd19210b5be92b960f7db6f66ae526288edccc3")) + .script(source='ctx.op = "delete"') + ) + ubq = ubq.params(conflicts="proceed") + + response = await ubq.execute() + + assert response.total == 1 + assert response.deleted == 1 + assert response.success() diff --git 
a/test_elasticsearch/test_dsl/test_integration/_sync/__init__.py b/test_elasticsearch/test_dsl/test_integration/_sync/__init__.py new file mode 100644 index 000000000..2a87d183f --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/_sync/__init__.py @@ -0,0 +1,16 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. diff --git a/test_elasticsearch/test_dsl/test_integration/_sync/test_analysis.py b/test_elasticsearch/test_dsl/test_integration/_sync/test_analysis.py new file mode 100644 index 000000000..87e5350ba --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/_sync/test_analysis.py @@ -0,0 +1,54 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import pytest + +from elasticsearch import Elasticsearch +from elasticsearch.dsl import analyzer, token_filter, tokenizer + + +@pytest.mark.sync +def test_simulate_with_just__builtin_tokenizer( + client: Elasticsearch, +) -> None: + a = analyzer("my-analyzer", tokenizer="keyword") + tokens = (a.simulate("Hello World!", using=client)).tokens + + assert len(tokens) == 1 + assert tokens[0].token == "Hello World!" 
+ + +@pytest.mark.sync +def test_simulate_complex(client: Elasticsearch) -> None: + a = analyzer( + "my-analyzer", + tokenizer=tokenizer("split_words", "simple_pattern_split", pattern=":"), + filter=["lowercase", token_filter("no-ifs", "stop", stopwords=["if"])], + ) + + tokens = (a.simulate("if:this:works", using=client)).tokens + + assert len(tokens) == 2 + assert ["this", "works"] == [t.token for t in tokens] + + +@pytest.mark.sync +def test_simulate_builtin(client: Elasticsearch) -> None: + a = analyzer("my-analyzer", "english") + tokens = (a.simulate("fixes running")).tokens + + assert ["fix", "run"] == [t.token for t in tokens] diff --git a/test_elasticsearch/test_dsl/test_integration/_sync/test_document.py b/test_elasticsearch/test_dsl/test_integration/_sync/test_document.py new file mode 100644 index 000000000..13b60f71b --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/_sync/test_document.py @@ -0,0 +1,844 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# this file creates several documents using bad or no types because +# these are still supported and should be kept functional in spite +# of not having appropriate type hints. For that reason the comment +# below disables many mypy checks that fail as a result of this. 
+# mypy: disable-error-code="assignment, index, arg-type, call-arg, operator, comparison-overlap, attr-defined" + +from datetime import datetime +from ipaddress import ip_address +from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Tuple, Union + +import pytest +from pytest import raises +from pytz import timezone + +from elasticsearch import ConflictError, Elasticsearch, NotFoundError +from elasticsearch.dsl import ( + Binary, + Boolean, + Date, + DenseVector, + Document, + Double, + InnerDoc, + Ip, + Keyword, + Long, + Mapping, + MetaField, + Nested, + Object, + Q, + RankFeatures, + Search, + Text, + analyzer, + mapped_field, +) +from elasticsearch.dsl.utils import AttrList +from elasticsearch.helpers.errors import BulkIndexError + +snowball = analyzer("my_snow", tokenizer="standard", filter=["lowercase", "snowball"]) + + +class User(InnerDoc): + name = Text(fields={"raw": Keyword()}) + + +class Wiki(Document): + owner = Object(User) + views = Long() + ranked = RankFeatures() + + class Index: + name = "test-wiki" + + +class Repository(Document): + owner = Object(User) + created_at = Date() + description = Text(analyzer=snowball) + tags = Keyword() + + @classmethod + def search(cls) -> Search["Repository"]: # type: ignore[override] + return super().search().filter("term", commit_repo="repo") + + class Index: + name = "git" + + +class Commit(Document): + committed_date = Date() + authored_date = Date() + description = Text(analyzer=snowball) + + class Index: + name = "flat-git" + + class Meta: + mapping = Mapping() + + +class History(InnerDoc): + timestamp = Date() + diff = Text() + + +class Comment(InnerDoc): + content = Text() + created_at = Date() + author = Object(User) + history = Nested(History) + + class Meta: + dynamic = MetaField(False) + + +class PullRequest(Document): + comments = Nested(Comment) + created_at = Date() + + class Index: + name = "test-prs" + + +class SerializationDoc(Document): + i = Long() + b = Boolean() + d = Double() + bin = Binary() + ip = Ip() + + class Index: + name = "test-serialization" + + +class Tags(Document): + tags = Keyword(multi=True) + + class Index: + name = "tags" + + +@pytest.mark.sync +def test_serialization(write_client: Elasticsearch) -> None: + SerializationDoc.init() + write_client.index( + index="test-serialization", + id=42, + body={ + "i": [1, 2, "3", None], + "b": [True, False, "true", "false", None], + "d": [0.1, "-0.1", None], + "bin": ["SGVsbG8gV29ybGQ=", None], + "ip": ["::1", "127.0.0.1", None], + }, + ) + sd = SerializationDoc.get(id=42) + assert sd is not None + + assert sd.i == [1, 2, 3, None] + assert sd.b == [True, False, True, False, None] + assert sd.d == [0.1, -0.1, None] + assert sd.bin == [b"Hello World", None] + assert sd.ip == [ip_address("::1"), ip_address("127.0.0.1"), None] + + assert sd.to_dict() == { + "b": [True, False, True, False, None], + "bin": ["SGVsbG8gV29ybGQ=", None], + "d": [0.1, -0.1, None], + "i": [1, 2, 3, None], + "ip": ["::1", "127.0.0.1", None], + } + + +@pytest.mark.sync +def test_nested_inner_hits_are_wrapped_properly(pull_request: Any) -> None: + history_query = Q( + "nested", + path="comments.history", + inner_hits={}, + query=Q("match", comments__history__diff="ahoj"), + ) + s = PullRequest.search().query( + "nested", inner_hits={}, path="comments", query=history_query + ) + + response = s.execute() + pr = response.hits[0] + assert isinstance(pr, PullRequest) + assert isinstance(pr.comments[0], Comment) + assert isinstance(pr.comments[0].history[0], History) + + comment = 
pr.meta.inner_hits.comments.hits[0] + assert isinstance(comment, Comment) + assert comment.author.name == "honzakral" + assert isinstance(comment.history[0], History) + + history = comment.meta.inner_hits["comments.history"].hits[0] + assert isinstance(history, History) + assert history.timestamp == datetime(2012, 1, 1) + assert "score" in history.meta + + +@pytest.mark.sync +def test_nested_inner_hits_are_deserialized_properly( + pull_request: Any, +) -> None: + s = PullRequest.search().query( + "nested", + inner_hits={}, + path="comments", + query=Q("match", comments__content="hello"), + ) + + response = s.execute() + pr = response.hits[0] + assert isinstance(pr.created_at, datetime) + assert isinstance(pr.comments[0], Comment) + assert isinstance(pr.comments[0].created_at, datetime) + + +@pytest.mark.sync +def test_nested_top_hits_are_wrapped_properly(pull_request: Any) -> None: + s = PullRequest.search() + s.aggs.bucket("comments", "nested", path="comments").metric( + "hits", "top_hits", size=1 + ) + + r = s.execute() + + print(r._d_) + assert isinstance(r.aggregations.comments.hits.hits[0], Comment) + + +@pytest.mark.sync +def test_update_object_field(write_client: Elasticsearch) -> None: + Wiki.init() + w = Wiki( + owner=User(name="Honza Kral"), + _id="elasticsearch-py", + ranked={"test1": 0.1, "topic2": 0.2}, + ) + w.save() + + assert "updated" == w.update(owner=[{"name": "Honza"}, User(name="Nick")]) + assert w.owner[0].name == "Honza" + assert w.owner[1].name == "Nick" + + w = Wiki.get(id="elasticsearch-py") + assert w.owner[0].name == "Honza" + assert w.owner[1].name == "Nick" + + assert w.ranked == {"test1": 0.1, "topic2": 0.2} + + +@pytest.mark.sync +def test_update_script(write_client: Elasticsearch) -> None: + Wiki.init() + w = Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42) + w.save() + + w.update(script="ctx._source.views += params.inc", inc=5) + w = Wiki.get(id="elasticsearch-py") + assert w.views == 47 + + +@pytest.mark.sync +def test_update_script_with_dict(write_client: Elasticsearch) -> None: + Wiki.init() + w = Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42) + w.save() + + w.update( + script={ + "source": "ctx._source.views += params.inc1 + params.inc2", + "params": {"inc1": 2}, + "lang": "painless", + }, + inc2=3, + ) + w = Wiki.get(id="elasticsearch-py") + assert w.views == 47 + + +@pytest.mark.sync +def test_update_retry_on_conflict(write_client: Elasticsearch) -> None: + Wiki.init() + w = Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42) + w.save() + + w1 = Wiki.get(id="elasticsearch-py") + w2 = Wiki.get(id="elasticsearch-py") + assert w1 is not None + assert w2 is not None + + w1.update(script="ctx._source.views += params.inc", inc=5, retry_on_conflict=1) + w2.update(script="ctx._source.views += params.inc", inc=5, retry_on_conflict=1) + + w = Wiki.get(id="elasticsearch-py") + assert w.views == 52 + + +@pytest.mark.sync +@pytest.mark.parametrize("retry_on_conflict", [None, 0]) +def test_update_conflicting_version( + write_client: Elasticsearch, retry_on_conflict: bool +) -> None: + Wiki.init() + w = Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42) + w.save() + + w1 = Wiki.get(id="elasticsearch-py") + w2 = Wiki.get(id="elasticsearch-py") + assert w1 is not None + assert w2 is not None + + w1.update(script="ctx._source.views += params.inc", inc=5) + + with raises(ConflictError): + w2.update( + script="ctx._source.views += params.inc", + inc=5, + retry_on_conflict=retry_on_conflict, 
+ ) + + +@pytest.mark.sync +def test_save_and_update_return_doc_meta( + write_client: Elasticsearch, +) -> None: + Wiki.init() + w = Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42) + resp = w.save(return_doc_meta=True) + assert resp["_index"] == "test-wiki" + assert resp["result"] == "created" + assert set(resp.keys()) == { + "_id", + "_index", + "_primary_term", + "_seq_no", + "_shards", + "_version", + "result", + } + + resp = w.update( + script="ctx._source.views += params.inc", inc=5, return_doc_meta=True + ) + assert resp["_index"] == "test-wiki" + assert resp["result"] == "updated" + assert set(resp.keys()) == { + "_id", + "_index", + "_primary_term", + "_seq_no", + "_shards", + "_version", + "result", + } + + +@pytest.mark.sync +def test_init(write_client: Elasticsearch) -> None: + Repository.init(index="test-git") + + assert write_client.indices.exists(index="test-git") + + +@pytest.mark.sync +def test_get_raises_404_on_index_missing( + data_client: Elasticsearch, +) -> None: + with raises(NotFoundError): + Repository.get("elasticsearch-dsl-php", index="not-there") + + +@pytest.mark.sync +def test_get_raises_404_on_non_existent_id( + data_client: Elasticsearch, +) -> None: + with raises(NotFoundError): + Repository.get("elasticsearch-dsl-php") + + +@pytest.mark.sync +def test_get_returns_none_if_404_ignored( + data_client: Elasticsearch, +) -> None: + assert None is Repository.get( + "elasticsearch-dsl-php", using=data_client.options(ignore_status=404) + ) + + +@pytest.mark.sync +def test_get_returns_none_if_404_ignored_and_index_doesnt_exist( + data_client: Elasticsearch, +) -> None: + assert None is Repository.get( + "42", index="not-there", using=data_client.options(ignore_status=404) + ) + + +@pytest.mark.sync +def test_get(data_client: Elasticsearch) -> None: + elasticsearch_repo = Repository.get("elasticsearch-dsl-py") + + assert isinstance(elasticsearch_repo, Repository) + assert elasticsearch_repo.owner.name == "elasticsearch" + assert datetime(2014, 3, 3) == elasticsearch_repo.created_at + + +@pytest.mark.sync +def test_exists_return_true(data_client: Elasticsearch) -> None: + assert Repository.exists("elasticsearch-dsl-py") + + +@pytest.mark.sync +def test_exists_false(data_client: Elasticsearch) -> None: + assert not Repository.exists("elasticsearch-dsl-php") + + +@pytest.mark.sync +def test_get_with_tz_date(data_client: Elasticsearch) -> None: + first_commit = Commit.get( + id="3ca6e1e73a071a705b4babd2f581c91a2a3e5037", routing="elasticsearch-dsl-py" + ) + assert first_commit is not None + + tzinfo = timezone("Europe/Prague") + assert ( + tzinfo.localize(datetime(2014, 5, 2, 13, 47, 19, 123000)) + == first_commit.authored_date + ) + + +@pytest.mark.sync +def test_save_with_tz_date(data_client: Elasticsearch) -> None: + tzinfo = timezone("Europe/Prague") + first_commit = Commit.get( + id="3ca6e1e73a071a705b4babd2f581c91a2a3e5037", routing="elasticsearch-dsl-py" + ) + assert first_commit is not None + + first_commit.committed_date = tzinfo.localize( + datetime(2014, 5, 2, 13, 47, 19, 123456) + ) + first_commit.save() + + first_commit = Commit.get( + id="3ca6e1e73a071a705b4babd2f581c91a2a3e5037", routing="elasticsearch-dsl-py" + ) + assert first_commit is not None + + assert ( + tzinfo.localize(datetime(2014, 5, 2, 13, 47, 19, 123456)) + == first_commit.committed_date + ) + + +COMMIT_DOCS_WITH_MISSING = [ + {"_id": "0"}, # Missing + {"_id": "3ca6e1e73a071a705b4babd2f581c91a2a3e5037"}, # Existing + {"_id": "f"}, # Missing + {"_id": 
"eb3e543323f189fd7b698e66295427204fff5755"}, # Existing +] + + +@pytest.mark.sync +def test_mget(data_client: Elasticsearch) -> None: + commits = Commit.mget(COMMIT_DOCS_WITH_MISSING) + assert commits[0] is None + assert commits[1] is not None + assert commits[1].meta.id == "3ca6e1e73a071a705b4babd2f581c91a2a3e5037" + assert commits[2] is None + assert commits[3] is not None + assert commits[3].meta.id == "eb3e543323f189fd7b698e66295427204fff5755" + + +@pytest.mark.sync +def test_mget_raises_exception_when_missing_param_is_invalid( + data_client: Elasticsearch, +) -> None: + with raises(ValueError): + Commit.mget(COMMIT_DOCS_WITH_MISSING, missing="raj") + + +@pytest.mark.sync +def test_mget_raises_404_when_missing_param_is_raise( + data_client: Elasticsearch, +) -> None: + with raises(NotFoundError): + Commit.mget(COMMIT_DOCS_WITH_MISSING, missing="raise") + + +@pytest.mark.sync +def test_mget_ignores_missing_docs_when_missing_param_is_skip( + data_client: Elasticsearch, +) -> None: + commits = Commit.mget(COMMIT_DOCS_WITH_MISSING, missing="skip") + assert commits[0] is not None + assert commits[0].meta.id == "3ca6e1e73a071a705b4babd2f581c91a2a3e5037" + assert commits[1] is not None + assert commits[1].meta.id == "eb3e543323f189fd7b698e66295427204fff5755" + + +@pytest.mark.sync +def test_update_works_from_search_response( + data_client: Elasticsearch, +) -> None: + elasticsearch_repo = (Repository.search().execute())[0] + + elasticsearch_repo.update(owner={"other_name": "elastic"}) + assert "elastic" == elasticsearch_repo.owner.other_name + + new_version = Repository.get("elasticsearch-dsl-py") + assert new_version is not None + assert "elastic" == new_version.owner.other_name + assert "elasticsearch" == new_version.owner.name + + +@pytest.mark.sync +def test_update(data_client: Elasticsearch) -> None: + elasticsearch_repo = Repository.get("elasticsearch-dsl-py") + assert elasticsearch_repo is not None + v = elasticsearch_repo.meta.version + + old_seq_no = elasticsearch_repo.meta.seq_no + elasticsearch_repo.update(owner={"new_name": "elastic"}, new_field="testing-update") + + assert "elastic" == elasticsearch_repo.owner.new_name + assert "testing-update" == elasticsearch_repo.new_field + + # assert version has been updated + assert elasticsearch_repo.meta.version == v + 1 + + new_version = Repository.get("elasticsearch-dsl-py") + assert new_version is not None + assert "testing-update" == new_version.new_field + assert "elastic" == new_version.owner.new_name + assert "elasticsearch" == new_version.owner.name + assert "seq_no" in new_version.meta + assert new_version.meta.seq_no != old_seq_no + assert "primary_term" in new_version.meta + + +@pytest.mark.sync +def test_save_updates_existing_doc(data_client: Elasticsearch) -> None: + elasticsearch_repo = Repository.get("elasticsearch-dsl-py") + assert elasticsearch_repo is not None + + elasticsearch_repo.new_field = "testing-save" + old_seq_no = elasticsearch_repo.meta.seq_no + assert "updated" == elasticsearch_repo.save() + + new_repo = data_client.get(index="git", id="elasticsearch-dsl-py") + assert "testing-save" == new_repo["_source"]["new_field"] + assert new_repo["_seq_no"] != old_seq_no + assert new_repo["_seq_no"] == elasticsearch_repo.meta.seq_no + + +@pytest.mark.sync +def test_update_empty_field(client: Elasticsearch) -> None: + Tags._index.delete(ignore_unavailable=True) + Tags.init() + d = Tags(id="123", tags=["a", "b"]) + d.save(refresh=True) + d.update(tags=[], refresh=True) + assert d.tags == [] + + r = Tags.search().execute() 
+ assert r.hits[0].tags == [] + + +@pytest.mark.sync +def test_save_automatically_uses_seq_no_and_primary_term( + data_client: Elasticsearch, +) -> None: + elasticsearch_repo = Repository.get("elasticsearch-dsl-py") + assert elasticsearch_repo is not None + elasticsearch_repo.meta.seq_no += 1 + + with raises(ConflictError): + elasticsearch_repo.save() + + +@pytest.mark.sync +def test_delete_automatically_uses_seq_no_and_primary_term( + data_client: Elasticsearch, +) -> None: + elasticsearch_repo = Repository.get("elasticsearch-dsl-py") + assert elasticsearch_repo is not None + elasticsearch_repo.meta.seq_no += 1 + + with raises(ConflictError): + elasticsearch_repo.delete() + + +def assert_doc_equals(expected: Any, actual: Any) -> None: + for f in expected: + assert f in actual + assert actual[f] == expected[f] + + +@pytest.mark.sync +def test_can_save_to_different_index( + write_client: Elasticsearch, +) -> None: + test_repo = Repository(description="testing", meta={"id": 42}) + assert test_repo.save(index="test-document") + + assert_doc_equals( + { + "found": True, + "_index": "test-document", + "_id": "42", + "_source": {"description": "testing"}, + }, + write_client.get(index="test-document", id=42), + ) + + +@pytest.mark.sync +def test_save_without_skip_empty_will_include_empty_fields( + write_client: Elasticsearch, +) -> None: + test_repo = Repository(field_1=[], field_2=None, field_3={}, meta={"id": 42}) + assert test_repo.save(index="test-document", skip_empty=False) + + assert_doc_equals( + { + "found": True, + "_index": "test-document", + "_id": "42", + "_source": {"field_1": [], "field_2": None, "field_3": {}}, + }, + write_client.get(index="test-document", id=42), + ) + + +@pytest.mark.sync +def test_delete(write_client: Elasticsearch) -> None: + write_client.create( + index="test-document", + id="elasticsearch-dsl-py", + body={ + "organization": "elasticsearch", + "created_at": "2014-03-03", + "owner": {"name": "elasticsearch"}, + }, + ) + + test_repo = Repository(meta={"id": "elasticsearch-dsl-py"}) + test_repo.meta.index = "test-document" + test_repo.delete() + + assert not write_client.exists( + index="test-document", + id="elasticsearch-dsl-py", + ) + + +@pytest.mark.sync +def test_search(data_client: Elasticsearch) -> None: + assert Repository.search().count() == 1 + + +@pytest.mark.sync +def test_search_returns_proper_doc_classes( + data_client: Elasticsearch, +) -> None: + result = Repository.search().execute() + + elasticsearch_repo = result.hits[0] + + assert isinstance(elasticsearch_repo, Repository) + assert elasticsearch_repo.owner.name == "elasticsearch" + + +@pytest.mark.sync +def test_refresh_mapping(data_client: Elasticsearch) -> None: + class Commit(Document): + class Index: + name = "git" + + Commit._index.load_mappings() + + assert "stats" in Commit._index._mapping + assert "committer" in Commit._index._mapping + assert "description" in Commit._index._mapping + assert "committed_date" in Commit._index._mapping + assert isinstance(Commit._index._mapping["committed_date"], Date) + + +@pytest.mark.sync +def test_highlight_in_meta(data_client: Elasticsearch) -> None: + commit = ( + Commit.search() + .query("match", description="inverting") + .highlight("description") + .execute() + )[0] + + assert isinstance(commit, Commit) + assert "description" in commit.meta.highlight + assert isinstance(commit.meta.highlight["description"], AttrList) + assert len(commit.meta.highlight["description"]) > 0 + + +@pytest.mark.sync +def test_bulk(data_client: Elasticsearch) -> 
None: + class Address(InnerDoc): + street: str + active: bool + + class Doc(Document): + if TYPE_CHECKING: + _id: int + name: str + age: int + languages: List[str] = mapped_field(Keyword()) + addresses: List[Address] + + class Index: + name = "bulk-index" + + Doc._index.delete(ignore_unavailable=True) + Doc.init() + + def gen1() -> Iterator[Union[Doc, Dict[str, Any]]]: + yield Doc( + name="Joe", + age=33, + languages=["en", "fr"], + addresses=[ + Address(street="123 Main St", active=True), + Address(street="321 Park Dr.", active=False), + ], + ) + yield Doc(name="Susan", age=20, languages=["en"]) + yield {"_op_type": "create", "_id": "45", "_source": Doc(name="Sarah", age=45)} + + Doc.bulk(gen1(), refresh=True) + docs = list(Doc.search().execute()) + assert len(docs) == 3 + assert docs[0].to_dict() == { + "name": "Joe", + "age": 33, + "languages": [ + "en", + "fr", + ], + "addresses": [ + { + "active": True, + "street": "123 Main St", + }, + { + "active": False, + "street": "321 Park Dr.", + }, + ], + } + assert docs[1].to_dict() == { + "name": "Susan", + "age": 20, + "languages": ["en"], + } + assert docs[2].to_dict() == { + "name": "Sarah", + "age": 45, + } + assert docs[2].meta.id == "45" + + def gen2() -> Iterator[Union[Doc, Dict[str, Any]]]: + yield {"_op_type": "create", "_id": "45", "_source": Doc(name="Sarah", age=45)} + + # a "create" action with an existing id should fail + with raises(BulkIndexError): + Doc.bulk(gen2(), refresh=True) + + def gen3() -> Iterator[Union[Doc, Dict[str, Any]]]: + yield Doc(_id="45", name="Sarah", age=45, languages=["es"]) + yield {"_op_type": "delete", "_id": docs[1].meta.id} + + Doc.bulk(gen3(), refresh=True) + with raises(NotFoundError): + Doc.get(docs[1].meta.id) + doc = Doc.get("45") + assert doc is not None + assert (doc).to_dict() == { + "name": "Sarah", + "age": 45, + "languages": ["es"], + } + + +@pytest.mark.sync +def test_legacy_dense_vector( + client: Elasticsearch, es_version: Tuple[int, ...] 
+) -> None: + if es_version >= (8, 16): + pytest.skip("this test is a legacy version for Elasticsearch 8.15 or older") + + class Doc(Document): + float_vector: List[float] = mapped_field(DenseVector(dims=3)) + + class Index: + name = "vectors" + + Doc._index.delete(ignore_unavailable=True) + Doc.init() + + doc = Doc(float_vector=[1.0, 1.2, 2.3]) + doc.save(refresh=True) + + docs = Doc.search().execute() + assert len(docs) == 1 + assert docs[0].float_vector == doc.float_vector + + +@pytest.mark.sync +def test_dense_vector(client: Elasticsearch, es_version: Tuple[int, ...]) -> None: + if es_version < (8, 16): + pytest.skip("this test requires Elasticsearch 8.16 or newer") + + class Doc(Document): + float_vector: List[float] = mapped_field(DenseVector()) + byte_vector: List[int] = mapped_field(DenseVector(element_type="byte")) + bit_vector: str = mapped_field(DenseVector(element_type="bit")) + + class Index: + name = "vectors" + + Doc._index.delete(ignore_unavailable=True) + Doc.init() + + doc = Doc( + float_vector=[1.0, 1.2, 2.3], byte_vector=[12, 23, 34, 45], bit_vector="12abf0" + ) + doc.save(refresh=True) + + docs = Doc.search().execute() + assert len(docs) == 1 + assert docs[0].float_vector == doc.float_vector + assert docs[0].byte_vector == doc.byte_vector + assert docs[0].bit_vector == doc.bit_vector diff --git a/test_elasticsearch/test_dsl/test_integration/_sync/test_faceted_search.py b/test_elasticsearch/test_dsl/test_integration/_sync/test_faceted_search.py new file mode 100644 index 000000000..00ce01cc3 --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/_sync/test_faceted_search.py @@ -0,0 +1,305 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
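+
+# Integration tests for the FacetedSearch helpers; they exercise terms, range,
+# date histogram and nested facets against a live cluster using the shared
+# data_client and pull_request fixtures.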
+ +from datetime import datetime +from typing import Tuple, Type + +import pytest + +from elasticsearch import Elasticsearch +from elasticsearch.dsl import A, Boolean, Date, Document, Keyword, Search +from elasticsearch.dsl.faceted_search import ( + DateHistogramFacet, + FacetedSearch, + NestedFacet, + RangeFacet, + TermsFacet, +) + +from .test_document import PullRequest + + +class Repos(Document): + is_public = Boolean() + created_at = Date() + + class Index: + name = "git" + + +class Commit(Document): + files = Keyword() + committed_date = Date() + + class Index: + name = "git" + + +class MetricSearch(FacetedSearch): + index = "git" + doc_types = [Commit] + + facets = { + "files": TermsFacet(field="files", metric=A("max", field="committed_date")), + } + + +@pytest.fixture +def commit_search_cls(es_version: Tuple[int, ...]) -> Type[FacetedSearch]: + if es_version >= (7, 2): + interval_kwargs = {"fixed_interval": "1d"} + else: + interval_kwargs = {"interval": "day"} + + class CommitSearch(FacetedSearch): + index = "flat-git" + fields = ( + "description", + "files", + ) + + facets = { + "files": TermsFacet(field="files"), + "frequency": DateHistogramFacet( + field="authored_date", min_doc_count=1, **interval_kwargs + ), + "deletions": RangeFacet( + field="stats.deletions", + ranges=[("ok", (None, 1)), ("good", (1, 5)), ("better", (5, None))], + ), + } + + return CommitSearch + + +@pytest.fixture +def repo_search_cls(es_version: Tuple[int, ...]) -> Type[FacetedSearch]: + interval_type = "calendar_interval" if es_version >= (7, 2) else "interval" + + class RepoSearch(FacetedSearch): + index = "git" + doc_types = [Repos] + facets = { + "public": TermsFacet(field="is_public"), + "created": DateHistogramFacet( + field="created_at", **{interval_type: "month"} + ), + } + + def search(self) -> Search: + s = super().search() + return s.filter("term", commit_repo="repo") + + return RepoSearch + + +@pytest.fixture +def pr_search_cls(es_version: Tuple[int, ...]) -> Type[FacetedSearch]: + interval_type = "calendar_interval" if es_version >= (7, 2) else "interval" + + class PRSearch(FacetedSearch): + index = "test-prs" + doc_types = [PullRequest] + facets = { + "comments": NestedFacet( + "comments", + DateHistogramFacet( + field="comments.created_at", **{interval_type: "month"} + ), + ) + } + + return PRSearch + + +@pytest.mark.sync +def test_facet_with_custom_metric(data_client: Elasticsearch) -> None: + ms = MetricSearch() + r = ms.execute() + + dates = [f[1] for f in r.facets.files] + assert dates == list(sorted(dates, reverse=True)) + assert dates[0] == 1399038439000 + + +@pytest.mark.sync +def test_nested_facet( + pull_request: PullRequest, pr_search_cls: Type[FacetedSearch] +) -> None: + prs = pr_search_cls() + r = prs.execute() + + assert r.hits.total.value == 1 # type: ignore[attr-defined] + assert [(datetime(2018, 1, 1, 0, 0), 1, False)] == r.facets.comments + + +@pytest.mark.sync +def test_nested_facet_with_filter( + pull_request: PullRequest, pr_search_cls: Type[FacetedSearch] +) -> None: + prs = pr_search_cls(filters={"comments": datetime(2018, 1, 1, 0, 0)}) + r = prs.execute() + + assert r.hits.total.value == 1 # type: ignore[attr-defined] + assert [(datetime(2018, 1, 1, 0, 0), 1, True)] == r.facets.comments + + prs = pr_search_cls(filters={"comments": datetime(2018, 2, 1, 0, 0)}) + r = prs.execute() + assert not r.hits + + +@pytest.mark.sync +def test_datehistogram_facet( + data_client: Elasticsearch, repo_search_cls: Type[FacetedSearch] +) -> None: + rs = repo_search_cls() + r = 
rs.execute() + + assert r.hits.total.value == 1 # type: ignore[attr-defined] + assert [(datetime(2014, 3, 1, 0, 0), 1, False)] == r.facets.created + + +@pytest.mark.sync +def test_boolean_facet( + data_client: Elasticsearch, repo_search_cls: Type[FacetedSearch] +) -> None: + rs = repo_search_cls() + r = rs.execute() + + assert r.hits.total.value == 1 # type: ignore[attr-defined] + assert [(True, 1, False)] == r.facets.public + value, count, selected = r.facets.public[0] + assert value is True + + +@pytest.mark.sync +def test_empty_search_finds_everything( + data_client: Elasticsearch, + es_version: Tuple[int, ...], + commit_search_cls: Type[FacetedSearch], +) -> None: + cs = commit_search_cls() + r = cs.execute() + + assert r.hits.total.value == 52 # type: ignore[attr-defined] + assert [ + ("elasticsearch_dsl", 40, False), + ("test_elasticsearch_dsl", 35, False), + ("elasticsearch_dsl/query.py", 19, False), + ("test_elasticsearch_dsl/test_search.py", 15, False), + ("elasticsearch_dsl/utils.py", 14, False), + ("test_elasticsearch_dsl/test_query.py", 13, False), + ("elasticsearch_dsl/search.py", 12, False), + ("elasticsearch_dsl/aggs.py", 11, False), + ("test_elasticsearch_dsl/test_result.py", 5, False), + ("elasticsearch_dsl/result.py", 3, False), + ] == r.facets.files + + assert [ + (datetime(2014, 3, 3, 0, 0), 2, False), + (datetime(2014, 3, 4, 0, 0), 1, False), + (datetime(2014, 3, 5, 0, 0), 3, False), + (datetime(2014, 3, 6, 0, 0), 3, False), + (datetime(2014, 3, 7, 0, 0), 9, False), + (datetime(2014, 3, 10, 0, 0), 2, False), + (datetime(2014, 3, 15, 0, 0), 4, False), + (datetime(2014, 3, 21, 0, 0), 2, False), + (datetime(2014, 3, 23, 0, 0), 2, False), + (datetime(2014, 3, 24, 0, 0), 10, False), + (datetime(2014, 4, 20, 0, 0), 2, False), + (datetime(2014, 4, 22, 0, 0), 2, False), + (datetime(2014, 4, 25, 0, 0), 3, False), + (datetime(2014, 4, 26, 0, 0), 2, False), + (datetime(2014, 4, 27, 0, 0), 2, False), + (datetime(2014, 5, 1, 0, 0), 2, False), + (datetime(2014, 5, 2, 0, 0), 1, False), + ] == r.facets.frequency + + assert [ + ("ok", 19, False), + ("good", 14, False), + ("better", 19, False), + ] == r.facets.deletions + + +@pytest.mark.sync +def test_term_filters_are_shown_as_selected_and_data_is_filtered( + data_client: Elasticsearch, commit_search_cls: Type[FacetedSearch] +) -> None: + cs = commit_search_cls(filters={"files": "test_elasticsearch_dsl"}) + + r = cs.execute() + + assert 35 == r.hits.total.value # type: ignore[attr-defined] + assert [ + ("elasticsearch_dsl", 40, False), + ("test_elasticsearch_dsl", 35, True), # selected + ("elasticsearch_dsl/query.py", 19, False), + ("test_elasticsearch_dsl/test_search.py", 15, False), + ("elasticsearch_dsl/utils.py", 14, False), + ("test_elasticsearch_dsl/test_query.py", 13, False), + ("elasticsearch_dsl/search.py", 12, False), + ("elasticsearch_dsl/aggs.py", 11, False), + ("test_elasticsearch_dsl/test_result.py", 5, False), + ("elasticsearch_dsl/result.py", 3, False), + ] == r.facets.files + + assert [ + (datetime(2014, 3, 3, 0, 0), 1, False), + (datetime(2014, 3, 5, 0, 0), 2, False), + (datetime(2014, 3, 6, 0, 0), 3, False), + (datetime(2014, 3, 7, 0, 0), 6, False), + (datetime(2014, 3, 10, 0, 0), 1, False), + (datetime(2014, 3, 15, 0, 0), 3, False), + (datetime(2014, 3, 21, 0, 0), 2, False), + (datetime(2014, 3, 23, 0, 0), 1, False), + (datetime(2014, 3, 24, 0, 0), 7, False), + (datetime(2014, 4, 20, 0, 0), 1, False), + (datetime(2014, 4, 25, 0, 0), 3, False), + (datetime(2014, 4, 26, 0, 0), 2, False), + (datetime(2014, 4, 27, 0, 
0), 1, False), + (datetime(2014, 5, 1, 0, 0), 1, False), + (datetime(2014, 5, 2, 0, 0), 1, False), + ] == r.facets.frequency + + assert [ + ("ok", 12, False), + ("good", 10, False), + ("better", 13, False), + ] == r.facets.deletions + + +@pytest.mark.sync +def test_range_filters_are_shown_as_selected_and_data_is_filtered( + data_client: Elasticsearch, commit_search_cls: Type[FacetedSearch] +) -> None: + cs = commit_search_cls(filters={"deletions": "better"}) + + r = cs.execute() + + assert 19 == r.hits.total.value # type: ignore[attr-defined] + + +@pytest.mark.sync +def test_pagination( + data_client: Elasticsearch, commit_search_cls: Type[FacetedSearch] +) -> None: + cs = commit_search_cls() + cs = cs[0:20] + + assert 52 == cs.count() + assert 20 == len(cs.execute()) diff --git a/test_elasticsearch/test_dsl/test_integration/_sync/test_index.py b/test_elasticsearch/test_dsl/test_integration/_sync/test_index.py new file mode 100644 index 000000000..51a7dc40e --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/_sync/test_index.py @@ -0,0 +1,160 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
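+
+# Integration tests for Index, IndexTemplate and ComposableIndexTemplate;
+# mappings, settings and analysis definitions are round-tripped through a
+# live cluster and read back for verification.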
+ +import pytest + +from elasticsearch import Elasticsearch +from elasticsearch.dsl import ( + ComposableIndexTemplate, + Date, + Document, + Index, + IndexTemplate, + Text, + analysis, +) + + +class Post(Document): + title = Text(analyzer=analysis.analyzer("my_analyzer", tokenizer="keyword")) + published_from = Date() + + +@pytest.mark.sync +def test_index_template_works(write_client: Elasticsearch) -> None: + it = IndexTemplate("test-template", "test-legacy-*") + it.document(Post) + it.settings(number_of_replicas=0, number_of_shards=1) + it.save() + + i = Index("test-legacy-blog") + i.create() + + assert { + "test-legacy-blog": { + "mappings": { + "properties": { + "title": {"type": "text", "analyzer": "my_analyzer"}, + "published_from": {"type": "date"}, + } + } + } + } == write_client.indices.get_mapping(index="test-legacy-blog") + + +@pytest.mark.sync +def test_composable_index_template_works( + write_client: Elasticsearch, +) -> None: + it = ComposableIndexTemplate("test-template", "test-*") + it.document(Post) + it.settings(number_of_replicas=0, number_of_shards=1) + it.save() + + i = Index("test-blog") + i.create() + + assert { + "test-blog": { + "mappings": { + "properties": { + "title": {"type": "text", "analyzer": "my_analyzer"}, + "published_from": {"type": "date"}, + } + } + } + } == write_client.indices.get_mapping(index="test-blog") + + +@pytest.mark.sync +def test_index_can_be_saved_even_with_settings( + write_client: Elasticsearch, +) -> None: + i = Index("test-blog", using=write_client) + i.settings(number_of_shards=3, number_of_replicas=0) + i.save() + i.settings(number_of_replicas=1) + i.save() + + assert ( + "1" + == (i.get_settings())["test-blog"]["settings"]["index"]["number_of_replicas"] + ) + + +@pytest.mark.sync +def test_index_exists(data_client: Elasticsearch) -> None: + assert Index("git").exists() + assert not Index("not-there").exists() + + +@pytest.mark.sync +def test_index_can_be_created_with_settings_and_mappings( + write_client: Elasticsearch, +) -> None: + i = Index("test-blog", using=write_client) + i.document(Post) + i.settings(number_of_replicas=0, number_of_shards=1) + i.create() + + assert { + "test-blog": { + "mappings": { + "properties": { + "title": {"type": "text", "analyzer": "my_analyzer"}, + "published_from": {"type": "date"}, + } + } + } + } == write_client.indices.get_mapping(index="test-blog") + + settings = write_client.indices.get_settings(index="test-blog") + assert settings["test-blog"]["settings"]["index"]["number_of_replicas"] == "0" + assert settings["test-blog"]["settings"]["index"]["number_of_shards"] == "1" + assert settings["test-blog"]["settings"]["index"]["analysis"] == { + "analyzer": {"my_analyzer": {"type": "custom", "tokenizer": "keyword"}} + } + + +@pytest.mark.sync +def test_delete(write_client: Elasticsearch) -> None: + write_client.indices.create( + index="test-index", + body={"settings": {"number_of_replicas": 0, "number_of_shards": 1}}, + ) + + i = Index("test-index", using=write_client) + i.delete() + assert not write_client.indices.exists(index="test-index") + + +@pytest.mark.sync +def test_multiple_indices_with_same_doc_type_work( + write_client: Elasticsearch, +) -> None: + i1 = Index("test-index-1", using=write_client) + i2 = Index("test-index-2", using=write_client) + + for i in (i1, i2): + i.document(Post) + i.create() + + for j in ("test-index-1", "test-index-2"): + settings = write_client.indices.get_settings(index=j) + assert settings[j]["settings"]["index"]["analysis"] == { + "analyzer": {"my_analyzer": 
{"type": "custom", "tokenizer": "keyword"}} + } diff --git a/test_elasticsearch/test_dsl/test_integration/_sync/test_mapping.py b/test_elasticsearch/test_dsl/test_integration/_sync/test_mapping.py new file mode 100644 index 000000000..3ce1737db --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/_sync/test_mapping.py @@ -0,0 +1,169 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import pytest +from pytest import raises + +from elasticsearch import Elasticsearch +from elasticsearch.dsl import Mapping, analysis, exceptions + + +@pytest.mark.sync +def test_mapping_saved_into_es(write_client: Elasticsearch) -> None: + m = Mapping() + m.field( + "name", "text", analyzer=analysis.analyzer("my_analyzer", tokenizer="keyword") + ) + m.field("tags", "keyword") + m.save("test-mapping", using=write_client) + + assert { + "test-mapping": { + "mappings": { + "properties": { + "name": {"type": "text", "analyzer": "my_analyzer"}, + "tags": {"type": "keyword"}, + } + } + } + } == write_client.indices.get_mapping(index="test-mapping") + + +@pytest.mark.sync +def test_mapping_saved_into_es_when_index_already_exists_closed( + write_client: Elasticsearch, +) -> None: + m = Mapping() + m.field( + "name", "text", analyzer=analysis.analyzer("my_analyzer", tokenizer="keyword") + ) + write_client.indices.create(index="test-mapping") + + with raises(exceptions.IllegalOperation): + m.save("test-mapping", using=write_client) + + write_client.cluster.health(index="test-mapping", wait_for_status="yellow") + write_client.indices.close(index="test-mapping") + m.save("test-mapping", using=write_client) + + assert { + "test-mapping": { + "mappings": { + "properties": {"name": {"type": "text", "analyzer": "my_analyzer"}} + } + } + } == write_client.indices.get_mapping(index="test-mapping") + + +@pytest.mark.sync +def test_mapping_saved_into_es_when_index_already_exists_with_analysis( + write_client: Elasticsearch, +) -> None: + m = Mapping() + analyzer = analysis.analyzer("my_analyzer", tokenizer="keyword") + m.field("name", "text", analyzer=analyzer) + + new_analysis = analyzer.get_analysis_definition() + new_analysis["analyzer"]["other_analyzer"] = { + "type": "custom", + "tokenizer": "whitespace", + } + write_client.indices.create( + index="test-mapping", body={"settings": {"analysis": new_analysis}} + ) + + m.field("title", "text", analyzer=analyzer) + m.save("test-mapping", using=write_client) + + assert { + "test-mapping": { + "mappings": { + "properties": { + "name": {"type": "text", "analyzer": "my_analyzer"}, + "title": {"type": "text", "analyzer": "my_analyzer"}, + } + } + } + } == write_client.indices.get_mapping(index="test-mapping") + + +@pytest.mark.sync +def test_mapping_gets_updated_from_es( + write_client: Elasticsearch, +) -> None: + write_client.indices.create( 
+ index="test-mapping", + body={ + "settings": {"number_of_shards": 1, "number_of_replicas": 0}, + "mappings": { + "date_detection": False, + "properties": { + "title": { + "type": "text", + "analyzer": "snowball", + "fields": {"raw": {"type": "keyword"}}, + }, + "created_at": {"type": "date"}, + "comments": { + "type": "nested", + "properties": { + "created": {"type": "date"}, + "author": { + "type": "text", + "analyzer": "snowball", + "fields": {"raw": {"type": "keyword"}}, + }, + }, + }, + }, + }, + }, + ) + + m = Mapping.from_es("test-mapping", using=write_client) + + assert ["comments", "created_at", "title"] == list( + sorted(m.properties.properties._d_.keys()) # type: ignore[attr-defined] + ) + assert { + "date_detection": False, + "properties": { + "comments": { + "type": "nested", + "properties": { + "created": {"type": "date"}, + "author": { + "analyzer": "snowball", + "fields": {"raw": {"type": "keyword"}}, + "type": "text", + }, + }, + }, + "created_at": {"type": "date"}, + "title": { + "analyzer": "snowball", + "fields": {"raw": {"type": "keyword"}}, + "type": "text", + }, + }, + } == m.to_dict() + + # test same with alias + write_client.indices.put_alias(index="test-mapping", name="test-alias") + + m2 = Mapping.from_es("test-alias", using=write_client) + assert m2.to_dict() == m.to_dict() diff --git a/test_elasticsearch/test_dsl/test_integration/_sync/test_search.py b/test_elasticsearch/test_dsl/test_integration/_sync/test_search.py new file mode 100644 index 000000000..54060d311 --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/_sync/test_search.py @@ -0,0 +1,294 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
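+
+# Integration tests for Search and MultiSearch: aggregation and inner-hits
+# response wrapping, scan(), search_after(), point_in_time() and iterate()
+# against the git and flat-git test indices.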
+
+
+import pytest
+from pytest import raises
+
+from elasticsearch import ApiError, Elasticsearch
+from elasticsearch.dsl import Date, Document, Keyword, MultiSearch, Q, Search, Text
+from elasticsearch.dsl.response import aggs
+
+from ..test_data import FLAT_DATA
+
+
+class Repository(Document):
+    created_at = Date()
+    description = Text(analyzer="snowball")
+    tags = Keyword()
+
+    @classmethod
+    def search(cls) -> Search["Repository"]:  # type: ignore[override]
+        return super().search().filter("term", commit_repo="repo")
+
+    class Index:
+        name = "git"
+
+
+class Commit(Document):
+    class Index:
+        name = "flat-git"
+
+
+@pytest.mark.sync
+def test_filters_aggregation_buckets_are_accessible(
+    data_client: Elasticsearch,
+) -> None:
+    has_tests_query = Q("term", files="test_elasticsearch_dsl")
+    s = Commit.search()[0:0]
+    s.aggs.bucket("top_authors", "terms", field="author.name.raw").bucket(
+        "has_tests", "filters", filters={"yes": has_tests_query, "no": ~has_tests_query}
+    ).metric("lines", "stats", field="stats.lines")
+
+    response = s.execute()
+
+    assert isinstance(
+        response.aggregations.top_authors.buckets[0].has_tests.buckets.yes, aggs.Bucket
+    )
+    assert (
+        35
+        == response.aggregations.top_authors.buckets[0].has_tests.buckets.yes.doc_count
+    )
+    assert (
+        228
+        == response.aggregations.top_authors.buckets[0].has_tests.buckets.yes.lines.max
+    )
+
+
+@pytest.mark.sync
+def test_top_hits_are_wrapped_in_response(
+    data_client: Elasticsearch,
+) -> None:
+    s = Commit.search()[0:0]
+    s.aggs.bucket("top_authors", "terms", field="author.name.raw").metric(
+        "top_commits", "top_hits", size=5
+    )
+    response = s.execute()
+
+    top_commits = response.aggregations.top_authors.buckets[0].top_commits
+    assert isinstance(top_commits, aggs.TopHitsData)
+    assert 5 == len(top_commits)
+
+    hits = [h for h in top_commits]
+    assert 5 == len(hits)
+    assert isinstance(hits[0], Commit)
+
+
+@pytest.mark.sync
+def test_inner_hits_are_wrapped_in_response(
+    data_client: Elasticsearch,
+) -> None:
+    s = Search(index="git")[0:1].query(
+        "has_parent", parent_type="repo", inner_hits={}, query=Q("match_all")
+    )
+    response = s.execute()
+
+    commit = response.hits[0]
+    assert isinstance(commit.meta.inner_hits.repo, response.__class__)
+    assert repr(commit.meta.inner_hits.repo[0]).startswith(
+        "<Hit(git/elasticsearch-dsl-py): "
+    )
+
+
+@pytest.mark.sync
+def test_inner_hits_are_serialized_to_dict(
+    data_client: Elasticsearch,
+) -> None:
+    s = Search(index="git")[0:1].query(
+        "has_parent", parent_type="repo", inner_hits={}, query=Q("match_all")
+    )
+    response = s.execute()
+    d = response.to_dict(recursive=True)
+    assert isinstance(d, dict)
+    assert isinstance(d["hits"]["hits"][0]["inner_hits"]["repo"], dict)
+
+    # iterating over the results changes the format of the internal AttrDict
+    for hit in response:
+        pass
+
+    d = response.to_dict(recursive=True)
+    assert isinstance(d, dict)
+    assert isinstance(d["hits"]["hits"][0]["inner_hits"]["repo"], dict)
+
+
+@pytest.mark.sync
+def test_scan_respects_doc_types(data_client: Elasticsearch) -> None:
+    repos = [repo for repo in Repository.search().scan()]
+
+    assert 1 == len(repos)
+    assert isinstance(repos[0], Repository)
+    assert repos[0].organization == "elasticsearch"
+
+
+@pytest.mark.sync
+def test_scan_iterates_through_all_docs(
+    data_client: Elasticsearch,
+) -> None:
+    s = Search(index="flat-git")
+
+    commits = [commit for commit in s.scan()]
+
+    assert 52 == len(commits)
+    assert {d["_id"] for d in FLAT_DATA} == {c.meta.id for c in commits}
+
+
+@pytest.mark.sync
+def test_search_after(data_client: Elasticsearch) -> None:
+    page_size = 7
+    s = 
Search(index="flat-git")[:page_size].sort("authored_date") + commits = [] + while True: + r = s.execute() + commits += r.hits + if len(r.hits) < page_size: + break + s = s.search_after() + + assert 52 == len(commits) + assert {d["_id"] for d in FLAT_DATA} == {c.meta.id for c in commits} + + +@pytest.mark.sync +def test_search_after_no_search(data_client: Elasticsearch) -> None: + s = Search(index="flat-git") + with raises( + ValueError, match="A search must be executed before using search_after" + ): + s.search_after() + s.count() + with raises( + ValueError, match="A search must be executed before using search_after" + ): + s.search_after() + + +@pytest.mark.sync +def test_search_after_no_sort(data_client: Elasticsearch) -> None: + s = Search(index="flat-git") + r = s.execute() + with raises( + ValueError, match="Cannot use search_after when results are not sorted" + ): + r.search_after() + + +@pytest.mark.sync +def test_search_after_no_results(data_client: Elasticsearch) -> None: + s = Search(index="flat-git")[:100].sort("authored_date") + r = s.execute() + assert 52 == len(r.hits) + s = s.search_after() + r = s.execute() + assert 0 == len(r.hits) + with raises( + ValueError, match="Cannot use search_after when there are no search results" + ): + r.search_after() + + +@pytest.mark.sync +def test_point_in_time(data_client: Elasticsearch) -> None: + page_size = 7 + commits = [] + with Search(index="flat-git")[:page_size].point_in_time(keep_alive="30s") as s: + pit_id = s._extra["pit"]["id"] + while True: + r = s.execute() + commits += r.hits + if len(r.hits) < page_size: + break + s = s.search_after() + assert pit_id == s._extra["pit"]["id"] + assert "30s" == s._extra["pit"]["keep_alive"] + + assert 52 == len(commits) + assert {d["_id"] for d in FLAT_DATA} == {c.meta.id for c in commits} + + +@pytest.mark.sync +def test_iterate(data_client: Elasticsearch) -> None: + s = Search(index="flat-git") + + commits = [commit for commit in s.iterate()] + + assert 52 == len(commits) + assert {d["_id"] for d in FLAT_DATA} == {c.meta.id for c in commits} + + +@pytest.mark.sync +def test_response_is_cached(data_client: Elasticsearch) -> None: + s = Repository.search() + repos = [repo for repo in s] + + assert hasattr(s, "_response") + assert s._response.hits == repos + + +@pytest.mark.sync +def test_multi_search(data_client: Elasticsearch) -> None: + s1 = Repository.search() + s2 = Search[Repository](index="flat-git") + + ms = MultiSearch[Repository]() + ms = ms.add(s1).add(s2) + + r1, r2 = ms.execute() + + assert 1 == len(r1) + assert isinstance(r1[0], Repository) + assert r1._search is s1 + + assert 52 == r2.hits.total.value # type: ignore[attr-defined] + assert r2._search is s2 + + +@pytest.mark.sync +def test_multi_missing(data_client: Elasticsearch) -> None: + s1 = Repository.search() + s2 = Search[Repository](index="flat-git") + s3 = Search[Repository](index="does_not_exist") + + ms = MultiSearch[Repository]() + ms = ms.add(s1).add(s2).add(s3) + + with raises(ApiError): + ms.execute() + + r1, r2, r3 = ms.execute(raise_on_error=False) + + assert 1 == len(r1) + assert isinstance(r1[0], Repository) + assert r1._search is s1 + + assert 52 == r2.hits.total.value # type: ignore[attr-defined] + assert r2._search is s2 + + assert r3 is None + + +@pytest.mark.sync +def test_raw_subfield_can_be_used_in_aggs( + data_client: Elasticsearch, +) -> None: + s = Search(index="git")[0:0] + s.aggs.bucket("authors", "terms", field="author.name.raw", size=1) + + r = s.execute() + + authors = r.aggregations.authors + 
assert 1 == len(authors) + assert {"key": "Honza Král", "doc_count": 52} == authors[0] diff --git a/test_elasticsearch/test_dsl/test_integration/_sync/test_update_by_query.py b/test_elasticsearch/test_dsl/test_integration/_sync/test_update_by_query.py new file mode 100644 index 000000000..e6c870a5c --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/_sync/test_update_by_query.py @@ -0,0 +1,85 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import pytest + +from elasticsearch import Elasticsearch +from elasticsearch.dsl import UpdateByQuery +from elasticsearch.dsl.search import Q + + +@pytest.mark.sync +def test_update_by_query_no_script( + write_client: Elasticsearch, setup_ubq_tests: str +) -> None: + index = setup_ubq_tests + + ubq = ( + UpdateByQuery(using=write_client) + .index(index) + .filter(~Q("exists", field="is_public")) + ) + response = ubq.execute() + + assert response.total == 52 + assert response["took"] > 0 + assert not response.timed_out + assert response.updated == 52 + assert response.deleted == 0 + assert response.took > 0 + assert response.success() + + +@pytest.mark.sync +def test_update_by_query_with_script( + write_client: Elasticsearch, setup_ubq_tests: str +) -> None: + index = setup_ubq_tests + + ubq = ( + UpdateByQuery(using=write_client) + .index(index) + .filter(~Q("exists", field="parent_shas")) + .script(source="ctx._source.is_public = false") + ) + ubq = ubq.params(conflicts="proceed") + + response = ubq.execute() + assert response.total == 2 + assert response.updated == 2 + assert response.version_conflicts == 0 + + +@pytest.mark.sync +def test_delete_by_query_with_script( + write_client: Elasticsearch, setup_ubq_tests: str +) -> None: + index = setup_ubq_tests + + ubq = ( + UpdateByQuery(using=write_client) + .index(index) + .filter(Q("match", parent_shas="1dd19210b5be92b960f7db6f66ae526288edccc3")) + .script(source='ctx.op = "delete"') + ) + ubq = ubq.params(conflicts="proceed") + + response = ubq.execute() + + assert response.total == 1 + assert response.deleted == 1 + assert response.success() diff --git a/test_elasticsearch/test_dsl/test_integration/test_count.py b/test_elasticsearch/test_dsl/test_integration/test_count.py new file mode 100644 index 000000000..583a09dd2 --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/test_count.py @@ -0,0 +1,45 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from typing import Any + +from elasticsearch import Elasticsearch +from elasticsearch.dsl.search import Q, Search + + +def test_count_all(data_client: Elasticsearch) -> None: + s = Search(using=data_client).index("git") + assert 53 == s.count() + + +def test_count_prefetch(data_client: Elasticsearch, mocker: Any) -> None: + mocker.spy(data_client, "count") + + search = Search(using=data_client).index("git") + search.execute() + assert search.count() == 53 + assert data_client.count.call_count == 0 # type: ignore[attr-defined] + + search._response.hits.total.relation = "gte" # type: ignore[attr-defined] + assert search.count() == 53 + assert data_client.count.call_count == 1 # type: ignore[attr-defined] + + +def test_count_filter(data_client: Elasticsearch) -> None: + s = Search(using=data_client).index("git").filter(~Q("exists", field="parent_shas")) + # initial commit + repo document + assert 2 == s.count() diff --git a/test_elasticsearch/test_dsl/test_integration/test_data.py b/test_elasticsearch/test_dsl/test_integration/test_data.py new file mode 100644 index 000000000..1e80896ab --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/test_data.py @@ -0,0 +1,1093 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
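+
+# Index definitions (FLAT_GIT_INDEX, GIT_INDEX) and fixture documents: a
+# snapshot of the elasticsearch-dsl-py commit history used to populate the
+# test indices for the integration tests.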
+ +from typing import Any, Dict + +from elasticsearch import Elasticsearch + +user_mapping = { + "properties": {"name": {"type": "text", "fields": {"raw": {"type": "keyword"}}}} +} + +FLAT_GIT_INDEX: Dict[str, Any] = { + "settings": { + # custom analyzer for analyzing file paths + "analysis": { + "analyzer": { + "file_path": { + "type": "custom", + "tokenizer": "path_hierarchy", + "filter": ["lowercase"], + } + } + }, + }, + "mappings": { + "properties": { + "description": {"type": "text", "analyzer": "snowball"}, + "author": user_mapping, + "authored_date": {"type": "date"}, + "committer": user_mapping, + "committed_date": {"type": "date"}, + "parent_shas": {"type": "keyword"}, + "files": { + "type": "text", + "analyzer": "file_path", + "fielddata": True, + }, + } + }, +} + +GIT_INDEX: Dict[str, Any] = { + "settings": { + # custom analyzer for analyzing file paths + "analysis": { + "analyzer": { + "file_path": { + "type": "custom", + "tokenizer": "path_hierarchy", + "filter": ["lowercase"], + } + } + }, + }, + "mappings": { + "properties": { + # common fields + "description": {"type": "text", "analyzer": "snowball"}, + "commit_repo": {"type": "join", "relations": {"repo": "commit"}}, + # COMMIT mappings + "author": user_mapping, + "authored_date": {"type": "date"}, + "committer": user_mapping, + "committed_date": {"type": "date"}, + "parent_shas": {"type": "keyword"}, + "files": { + "type": "text", + "analyzer": "file_path", + "fielddata": True, + }, + # REPO mappings + "is_public": {"type": "boolean"}, + "owner": user_mapping, + "created_at": {"type": "date"}, + "tags": {"type": "keyword"}, + } + }, +} + + +def create_flat_git_index(client: Elasticsearch, index: str) -> None: + client.indices.create(index=index, body=FLAT_GIT_INDEX) + + +def create_git_index(client: Elasticsearch, index: str) -> None: + client.indices.create(index=index, body=GIT_INDEX) + + +DATA = [ + # repository + { + "_id": "elasticsearch-dsl-py", + "_source": { + "commit_repo": "repo", + "organization": "elasticsearch", + "created_at": "2014-03-03", + "owner": {"name": "elasticsearch"}, + "is_public": True, + }, + "_index": "git", + }, + # documents + { + "_id": "3ca6e1e73a071a705b4babd2f581c91a2a3e5037", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "elasticsearch_dsl/aggs.py", + "elasticsearch_dsl/search.py", + "test_elasticsearch_dsl/test_aggs.py", + "test_elasticsearch_dsl/test_search.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 7, "insertions": 23, "lines": 30, "files": 4}, + "description": "Make sure buckets aren't modified in-place", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["eb3e543323f189fd7b698e66295427204fff5755"], + "committed_date": "2014-05-02T13:47:19", + "authored_date": "2014-05-02T13:47:19.123+02:00", + }, + "_index": "git", + }, + { + "_id": "eb3e543323f189fd7b698e66295427204fff5755", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": ["elasticsearch_dsl/search.py"], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 0, "insertions": 18, "lines": 18, "files": 1}, + "description": "Add communication with ES server", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["dd15b6ba17dd9ba16363a51f85b31f66f1fb1157"], + "committed_date": 
"2014-05-01T13:32:14", + "authored_date": "2014-05-01T13:32:14", + }, + "_index": "git", + }, + { + "_id": "dd15b6ba17dd9ba16363a51f85b31f66f1fb1157", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "elasticsearch_dsl/utils.py", + "test_elasticsearch_dsl/test_result.py", + "elasticsearch_dsl/result.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 18, "insertions": 44, "lines": 62, "files": 3}, + "description": "Minor cleanup and adding helpers for interactive python", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["ed19caf25abd25300e707fadf3f81b05c5673446"], + "committed_date": "2014-05-01T13:30:44", + "authored_date": "2014-05-01T13:30:44", + }, + "_index": "git", + }, + { + "_id": "ed19caf25abd25300e707fadf3f81b05c5673446", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "elasticsearch_dsl/aggs.py", + "elasticsearch_dsl/search.py", + "test_elasticsearch_dsl/test_search.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 0, "insertions": 28, "lines": 28, "files": 3}, + "description": "Make sure aggs do copy-on-write", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["583e52c71e9a72c1b291ec5843683d8fa8f1ce2d"], + "committed_date": "2014-04-27T16:28:09", + "authored_date": "2014-04-27T16:28:09", + }, + "_index": "git", + }, + { + "_id": "583e52c71e9a72c1b291ec5843683d8fa8f1ce2d", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": ["elasticsearch_dsl/aggs.py"], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 1, "insertions": 1, "lines": 2, "files": 1}, + "description": "Use __setitem__ from DslBase in AggsBase", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["1dd19210b5be92b960f7db6f66ae526288edccc3"], + "committed_date": "2014-04-27T15:51:53", + "authored_date": "2014-04-27T15:51:53", + }, + "_index": "git", + }, + { + "_id": "1dd19210b5be92b960f7db6f66ae526288edccc3", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "elasticsearch_dsl/aggs.py", + "elasticsearch_dsl/query.py", + "test_elasticsearch_dsl/test_search.py", + "elasticsearch_dsl/search.py", + "elasticsearch_dsl/filter.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 21, "insertions": 98, "lines": 119, "files": 5}, + "description": "Have Search clone itself on any change besides aggs", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["b4c9e29376af2e42a4e6dc153f0f293b1a18bac3"], + "committed_date": "2014-04-26T14:49:43", + "authored_date": "2014-04-26T14:49:43", + }, + "_index": "git", + }, + { + "_id": "b4c9e29376af2e42a4e6dc153f0f293b1a18bac3", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": ["test_elasticsearch_dsl/test_result.py"], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 0, "insertions": 5, "lines": 5, "files": 1}, + "description": "Add tests for 
[] on response", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["a64a54181b232bb5943bd16960be9416e402f5f5"], + "committed_date": "2014-04-26T13:56:52", + "authored_date": "2014-04-26T13:56:52", + }, + "_index": "git", + }, + { + "_id": "a64a54181b232bb5943bd16960be9416e402f5f5", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": ["test_elasticsearch_dsl/test_result.py"], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 1, "insertions": 7, "lines": 8, "files": 1}, + "description": "Test access to missing fields raises appropriate exceptions", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["df3f778a3d37b170bde6979a4ef2d9e3e6400778"], + "committed_date": "2014-04-25T16:01:07", + "authored_date": "2014-04-25T16:01:07", + }, + "_index": "git", + }, + { + "_id": "df3f778a3d37b170bde6979a4ef2d9e3e6400778", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "elasticsearch_dsl/utils.py", + "test_elasticsearch_dsl/test_result.py", + "elasticsearch_dsl/result.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 8, "insertions": 31, "lines": 39, "files": 3}, + "description": "Support attribute access even for inner/nested objects", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["7e599e116b5ff5d271ce3fe1ebc80e82ab3d5925"], + "committed_date": "2014-04-25T15:59:02", + "authored_date": "2014-04-25T15:59:02", + }, + "_index": "git", + }, + { + "_id": "7e599e116b5ff5d271ce3fe1ebc80e82ab3d5925", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "test_elasticsearch_dsl/test_result.py", + "elasticsearch_dsl/result.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 0, "insertions": 149, "lines": 149, "files": 2}, + "description": "Added a prototype of a Respose and Result classes", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["e2882d28cb8077eaa3e5d8ae76543482d4d90f7e"], + "committed_date": "2014-04-25T15:12:15", + "authored_date": "2014-04-25T15:12:15", + }, + "_index": "git", + }, + { + "_id": "e2882d28cb8077eaa3e5d8ae76543482d4d90f7e", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": ["docs/index.rst"], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 0, "insertions": 6, "lines": 6, "files": 1}, + "description": "add warning to the docs", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["51f94d83d1c47d3b81207736ca97a1ec6302678f"], + "committed_date": "2014-04-22T19:16:21", + "authored_date": "2014-04-22T19:16:21", + }, + "_index": "git", + }, + { + "_id": "51f94d83d1c47d3b81207736ca97a1ec6302678f", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": ["elasticsearch_dsl/utils.py"], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 3, "insertions": 29, "lines": 32, "files": 1}, + "description": "Add some comments to the 
code", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["0950f6c600b49e2bf012d03b02250fb71c848555"], + "committed_date": "2014-04-22T19:12:06", + "authored_date": "2014-04-22T19:12:06", + }, + "_index": "git", + }, + { + "_id": "0950f6c600b49e2bf012d03b02250fb71c848555", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": ["README.rst"], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 0, "insertions": 6, "lines": 6, "files": 1}, + "description": "Added a WIP warning", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["54d058f5ac6be8225ef61d5529772aada42ec6c8"], + "committed_date": "2014-04-20T00:19:25", + "authored_date": "2014-04-20T00:19:25", + }, + "_index": "git", + }, + { + "_id": "54d058f5ac6be8225ef61d5529772aada42ec6c8", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "elasticsearch_dsl/__init__.py", + "elasticsearch_dsl/search.py", + "test_elasticsearch_dsl/test_search.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 36, "insertions": 7, "lines": 43, "files": 3}, + "description": "Remove the operator kwarg from .query", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["4cb07845e45787abc1f850c0b561e487e0034424"], + "committed_date": "2014-04-20T00:17:25", + "authored_date": "2014-04-20T00:17:25", + }, + "_index": "git", + }, + { + "_id": "4cb07845e45787abc1f850c0b561e487e0034424", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "elasticsearch_dsl/aggs.py", + "test_elasticsearch_dsl/test_search.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 35, "insertions": 49, "lines": 84, "files": 2}, + "description": "Complex example", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["578abe80f76aafd7e81fe46a44403e601733a938"], + "committed_date": "2014-03-24T20:48:45", + "authored_date": "2014-03-24T20:48:45", + }, + "_index": "git", + }, + { + "_id": "578abe80f76aafd7e81fe46a44403e601733a938", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": ["test_elasticsearch_dsl/test_search.py"], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 2, "insertions": 0, "lines": 2, "files": 1}, + "description": "removing extra whitespace", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["ecb84f03565940c7d294dbc80723420dcfbab340"], + "committed_date": "2014-03-24T20:42:23", + "authored_date": "2014-03-24T20:42:23", + }, + "_index": "git", + }, + { + "_id": "ecb84f03565940c7d294dbc80723420dcfbab340", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": ["test_elasticsearch_dsl/test_search.py"], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 1, "insertions": 3, "lines": 4, "files": 1}, + "description": "Make sure attribute access works for .query on Search", + "author": {"name": "Honza Kr\xe1l", "email": 
"honza.kral@gmail.com"}, + "parent_shas": ["9a247c876ab66e2bca56b25f392d054e613b1b2a"], + "committed_date": "2014-03-24T20:35:02", + "authored_date": "2014-03-24T20:34:46", + }, + "_index": "git", + }, + { + "_id": "9a247c876ab66e2bca56b25f392d054e613b1b2a", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": ["elasticsearch_dsl/search.py"], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 0, "insertions": 2, "lines": 2, "files": 1}, + "description": "Make sure .index and .doc_type methods are chainable", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["cee5e46947d510a49edd3609ff91aab7b1f3ac89"], + "committed_date": "2014-03-24T20:27:46", + "authored_date": "2014-03-24T20:27:46", + }, + "_index": "git", + }, + { + "_id": "cee5e46947d510a49edd3609ff91aab7b1f3ac89", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "elasticsearch_dsl/search.py", + "test_elasticsearch_dsl/test_search.py", + "elasticsearch_dsl/filter.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 13, "insertions": 128, "lines": 141, "files": 3}, + "description": "Added .filter and .post_filter to Search", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["1d6857182b09a556d58c6bc5bdcb243092812ba3"], + "committed_date": "2014-03-24T20:26:57", + "authored_date": "2014-03-24T20:26:57", + }, + "_index": "git", + }, + { + "_id": "1d6857182b09a556d58c6bc5bdcb243092812ba3", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": ["elasticsearch_dsl/utils.py", "elasticsearch_dsl/query.py"], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 24, "insertions": 29, "lines": 53, "files": 2}, + "description": "Extracted combination logic into DslBase", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["4ad92f15a1955846c01642318303a821e8435b75"], + "committed_date": "2014-03-24T20:03:51", + "authored_date": "2014-03-24T20:03:51", + }, + "_index": "git", + }, + { + "_id": "4ad92f15a1955846c01642318303a821e8435b75", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": ["elasticsearch_dsl/utils.py", "elasticsearch_dsl/query.py"], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 43, "insertions": 45, "lines": 88, "files": 2}, + "description": "Extracted bool-related logic to a mixin to be reused by filters", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["6eb39dc2825605543ac1ed0b45b9b6baeecc44c2"], + "committed_date": "2014-03-24T19:16:16", + "authored_date": "2014-03-24T19:16:16", + }, + "_index": "git", + }, + { + "_id": "6eb39dc2825605543ac1ed0b45b9b6baeecc44c2", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "elasticsearch_dsl/search.py", + "test_elasticsearch_dsl/test_search.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 1, "insertions": 32, "lines": 33, "files": 2}, + "description": "Enable 
otheroperators when querying on Search object", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["be094c7b307332cb6039bf9a7c984d2c7593ddff"], + "committed_date": "2014-03-24T18:25:10", + "authored_date": "2014-03-24T18:25:10", + }, + "_index": "git", + }, + { + "_id": "be094c7b307332cb6039bf9a7c984d2c7593ddff", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "elasticsearch_dsl/utils.py", + "elasticsearch_dsl/query.py", + "test_elasticsearch_dsl/test_query.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 23, "insertions": 35, "lines": 58, "files": 3}, + "description": "make sure query operations always return copies", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["b2576e3b6437e2cb9d8971fee4ead60df91fd75b"], + "committed_date": "2014-03-24T18:10:37", + "authored_date": "2014-03-24T18:03:13", + }, + "_index": "git", + }, + { + "_id": "b2576e3b6437e2cb9d8971fee4ead60df91fd75b", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "elasticsearch_dsl/query.py", + "test_elasticsearch_dsl/test_query.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 1, "insertions": 53, "lines": 54, "files": 2}, + "description": "Adding or operator for queries", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["1be002170ac3cd59d2e97824b83b88bb3c9c60ed"], + "committed_date": "2014-03-24T17:53:38", + "authored_date": "2014-03-24T17:53:38", + }, + "_index": "git", + }, + { + "_id": "1be002170ac3cd59d2e97824b83b88bb3c9c60ed", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "elasticsearch_dsl/query.py", + "test_elasticsearch_dsl/test_query.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 0, "insertions": 35, "lines": 35, "files": 2}, + "description": "Added inverting of queries", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["24e1e38b2f704f65440d96c290b7c6cd54c2e00e"], + "committed_date": "2014-03-23T17:44:36", + "authored_date": "2014-03-23T17:44:36", + }, + "_index": "git", + }, + { + "_id": "24e1e38b2f704f65440d96c290b7c6cd54c2e00e", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": ["elasticsearch_dsl/aggs.py", "elasticsearch_dsl/utils.py"], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 5, "insertions": 1, "lines": 6, "files": 2}, + "description": "Change equality checks to use .to_dict()", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["277cfaedbaf3705ed74ad6296227e1172c97a63f"], + "committed_date": "2014-03-23T17:43:01", + "authored_date": "2014-03-23T17:43:01", + }, + "_index": "git", + }, + { + "_id": "277cfaedbaf3705ed74ad6296227e1172c97a63f", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "elasticsearch_dsl/query.py", + "test_elasticsearch_dsl/test_query.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, 
+ "stats": {"deletions": 1, "insertions": 11, "lines": 12, "files": 2}, + "description": "Test combining of bool queries", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["6aa3868a6a9f35f71553ce96f9d3d63c74d054fd"], + "committed_date": "2014-03-21T15:15:06", + "authored_date": "2014-03-21T15:15:06", + }, + "_index": "git", + }, + { + "_id": "6aa3868a6a9f35f71553ce96f9d3d63c74d054fd", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "elasticsearch_dsl/query.py", + "test_elasticsearch_dsl/test_query.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 1, "insertions": 23, "lines": 24, "files": 2}, + "description": "Adding & operator for queries", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["bb311eb35e7eb53fb5ae01e3f80336866c7e3e37"], + "committed_date": "2014-03-21T15:10:08", + "authored_date": "2014-03-21T15:10:08", + }, + "_index": "git", + }, + { + "_id": "bb311eb35e7eb53fb5ae01e3f80336866c7e3e37", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "elasticsearch_dsl/utils.py", + "test_elasticsearch_dsl/test_query.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 1, "insertions": 4, "lines": 5, "files": 2}, + "description": "Don't serialize empty typed fields into dict", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["aea8ea9e421bd53a5b058495e68c3fd57bb1dacc"], + "committed_date": "2014-03-15T16:29:37", + "authored_date": "2014-03-15T16:29:37", + }, + "_index": "git", + }, + { + "_id": "aea8ea9e421bd53a5b058495e68c3fd57bb1dacc", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "elasticsearch_dsl/utils.py", + "elasticsearch_dsl/query.py", + "test_elasticsearch_dsl/test_query.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 3, "insertions": 37, "lines": 40, "files": 3}, + "description": "Bool queries, when combining just adds their params together", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["a8819a510b919be43ff3011b904f257798fb8916"], + "committed_date": "2014-03-15T16:16:40", + "authored_date": "2014-03-15T16:16:40", + }, + "_index": "git", + }, + { + "_id": "a8819a510b919be43ff3011b904f257798fb8916", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": ["test_elasticsearch_dsl/run_tests.py"], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 6, "insertions": 2, "lines": 8, "files": 1}, + "description": "Simpler run_tests.py", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["e35792a725be2325fc54d3fcb95a7d38d8075a99"], + "committed_date": "2014-03-15T16:02:21", + "authored_date": "2014-03-15T16:02:21", + }, + "_index": "git", + }, + { + "_id": "e35792a725be2325fc54d3fcb95a7d38d8075a99", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": ["elasticsearch_dsl/aggs.py", "elasticsearch_dsl/query.py"], + "committer": {"name": "Honza 
Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 2, "insertions": 2, "lines": 4, "files": 2}, + "description": "Maku we don't treat shortcuts as methods.", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["3179d778dc9e3f2883d5f7ffa63b9ae0399c16bc"], + "committed_date": "2014-03-15T15:59:21", + "authored_date": "2014-03-15T15:59:21", + }, + "_index": "git", + }, + { + "_id": "3179d778dc9e3f2883d5f7ffa63b9ae0399c16bc", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "elasticsearch_dsl/aggs.py", + "elasticsearch_dsl/query.py", + "elasticsearch_dsl/utils.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 9, "insertions": 5, "lines": 14, "files": 3}, + "description": "Centralize == of Dsl objects", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["b5e7d0c4b284211df8f7b464fcece93a27a802fb"], + "committed_date": "2014-03-10T21:37:24", + "authored_date": "2014-03-10T21:37:24", + }, + "_index": "git", + }, + { + "_id": "b5e7d0c4b284211df8f7b464fcece93a27a802fb", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "elasticsearch_dsl/aggs.py", + "elasticsearch_dsl/search.py", + "test_elasticsearch_dsl/test_search.py", + "elasticsearch_dsl/utils.py", + "elasticsearch_dsl/query.py", + "test_elasticsearch_dsl/test_aggs.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 75, "insertions": 115, "lines": 190, "files": 6}, + "description": "Experimental draft with more declarative DSL", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["0fe741b43adee5ca1424584ddd3f35fa33f8733c"], + "committed_date": "2014-03-10T21:34:39", + "authored_date": "2014-03-10T21:34:39", + }, + "_index": "git", + }, + { + "_id": "0fe741b43adee5ca1424584ddd3f35fa33f8733c", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": ["test_elasticsearch_dsl/test_search.py"], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 2, "insertions": 2, "lines": 4, "files": 1}, + "description": "Make sure .query is chainable", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["a22be5933d4b022cbacee867b1aece120208edf3"], + "committed_date": "2014-03-07T17:41:59", + "authored_date": "2014-03-07T17:41:59", + }, + "_index": "git", + }, + { + "_id": "a22be5933d4b022cbacee867b1aece120208edf3", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "elasticsearch_dsl/aggs.py", + "elasticsearch_dsl/search.py", + "test_elasticsearch_dsl/test_search.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 14, "insertions": 44, "lines": 58, "files": 3}, + "description": "Search now does aggregations", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["e823686aacfc4bdcb34ffdab337a26fa09659a9a"], + "committed_date": "2014-03-07T17:29:55", + "authored_date": "2014-03-07T17:29:55", + }, + "_index": "git", + }, + { + "_id": "e823686aacfc4bdcb34ffdab337a26fa09659a9a", + "routing": 
"elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [".gitignore"], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 0, "insertions": 1, "lines": 1, "files": 1}, + "description": "Ignore html coverage report", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["e0aedb3011c71d704deec03a8f32b2b360d6e364"], + "committed_date": "2014-03-07T17:03:23", + "authored_date": "2014-03-07T17:03:23", + }, + "_index": "git", + }, + { + "_id": "e0aedb3011c71d704deec03a8f32b2b360d6e364", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "elasticsearch_dsl/aggs.py", + "test_elasticsearch_dsl/test_aggs.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 0, "insertions": 228, "lines": 228, "files": 2}, + "description": "Added aggregation DSL objects", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["61cbc0aa62a0b776ae5e333406659dbb2f5cfbbd"], + "committed_date": "2014-03-07T16:25:55", + "authored_date": "2014-03-07T16:25:55", + }, + "_index": "git", + }, + { + "_id": "61cbc0aa62a0b776ae5e333406659dbb2f5cfbbd", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": ["elasticsearch_dsl/utils.py", "elasticsearch_dsl/query.py"], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 12, "insertions": 7, "lines": 19, "files": 2}, + "description": "Only retrieve DslClass, leave the instantiation to the caller", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["647f1017a7b17a913e07af70a3b03202f6adbdfd"], + "committed_date": "2014-03-07T15:27:43", + "authored_date": "2014-03-07T15:27:43", + }, + "_index": "git", + }, + { + "_id": "647f1017a7b17a913e07af70a3b03202f6adbdfd", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "test_elasticsearch_dsl/test_search.py", + "elasticsearch_dsl/query.py", + "test_elasticsearch_dsl/test_query.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 19, "insertions": 19, "lines": 38, "files": 3}, + "description": "No need to replicate Query suffix when in query namespace", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["7c4f94ecdb38f0e91c7ee52f579c0ea148afcc7d"], + "committed_date": "2014-03-07T15:19:01", + "authored_date": "2014-03-07T15:19:01", + }, + "_index": "git", + }, + { + "_id": "7c4f94ecdb38f0e91c7ee52f579c0ea148afcc7d", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": ["elasticsearch_dsl/utils.py"], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 2, "insertions": 3, "lines": 5, "files": 1}, + "description": "Ask forgiveness, not permission", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["c10793c2ca43688195e415b25b674ff34d58eaff"], + "committed_date": "2014-03-07T15:13:22", + "authored_date": "2014-03-07T15:13:22", + }, + "_index": "git", + }, + { + "_id": "c10793c2ca43688195e415b25b674ff34d58eaff", + 
"routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "elasticsearch_dsl/utils.py", + "elasticsearch_dsl/query.py", + "test_elasticsearch_dsl/test_query.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 24, "insertions": 27, "lines": 51, "files": 3}, + "description": "Extract DSL object registration to DslMeta", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["d8867fdb17fcf4c696657740fa08d29c36adc6ec"], + "committed_date": "2014-03-07T15:12:13", + "authored_date": "2014-03-07T15:10:31", + }, + "_index": "git", + }, + { + "_id": "d8867fdb17fcf4c696657740fa08d29c36adc6ec", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "elasticsearch_dsl/search.py", + "test_elasticsearch_dsl/test_search.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 0, "insertions": 13, "lines": 13, "files": 2}, + "description": "Search.to_dict", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["2eb7cd980d917ed6f4a4dd8e246804f710ec5082"], + "committed_date": "2014-03-07T02:58:33", + "authored_date": "2014-03-07T02:58:33", + }, + "_index": "git", + }, + { + "_id": "2eb7cd980d917ed6f4a4dd8e246804f710ec5082", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "elasticsearch_dsl/search.py", + "test_elasticsearch_dsl/test_search.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 0, "insertions": 113, "lines": 113, "files": 2}, + "description": "Basic Search object", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["11708576f9118e0dbf27ae1f8a7b799cf281b511"], + "committed_date": "2014-03-06T21:02:03", + "authored_date": "2014-03-06T21:01:05", + }, + "_index": "git", + }, + { + "_id": "11708576f9118e0dbf27ae1f8a7b799cf281b511", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "elasticsearch_dsl/query.py", + "test_elasticsearch_dsl/test_query.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 0, "insertions": 13, "lines": 13, "files": 2}, + "description": "MatchAll query + anything is anything", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["1dc496e5c7c1b2caf290df477fca2db61ebe37e0"], + "committed_date": "2014-03-06T20:40:39", + "authored_date": "2014-03-06T20:39:52", + }, + "_index": "git", + }, + { + "_id": "1dc496e5c7c1b2caf290df477fca2db61ebe37e0", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "elasticsearch_dsl/query.py", + "test_elasticsearch_dsl/test_query.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 0, "insertions": 53, "lines": 53, "files": 2}, + "description": "From_dict, Q(dict) and bool query parses it's subqueries", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["d407f99d1959b7b862a541c066d9fd737ce913f3"], + "committed_date": "2014-03-06T20:24:30", + "authored_date": 
"2014-03-06T20:24:30", + }, + "_index": "git", + }, + { + "_id": "d407f99d1959b7b862a541c066d9fd737ce913f3", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": ["CONTRIBUTING.md", "README.rst"], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 6, "insertions": 21, "lines": 27, "files": 2}, + "description": "Housekeeping - licence and updated generic CONTRIBUTING.md", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["277e8ecc7395754d1ba1f2411ec32337a3e9d73f"], + "committed_date": "2014-03-05T16:21:44", + "authored_date": "2014-03-05T16:21:44", + }, + "_index": "git", + }, + { + "_id": "277e8ecc7395754d1ba1f2411ec32337a3e9d73f", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "elasticsearch_dsl/query.py", + "setup.py", + "test_elasticsearch_dsl/test_query.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 0, "insertions": 59, "lines": 59, "files": 3}, + "description": "Automatic query registration and Q function", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["8f1e34bd8f462fec50bcc10971df2d57e2986604"], + "committed_date": "2014-03-05T16:18:52", + "authored_date": "2014-03-05T16:18:52", + }, + "_index": "git", + }, + { + "_id": "8f1e34bd8f462fec50bcc10971df2d57e2986604", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "elasticsearch_dsl/query.py", + "test_elasticsearch_dsl/test_query.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 0, "insertions": 54, "lines": 54, "files": 2}, + "description": "Initial implementation of match and bool queries", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["fcff47ddcc6d08be5739d03dd30f504fb9db2608"], + "committed_date": "2014-03-05T15:55:06", + "authored_date": "2014-03-05T15:55:06", + }, + "_index": "git", + }, + { + "_id": "fcff47ddcc6d08be5739d03dd30f504fb9db2608", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "docs/Makefile", + "CONTRIBUTING.md", + "docs/conf.py", + "LICENSE", + "Changelog.rst", + "docs/index.rst", + "docs/Changelog.rst", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 0, "insertions": 692, "lines": 692, "files": 7}, + "description": "Docs template", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["febe8127ae48fcc81778c0fb2d628f1bcc0a0350"], + "committed_date": "2014-03-04T01:42:31", + "authored_date": "2014-03-04T01:42:31", + }, + "_index": "git", + }, + { + "_id": "febe8127ae48fcc81778c0fb2d628f1bcc0a0350", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [ + "elasticsearch_dsl/__init__.py", + "test_elasticsearch_dsl/run_tests.py", + "setup.py", + "README.rst", + "test_elasticsearch_dsl/__init__.py", + ], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 0, "insertions": 82, "lines": 82, "files": 5}, + "description": "Empty project structure", + "author": 
{"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": ["2a8f1ce89760bfc72808f3945b539eae650acac9"], + "committed_date": "2014-03-04T01:37:49", + "authored_date": "2014-03-03T18:23:55", + }, + "_index": "git", + }, + { + "_id": "2a8f1ce89760bfc72808f3945b539eae650acac9", + "routing": "elasticsearch-dsl-py", + "_source": { + "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, + "files": [".gitignore"], + "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "stats": {"deletions": 0, "insertions": 9, "lines": 9, "files": 1}, + "description": "Initial commit, .gitignore", + "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, + "parent_shas": [], + "committed_date": "2014-03-03T18:15:05", + "authored_date": "2014-03-03T18:15:05", + }, + "_index": "git", + }, +] + + +def flatten_doc(d: Dict[str, Any]) -> Dict[str, Any]: + src = d["_source"].copy() + del src["commit_repo"] + return {"_index": "flat-git", "_id": d["_id"], "_source": src} + + +FLAT_DATA = [flatten_doc(d) for d in DATA if "routing" in d] + + +def create_test_git_data(d: Dict[str, Any]) -> Dict[str, Any]: + src = d["_source"].copy() + return { + "_index": "test-git", + "routing": "elasticsearch-dsl-py", + "_id": d["_id"], + "_source": src, + } + + +TEST_GIT_DATA = [create_test_git_data(d) for d in DATA] diff --git a/test_elasticsearch/test_dsl/test_integration/test_examples/__init__.py b/test_elasticsearch/test_dsl/test_integration/test_examples/__init__.py new file mode 100644 index 000000000..2a87d183f --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/test_examples/__init__.py @@ -0,0 +1,16 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. diff --git a/test_elasticsearch/test_dsl/test_integration/test_examples/_async/__init__.py b/test_elasticsearch/test_dsl/test_integration/test_examples/_async/__init__.py new file mode 100644 index 000000000..2a87d183f --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/test_examples/_async/__init__.py @@ -0,0 +1,16 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. diff --git a/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_alias_migration.py b/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_alias_migration.py new file mode 100644 index 000000000..d2b4294a4 --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_alias_migration.py @@ -0,0 +1,74 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import pytest + +from elasticsearch import AsyncElasticsearch + +from ..async_examples import alias_migration +from ..async_examples.alias_migration import ALIAS, PATTERN, BlogPost, migrate + + +@pytest.mark.asyncio +async def test_alias_migration(async_write_client: AsyncElasticsearch) -> None: + # create the index + await alias_migration.setup() + + # verify that template, index, and alias has been set up + assert await async_write_client.indices.exists_index_template(name=ALIAS) + assert await async_write_client.indices.exists(index=PATTERN) + assert await async_write_client.indices.exists_alias(name=ALIAS) + + indices = await async_write_client.indices.get(index=PATTERN) + assert len(indices) == 1 + index_name, _ = indices.popitem() + + # which means we can now save a document + with open(__file__) as f: + bp = BlogPost( + _id=0, + title="Hello World!", + tags=["testing", "dummy"], + content=f.read(), + published=None, + ) + await bp.save(refresh=True) + + assert await BlogPost.search().count() == 1 + + # _matches work which means we get BlogPost instance + bp = (await BlogPost.search().execute())[0] + assert isinstance(bp, BlogPost) + assert not bp.is_published() + assert "0" == bp.meta.id + + # create new index + await migrate() + + indices = await async_write_client.indices.get(index=PATTERN) + assert 2 == len(indices) + alias = await async_write_client.indices.get(index=ALIAS) + assert 1 == len(alias) + assert index_name not in alias + + # data has been moved properly + assert await BlogPost.search().count() == 1 + + # _matches work which means we get BlogPost instance + bp = (await BlogPost.search().execute())[0] + assert isinstance(bp, BlogPost) + assert "0" == bp.meta.id diff --git a/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_completion.py b/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_completion.py new file mode 100644 index 000000000..13e73e14a --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_completion.py @@ -0,0 +1,40 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. 
licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import pytest + +from elasticsearch import AsyncElasticsearch + +from ..async_examples.completion import Person + + +@pytest.mark.asyncio +async def test_person_suggests_on_all_variants_of_name( + async_write_client: AsyncElasticsearch, +) -> None: + await Person.init(using=async_write_client) + + await Person(_id=None, name="Honza Král", popularity=42).save(refresh=True) + + s = Person.search().suggest("t", "kra", completion={"field": "suggest"}) + response = await s.execute() + + opts = response.suggest["t"][0].options + + assert 1 == len(opts) + assert opts[0]._score == 42 + assert opts[0]._source.name == "Honza Král" diff --git a/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_composite_aggs.py b/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_composite_aggs.py new file mode 100644 index 000000000..2d3ab2df7 --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_composite_aggs.py @@ -0,0 +1,57 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
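+# Note: a minimal usage sketch, assuming scan_aggs (imported below from the
+# composite_agg example) paginates a composite aggregation and yields one
+# bucket per unique key, which is why the tests below can simply count the
+# yielded buckets:
+#
+#     s = AsyncSearch(index="flat-git")
+#     async for bucket in scan_aggs(s, [{"files": A("terms", field="files")}]):
+#         print(bucket.key.files, bucket.doc_count)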
+ +import pytest + +from elasticsearch import AsyncElasticsearch +from elasticsearch.dsl import A, AsyncSearch + +from ..async_examples.composite_agg import scan_aggs + + +@pytest.mark.asyncio +async def test_scan_aggs_exhausts_all_files( + async_data_client: AsyncElasticsearch, +) -> None: + s = AsyncSearch(index="flat-git") + key_aggs = [{"files": A("terms", field="files")}] + file_list = [f async for f in scan_aggs(s, key_aggs)] + + assert len(file_list) == 26 + + +@pytest.mark.asyncio +async def test_scan_aggs_with_multiple_aggs( + async_data_client: AsyncElasticsearch, +) -> None: + s = AsyncSearch(index="flat-git") + key_aggs = [ + {"files": A("terms", field="files")}, + { + "months": A( + "date_histogram", field="committed_date", calendar_interval="month" + ) + }, + ] + file_list = [ + f + async for f in scan_aggs( + s, key_aggs, {"first_seen": A("min", field="committed_date")} + ) + ] + + assert len(file_list) == 47 diff --git a/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_parent_child.py b/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_parent_child.py new file mode 100644 index 000000000..a730c8839 --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_parent_child.py @@ -0,0 +1,116 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from datetime import datetime + +import pytest +import pytest_asyncio + +from elasticsearch import AsyncElasticsearch +from elasticsearch.dsl import Q + +from ..async_examples.parent_child import Answer, Comment, Question, User, setup + +honza = User( + id=42, + signed_up=datetime(2013, 4, 3), + username="honzakral", + email="honza@elastic.co", + location="Prague", +) + +nick = User( + id=47, + signed_up=datetime(2017, 4, 3), + username="fxdgear", + email="nick.lang@elastic.co", + location="Colorado", +) + + +@pytest_asyncio.fixture +async def question(async_write_client: AsyncElasticsearch) -> Question: + await setup() + assert await async_write_client.indices.exists_index_template(name="base") + + # create a question object + q = Question( + _id=1, + author=nick, + tags=["elasticsearch", "python"], + title="How do I use elasticsearch from Python?", + body=""" + I want to use elasticsearch, how do I do it from Python? 
+ """, + created=None, + question_answer=None, + comments=[], + ) + await q.save() + return q + + +@pytest.mark.asyncio +async def test_comment( + async_write_client: AsyncElasticsearch, question: Question +) -> None: + await question.add_comment(nick, "Just use elasticsearch-py") + + q = await Question.get(1) # type: ignore[arg-type] + assert isinstance(q, Question) + assert 1 == len(q.comments) + + c = q.comments[0] + assert isinstance(c, Comment) + assert c.author.username == "fxdgear" + + +@pytest.mark.asyncio +async def test_question_answer( + async_write_client: AsyncElasticsearch, question: Question +) -> None: + a = await question.add_answer(honza, "Just use `elasticsearch-py`!") + + assert isinstance(a, Answer) + + # refresh the index so we can search right away + await Question._index.refresh() + + # we can now fetch answers from elasticsearch + answers = await question.get_answers() + assert 1 == len(answers) + assert isinstance(answers[0], Answer) + + search = Question.search().query( + "has_child", + type="answer", + inner_hits={}, + query=Q("term", author__username__keyword="honzakral"), + ) + response = await search.execute() + + assert 1 == len(response.hits) + + q = response.hits[0] + assert isinstance(q, Question) + assert 1 == len(q.meta.inner_hits.answer.hits) + assert q.meta.inner_hits.answer.hits is await q.get_answers() + + a = q.meta.inner_hits.answer.hits[0] + assert isinstance(a, Answer) + assert isinstance(await a.get_question(), Question) + assert (await a.get_question()).meta.id == "1" diff --git a/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_percolate.py b/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_percolate.py new file mode 100644 index 000000000..cf1721b8e --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_percolate.py @@ -0,0 +1,38 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import pytest + +from elasticsearch import AsyncElasticsearch + +from ..async_examples.percolate import BlogPost, setup + + +@pytest.mark.asyncio +async def test_post_gets_tagged_automatically( + async_write_client: AsyncElasticsearch, +) -> None: + await setup() + + bp = BlogPost(_id=47, content="nothing about snakes here!") + bp_py = BlogPost(_id=42, content="something about Python here!") + + await bp.save() + await bp_py.save() + + assert [] == bp.tags + assert {"programming", "development", "python"} == set(bp_py.tags) diff --git a/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_vectors.py b/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_vectors.py new file mode 100644 index 000000000..49c2c01da --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_vectors.py @@ -0,0 +1,56 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from hashlib import md5 +from typing import Any, List, Tuple +from unittest import SkipTest + +import pytest + +from elasticsearch import AsyncElasticsearch +from test_elasticsearch.test_dsl.async_sleep import sleep + +from ..async_examples import vectors + + +@pytest.mark.asyncio +async def test_vector_search( + async_write_client: AsyncElasticsearch, es_version: Tuple[int, ...], mocker: Any +) -> None: + # this test only runs on Elasticsearch >= 8.11 because the example uses + # a dense vector without specifying an explicit size + if es_version < (8, 11): + raise SkipTest("This test requires Elasticsearch 8.11 or newer") + + class MockModel: + def __init__(self, model: Any): + pass + + def encode(self, text: str) -> List[float]: + vector = [int(ch) for ch in md5(text.encode()).digest()] + total = sum(vector) + return [float(v) / total for v in vector] + + mocker.patch.object(vectors, "SentenceTransformer", new=MockModel) + + await vectors.create() + for i in range(10): + results = await (await vectors.search("Welcome to our team!")).execute() + if len(results.hits) > 0: + break + await sleep(0.1) + assert results[0].name == "New Employee Onboarding Guide" diff --git a/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/__init__.py b/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/__init__.py new file mode 100644 index 000000000..2a87d183f --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/__init__.py @@ -0,0 +1,16 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. diff --git a/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_alias_migration.py b/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_alias_migration.py new file mode 100644 index 000000000..9b811b692 --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_alias_migration.py @@ -0,0 +1,74 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import pytest + +from elasticsearch import Elasticsearch + +from ..examples import alias_migration +from ..examples.alias_migration import ALIAS, PATTERN, BlogPost, migrate + + +@pytest.mark.sync +def test_alias_migration(write_client: Elasticsearch) -> None: + # create the index + alias_migration.setup() + + # verify that template, index, and alias has been set up + assert write_client.indices.exists_index_template(name=ALIAS) + assert write_client.indices.exists(index=PATTERN) + assert write_client.indices.exists_alias(name=ALIAS) + + indices = write_client.indices.get(index=PATTERN) + assert len(indices) == 1 + index_name, _ = indices.popitem() + + # which means we can now save a document + with open(__file__) as f: + bp = BlogPost( + _id=0, + title="Hello World!", + tags=["testing", "dummy"], + content=f.read(), + published=None, + ) + bp.save(refresh=True) + + assert BlogPost.search().count() == 1 + + # _matches work which means we get BlogPost instance + bp = (BlogPost.search().execute())[0] + assert isinstance(bp, BlogPost) + assert not bp.is_published() + assert "0" == bp.meta.id + + # create new index + migrate() + + indices = write_client.indices.get(index=PATTERN) + assert 2 == len(indices) + alias = write_client.indices.get(index=ALIAS) + assert 1 == len(alias) + assert index_name not in alias + + # data has been moved properly + assert BlogPost.search().count() == 1 + + # _matches work which means we get BlogPost instance + bp = (BlogPost.search().execute())[0] + assert isinstance(bp, BlogPost) + assert "0" == bp.meta.id diff --git a/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_completion.py b/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_completion.py new file mode 100644 index 000000000..472e067ae --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_completion.py @@ -0,0 +1,40 @@ +# Licensed to Elasticsearch B.V. 
under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import pytest + +from elasticsearch import Elasticsearch + +from ..examples.completion import Person + + +@pytest.mark.sync +def test_person_suggests_on_all_variants_of_name( + write_client: Elasticsearch, +) -> None: + Person.init(using=write_client) + + Person(_id=None, name="Honza Král", popularity=42).save(refresh=True) + + s = Person.search().suggest("t", "kra", completion={"field": "suggest"}) + response = s.execute() + + opts = response.suggest["t"][0].options + + assert 1 == len(opts) + assert opts[0]._score == 42 + assert opts[0]._source.name == "Honza Král" diff --git a/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_composite_aggs.py b/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_composite_aggs.py new file mode 100644 index 000000000..95581a912 --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_composite_aggs.py @@ -0,0 +1,57 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import pytest + +from elasticsearch import Elasticsearch +from elasticsearch.dsl import A, Search + +from ..examples.composite_agg import scan_aggs + + +@pytest.mark.sync +def test_scan_aggs_exhausts_all_files( + data_client: Elasticsearch, +) -> None: + s = Search(index="flat-git") + key_aggs = [{"files": A("terms", field="files")}] + file_list = [f for f in scan_aggs(s, key_aggs)] + + assert len(file_list) == 26 + + +@pytest.mark.sync +def test_scan_aggs_with_multiple_aggs( + data_client: Elasticsearch, +) -> None: + s = Search(index="flat-git") + key_aggs = [ + {"files": A("terms", field="files")}, + { + "months": A( + "date_histogram", field="committed_date", calendar_interval="month" + ) + }, + ] + file_list = [ + f + for f in scan_aggs( + s, key_aggs, {"first_seen": A("min", field="committed_date")} + ) + ] + + assert len(file_list) == 47 diff --git a/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_parent_child.py b/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_parent_child.py new file mode 100644 index 000000000..faa1771f9 --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_parent_child.py @@ -0,0 +1,111 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from datetime import datetime + +import pytest + +from elasticsearch import Elasticsearch +from elasticsearch.dsl import Q + +from ..examples.parent_child import Answer, Comment, Question, User, setup + +honza = User( + id=42, + signed_up=datetime(2013, 4, 3), + username="honzakral", + email="honza@elastic.co", + location="Prague", +) + +nick = User( + id=47, + signed_up=datetime(2017, 4, 3), + username="fxdgear", + email="nick.lang@elastic.co", + location="Colorado", +) + + +@pytest.fixture +def question(write_client: Elasticsearch) -> Question: + setup() + assert write_client.indices.exists_index_template(name="base") + + # create a question object + q = Question( + _id=1, + author=nick, + tags=["elasticsearch", "python"], + title="How do I use elasticsearch from Python?", + body=""" + I want to use elasticsearch, how do I do it from Python? 
+ """, + created=None, + question_answer=None, + comments=[], + ) + q.save() + return q + + +@pytest.mark.sync +def test_comment(write_client: Elasticsearch, question: Question) -> None: + question.add_comment(nick, "Just use elasticsearch-py") + + q = Question.get(1) # type: ignore[arg-type] + assert isinstance(q, Question) + assert 1 == len(q.comments) + + c = q.comments[0] + assert isinstance(c, Comment) + assert c.author.username == "fxdgear" + + +@pytest.mark.sync +def test_question_answer(write_client: Elasticsearch, question: Question) -> None: + a = question.add_answer(honza, "Just use `elasticsearch-py`!") + + assert isinstance(a, Answer) + + # refresh the index so we can search right away + Question._index.refresh() + + # we can now fetch answers from elasticsearch + answers = question.get_answers() + assert 1 == len(answers) + assert isinstance(answers[0], Answer) + + search = Question.search().query( + "has_child", + type="answer", + inner_hits={}, + query=Q("term", author__username__keyword="honzakral"), + ) + response = search.execute() + + assert 1 == len(response.hits) + + q = response.hits[0] + assert isinstance(q, Question) + assert 1 == len(q.meta.inner_hits.answer.hits) + assert q.meta.inner_hits.answer.hits is q.get_answers() + + a = q.meta.inner_hits.answer.hits[0] + assert isinstance(a, Answer) + assert isinstance(a.get_question(), Question) + assert (a.get_question()).meta.id == "1" diff --git a/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_percolate.py b/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_percolate.py new file mode 100644 index 000000000..c8b4d2095 --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_percolate.py @@ -0,0 +1,38 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import pytest + +from elasticsearch import Elasticsearch + +from ..examples.percolate import BlogPost, setup + + +@pytest.mark.sync +def test_post_gets_tagged_automatically( + write_client: Elasticsearch, +) -> None: + setup() + + bp = BlogPost(_id=47, content="nothing about snakes here!") + bp_py = BlogPost(_id=42, content="something about Python here!") + + bp.save() + bp_py.save() + + assert [] == bp.tags + assert {"programming", "development", "python"} == set(bp_py.tags) diff --git a/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_vectors.py b/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_vectors.py new file mode 100644 index 000000000..2bfdce8c5 --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_vectors.py @@ -0,0 +1,56 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from hashlib import md5 +from typing import Any, List, Tuple +from unittest import SkipTest + +import pytest + +from elasticsearch import Elasticsearch +from test_elasticsearch.test_dsl.sleep import sleep + +from ..examples import vectors + + +@pytest.mark.sync +def test_vector_search( + write_client: Elasticsearch, es_version: Tuple[int, ...], mocker: Any +) -> None: + # this test only runs on Elasticsearch >= 8.11 because the example uses + # a dense vector without specifying an explicit size + if es_version < (8, 11): + raise SkipTest("This test requires Elasticsearch 8.11 or newer") + + class MockModel: + def __init__(self, model: Any): + pass + + def encode(self, text: str) -> List[float]: + vector = [int(ch) for ch in md5(text.encode()).digest()] + total = sum(vector) + return [float(v) / total for v in vector] + + mocker.patch.object(vectors, "SentenceTransformer", new=MockModel) + + vectors.create() + for i in range(10): + results = (vectors.search("Welcome to our team!")).execute() + if len(results.hits) > 0: + break + sleep(0.1) + assert results[0].name == "New Employee Onboarding Guide" diff --git a/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples b/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples new file mode 120000 index 000000000..96158259a --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples @@ -0,0 +1 @@ +../../../../examples/dsl/async \ No newline at end of file diff --git a/test_elasticsearch/test_dsl/test_integration/test_examples/examples b/test_elasticsearch/test_dsl/test_integration/test_examples/examples new file mode 120000 index 000000000..ff15b4ebc --- /dev/null +++ b/test_elasticsearch/test_dsl/test_integration/test_examples/examples @@ -0,0 +1 @@ +../../../../examples/dsl \ No newline at end of file diff --git a/test_elasticsearch/test_dsl/test_package.py b/test_elasticsearch/test_dsl/test_package.py new file mode 100644 index 000000000..2e989baa1 --- /dev/null +++ b/test_elasticsearch/test_dsl/test_package.py @@ -0,0 +1,22 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. + +import elasticsearch.dsl + + +def test__all__is_sorted() -> None: + assert elasticsearch.dsl.__all__ == sorted(elasticsearch.dsl.__all__) diff --git a/test_elasticsearch/test_dsl/test_query.py b/test_elasticsearch/test_dsl/test_query.py new file mode 100644 index 000000000..c09f26b1a --- /dev/null +++ b/test_elasticsearch/test_dsl/test_query.py @@ -0,0 +1,671 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from pytest import raises + +from elasticsearch.dsl import function, query, utils + + +def test_empty_Q_is_match_all() -> None: + q = query.Q() + + assert isinstance(q, query.MatchAll) + assert query.MatchAll() == q + + +def test_combined_fields_to_dict() -> None: + assert { + "combined_fields": { + "query": "this is a test", + "fields": ["name", "body", "description"], + "operator": "and", + }, + } == query.CombinedFields( + query="this is a test", + fields=["name", "body", "description"], + operator="and", + ).to_dict() + + +def test_combined_fields_to_dict_extra() -> None: + assert { + "combined_fields": { + "query": "this is a test", + "fields": ["name", "body^2"], + "operator": "or", + }, + } == query.CombinedFields( + query="this is a test", + fields=["name", "body^2"], + operator="or", + ).to_dict() + + +def test_match_to_dict() -> None: + assert {"match": {"f": "value"}} == query.Match(f="value").to_dict() + + +def test_match_to_dict_extra() -> None: + assert {"match": {"f": "value", "boost": 2}} == query.Match( + f="value", boost=2 + ).to_dict() + + +def test_fuzzy_to_dict() -> None: + assert {"fuzzy": {"f": "value"}} == query.Fuzzy(f="value").to_dict() + + +def test_prefix_to_dict() -> None: + assert {"prefix": {"f": "value"}} == query.Prefix(f="value").to_dict() + + +def test_term_to_dict() -> None: + assert {"term": {"_type": "article"}} == query.Term(_type="article").to_dict() + + +def test_terms_to_dict() -> None: + assert {"terms": {"_type": ["article", "section"]}} == query.Terms( + _type=["article", "section"] + ).to_dict() + assert {"terms": {"_type": ["article", "section"], "boost": 1.1}} == query.Terms( + _type=("article", "section"), boost=1.1 + ).to_dict() + assert {"terms": {"_type": "article", "boost": 1.1}} == query.Terms( + _type="article", boost=1.1 + ).to_dict() + assert { + "terms": {"_id": {"index": "my-other-index", "id": "my-id"}, "boost": 1.1} + } == query.Terms( + _id={"index": "my-other-index", "id": "my-id"}, boost=1.1 + ).to_dict() + + +def test_bool_to_dict() -> None: + bool = query.Bool(must=[query.Match(f="value")], should=[]) + + assert {"bool": {"must": [{"match": {"f": "value"}}]}} == bool.to_dict() + + +def test_dismax_to_dict() -> None: + assert {"dis_max": {"queries": [{"term": {"_type": "article"}}]}} == 
query.DisMax( + queries=[query.Term(_type="article")] + ).to_dict() + + +def test_bool_from_dict_issue_318() -> None: + d = {"bool": {"must_not": {"match": {"field": "value"}}}} + q = query.Q(d) + + assert q == ~query.Match(field="value") + + +def test_repr() -> None: + bool = query.Bool(must=[query.Match(f="value")], should=[]) + + assert "Bool(must=[Match(f='value')])" == repr(bool) + + +def test_query_clone() -> None: + bool = query.Bool( + must=[query.Match(x=42)], + should=[query.Match(g="v2")], + must_not=[query.Match(title="value")], + ) + bool_clone = bool._clone() + + assert bool == bool_clone + assert bool is not bool_clone + + +def test_bool_converts_its_init_args_to_queries() -> None: + q = query.Bool(must=[{"match": {"f": "value"}}]) # type: ignore + + assert len(q.must) == 1 + assert q.must[0] == query.Match(f="value") + + +def test_two_queries_make_a_bool() -> None: + q1 = query.Match(f="value1") + q2 = query.Match(message={"query": "this is a test", "opeartor": "and"}) + q = q1 & q2 + + assert isinstance(q, query.Bool) + assert [q1, q2] == q.must + + +def test_other_and_bool_appends_other_to_must() -> None: + q1 = query.Match(f="value1") + qb = query.Bool() + + q = q1 & qb + assert q is not qb + assert q.must[0] == q1 + + +def test_bool_and_other_appends_other_to_must() -> None: + q1 = query.Match(f="value1") + qb = query.Bool() + + q = qb & q1 + assert q is not qb + assert q.must[0] == q1 + + +def test_bool_and_other_sets_min_should_match_if_needed() -> None: + q1 = query.Q("term", category=1) + q2 = query.Q( + "bool", should=[query.Q("term", name="aaa"), query.Q("term", name="bbb")] + ) + + q = q1 & q2 + assert q == query.Bool( + must=[q1], + should=[query.Q("term", name="aaa"), query.Q("term", name="bbb")], + minimum_should_match=1, + ) + + +def test_bool_with_different_minimum_should_match_should_not_be_combined() -> None: + q1 = query.Q( + "bool", + minimum_should_match=2, + should=[ + query.Q("term", field="aa1"), + query.Q("term", field="aa2"), + query.Q("term", field="aa3"), + query.Q("term", field="aa4"), + ], + ) + q2 = query.Q( + "bool", + minimum_should_match=3, + should=[ + query.Q("term", field="bb1"), + query.Q("term", field="bb2"), + query.Q("term", field="bb3"), + query.Q("term", field="bb4"), + ], + ) + q3 = query.Q( + "bool", + minimum_should_match=4, + should=[ + query.Q("term", field="cc1"), + query.Q("term", field="cc2"), + query.Q("term", field="cc3"), + query.Q("term", field="cc4"), + ], + ) + + q4 = q1 | q2 + assert q4 == query.Bool(should=[q1, q2]) + + q5 = q1 | q2 | q3 + assert q5 == query.Bool(should=[q1, q2, q3]) + + +def test_empty_bool_has_min_should_match_0() -> None: + assert 0 == query.Bool()._min_should_match + + +def test_query_and_query_creates_bool() -> None: + q1 = query.Match(f=42) + q2 = query.Match(g=47) + + q = q1 & q2 + assert isinstance(q, query.Bool) + assert q.must == [q1, q2] + + +def test_match_all_and_query_equals_other() -> None: + q1 = query.Match(f=42) + q2 = query.MatchAll() + + q = q1 & q2 + assert q1 == q + + +def test_not_match_all_is_match_none() -> None: + q = query.MatchAll() + + assert ~q == query.MatchNone() + + +def test_not_match_none_is_match_all() -> None: + q = query.MatchNone() + + assert ~q == query.MatchAll() + + +def test_invert_empty_bool_is_match_none() -> None: + q = query.Bool() + + assert ~q == query.MatchNone() + + +def test_match_none_or_query_equals_query() -> None: + q1 = query.Match(f=42) + q2 = query.MatchNone() + + assert q1 | q2 == query.Match(f=42) + + +def 
test_match_none_and_query_equals_match_none() -> None: + q1 = query.Match(f=42) + q2 = query.MatchNone() + + assert q1 & q2 == query.MatchNone() + + +def test_bool_and_bool() -> None: + qt1, qt2, qt3 = query.Match(f=1), query.Match(f=2), query.Match(f=3) + + q1 = query.Bool(must=[qt1], should=[qt2]) + q2 = query.Bool(must_not=[qt3]) + assert q1 & q2 == query.Bool( + must=[qt1], must_not=[qt3], should=[qt2], minimum_should_match=0 + ) + + q1 = query.Bool(must=[qt1], should=[qt1, qt2]) + q2 = query.Bool(should=[qt3]) + assert q1 & q2 == query.Bool( + must=[qt1, qt3], should=[qt1, qt2], minimum_should_match=0 + ) + + +def test_bool_and_bool_with_min_should_match() -> None: + qt1, qt2 = query.Match(f=1), query.Match(f=2) + q1 = query.Q("bool", minimum_should_match=1, should=[qt1]) + q2 = query.Q("bool", minimum_should_match=1, should=[qt2]) + + assert query.Q("bool", must=[qt1, qt2]) == q1 & q2 + + +def test_negative_min_should_match() -> None: + qt1, qt2 = query.Match(f=1), query.Match(f=2) + q1 = query.Q("bool", minimum_should_match=-2, should=[qt1]) + q2 = query.Q("bool", minimum_should_match=1, should=[qt2]) + + with raises(ValueError): + q1 & q2 + with raises(ValueError): + q2 & q1 + + +def test_percentage_min_should_match() -> None: + qt1, qt2 = query.Match(f=1), query.Match(f=2) + q1 = query.Q("bool", minimum_should_match="50%", should=[qt1]) + q2 = query.Q("bool", minimum_should_match=1, should=[qt2]) + + with raises(ValueError): + q1 & q2 + with raises(ValueError): + q2 & q1 + + +def test_inverted_query_becomes_bool_with_must_not() -> None: + q = query.Match(f=42) + + assert ~q == query.Bool(must_not=[query.Match(f=42)]) + + +def test_inverted_query_with_must_not_become_should() -> None: + q = query.Q("bool", must_not=[query.Q("match", f=1), query.Q("match", f=2)]) + + assert ~q == query.Q("bool", should=[query.Q("match", f=1), query.Q("match", f=2)]) + + +def test_inverted_query_with_must_and_must_not() -> None: + q = query.Q( + "bool", + must=[query.Q("match", f=3), query.Q("match", f=4)], + must_not=[query.Q("match", f=1), query.Q("match", f=2)], + ) + print((~q).to_dict()) + assert ~q == query.Q( + "bool", + should=[ + # negation of must + query.Q("bool", must_not=[query.Q("match", f=3)]), + query.Q("bool", must_not=[query.Q("match", f=4)]), + # negation of must_not + query.Q("match", f=1), + query.Q("match", f=2), + ], + ) + + +def test_double_invert_returns_original_query() -> None: + q = query.Match(f=42) + + assert q == ~~q + + +def test_bool_query_gets_inverted_internally() -> None: + q = query.Bool(must_not=[query.Match(f=42)], must=[query.Match(g="v")]) + + assert ~q == query.Bool( + should=[ + # negating must + query.Bool(must_not=[query.Match(g="v")]), + # negating must_not + query.Match(f=42), + ] + ) + + +def test_match_all_or_something_is_match_all() -> None: + q1 = query.MatchAll() + q2 = query.Match(f=42) + + assert (q1 | q2) == query.MatchAll() + assert (q2 | q1) == query.MatchAll() + + +def test_or_produces_bool_with_should() -> None: + q1 = query.Match(f=42) + q2 = query.Match(g="v") + + q = q1 | q2 + assert q == query.Bool(should=[q1, q2]) + + +def test_or_bool_doesnt_loop_infinitely_issue_37() -> None: + q = query.Match(f=42) | ~query.Match(f=47) + + assert q == query.Bool( + should=[query.Bool(must_not=[query.Match(f=47)]), query.Match(f=42)] + ) + + +def test_or_bool_doesnt_loop_infinitely_issue_96() -> None: + q = ~query.Match(f=42) | ~query.Match(f=47) + + assert q == query.Bool( + should=[ + query.Bool(must_not=[query.Match(f=42)]), + 
query.Bool(must_not=[query.Match(f=47)]), + ] + ) + + +def test_bool_will_append_another_query_with_or() -> None: + qb = query.Bool(should=[query.Match(f="v"), query.Match(f="v2")]) + q = query.Match(g=42) + + assert (q | qb) == query.Bool(should=[query.Match(f="v"), query.Match(f="v2"), q]) + + +def test_bool_queries_with_only_should_get_concatenated() -> None: + q1 = query.Bool(should=[query.Match(f=1), query.Match(f=2)]) + q2 = query.Bool(should=[query.Match(f=3), query.Match(f=4)]) + + assert (q1 | q2) == query.Bool( + should=[query.Match(f=1), query.Match(f=2), query.Match(f=3), query.Match(f=4)] + ) + + +def test_two_bool_queries_append_one_to_should_if_possible() -> None: + q1 = query.Bool(should=[query.Match(f="v")]) + q2 = query.Bool(must=[query.Match(f="v")]) + + assert (q1 | q2) == query.Bool( + should=[query.Match(f="v"), query.Bool(must=[query.Match(f="v")])] + ) + assert (q2 | q1) == query.Bool( + should=[query.Match(f="v"), query.Bool(must=[query.Match(f="v")])] + ) + + +def test_queries_are_registered() -> None: + assert "match" in query.Query._classes + assert query.Query._classes["match"] is query.Match + + +def test_defining_query_registers_it() -> None: + class MyQuery(query.Query): + name = "my_query" + + assert "my_query" in query.Query._classes + assert query.Query._classes["my_query"] is MyQuery + + +def test_Q_passes_query_through() -> None: + q = query.Match(f="value1") + + assert query.Q(q) is q + + +def test_Q_constructs_query_by_name() -> None: + q = query.Q("match", f="value") + + assert isinstance(q, query.Match) + assert {"f": "value"} == q._params + + +def test_Q_translates_double_underscore_to_dots_in_param_names() -> None: + q = query.Q("match", comment__author="honza") + + assert {"comment.author": "honza"} == q._params + + +def test_Q_doesn_translate_double_underscore_to_dots_in_param_names() -> None: + q = query.Q("match", comment__author="honza", _expand__to_dot=False) + + assert {"comment__author": "honza"} == q._params + + +def test_Q_constructs_simple_query_from_dict() -> None: + q = query.Q({"match": {"f": "value"}}) + + assert isinstance(q, query.Match) + assert {"f": "value"} == q._params + + +def test_Q_constructs_compound_query_from_dict() -> None: + q = query.Q({"bool": {"must": [{"match": {"f": "value"}}]}}) + + assert q == query.Bool(must=[query.Match(f="value")]) + + +def test_Q_raises_error_when_passed_in_dict_and_params() -> None: + with raises(Exception): + # Ignore types as it's not a valid call + query.Q({"match": {"f": "value"}}, f="value") # type: ignore[call-overload] + + +def test_Q_raises_error_when_passed_in_query_and_params() -> None: + q = query.Match(f="value1") + + with raises(Exception): + # Ignore types as it's not a valid call signature + query.Q(q, f="value") # type: ignore[call-overload] + + +def test_Q_raises_error_on_unknown_query() -> None: + with raises(Exception): + query.Q("not a query", f="value") + + +def test_match_all_and_anything_is_anything() -> None: + q = query.MatchAll() + + s = query.Match(f=42) + assert q & s == s + assert s & q == s + + +def test_function_score_with_functions() -> None: + q = query.Q( + "function_score", + functions=[query.SF("script_score", script="doc['comment_count'] * _score")], + ) + + assert { + "function_score": { + "functions": [{"script_score": {"script": "doc['comment_count'] * _score"}}] + } + } == q.to_dict() + + +def test_function_score_with_no_function_is_boost_factor() -> None: + q = query.Q( + "function_score", + functions=[query.SF({"weight": 20, "filter": 
query.Q("term", f=42)})], + ) + + assert { + "function_score": {"functions": [{"filter": {"term": {"f": 42}}, "weight": 20}]} + } == q.to_dict() + + +def test_function_score_to_dict() -> None: + q = query.Q( + "function_score", + query=query.Q("match", title="python"), + functions=[ + query.SF("random_score"), + query.SF( + "field_value_factor", + field="comment_count", + filter=query.Q("term", tags="python"), + ), + ], + ) + + d = { + "function_score": { + "query": {"match": {"title": "python"}}, + "functions": [ + {"random_score": {}}, + { + "filter": {"term": {"tags": "python"}}, + "field_value_factor": {"field": "comment_count"}, + }, + ], + } + } + assert d == q.to_dict() + + +def test_function_score_class_based_to_dict() -> None: + q = query.FunctionScore( + query=query.Match(title="python"), + functions=[ + function.RandomScore(), + function.FieldValueFactor( + field="comment_count", + filter=query.Term(tags="python"), + ), + ], + ) + + d = { + "function_score": { + "query": {"match": {"title": "python"}}, + "functions": [ + {"random_score": {}}, + { + "filter": {"term": {"tags": "python"}}, + "field_value_factor": {"field": "comment_count"}, + }, + ], + } + } + assert d == q.to_dict() + + +def test_function_score_with_single_function() -> None: + d = { + "function_score": { + "filter": {"term": {"tags": "python"}}, + "script_score": {"script": "doc['comment_count'] * _score"}, + } + } + + q = query.Q(d) + assert isinstance(q, query.FunctionScore) + assert isinstance(q.filter, query.Term) + assert len(q.functions) == 1 + + sf = q.functions[0] + assert isinstance(sf, function.ScriptScore) + assert "doc['comment_count'] * _score" == sf.script + + +def test_function_score_from_dict() -> None: + d = { + "function_score": { + "filter": {"term": {"tags": "python"}}, + "functions": [ + { + "filter": {"terms": {"tags": "python"}}, + "script_score": {"script": "doc['comment_count'] * _score"}, + }, + {"boost_factor": 6}, + ], + } + } + + q = query.Q(d) + assert isinstance(q, query.FunctionScore) + assert isinstance(q.filter, query.Term) + assert len(q.functions) == 2 + + sf = q.functions[0] + assert isinstance(sf, function.ScriptScore) + assert isinstance(sf.filter, query.Terms) + + sf = q.functions[1] + assert isinstance(sf, function.BoostFactor) + assert 6 == sf.value + assert {"boost_factor": 6} == sf.to_dict() + + +def test_script_score() -> None: + d = { + "script_score": { + "query": {"match_all": {}}, + "script": {"source": "...", "params": {}}, + } + } + q = query.Q(d) + + assert isinstance(q, query.ScriptScore) + assert isinstance(q.query, query.MatchAll) + assert q.script == {"source": "...", "params": {}} + assert q.to_dict() == d + + +def test_expand_double_underscore_to_dot_setting() -> None: + q = query.Term(comment__count=2) + assert q.to_dict() == {"term": {"comment.count": 2}} + utils.EXPAND__TO_DOT = False + q = query.Term(comment__count=2) + assert q.to_dict() == {"term": {"comment__count": 2}} + utils.EXPAND__TO_DOT = True + + +def test_knn_query() -> None: + q = query.Knn(field="image-vector", query_vector=[-5, 9, -12], num_candidates=10) + assert q.to_dict() == { + "knn": { + "field": "image-vector", + "query_vector": [-5, 9, -12], + "num_candidates": 10, + } + } diff --git a/test_elasticsearch/test_dsl/test_result.py b/test_elasticsearch/test_dsl/test_result.py new file mode 100644 index 000000000..46707c715 --- /dev/null +++ b/test_elasticsearch/test_dsl/test_result.py @@ -0,0 +1,215 @@ +# Licensed to Elasticsearch B.V. 
under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import pickle +from datetime import date +from typing import Any, Dict + +from pytest import fixture, raises + +from elasticsearch.dsl import Date, Document, Object, Search, response +from elasticsearch.dsl.aggs import Terms +from elasticsearch.dsl.response.aggs import AggResponse, Bucket, BucketData +from elasticsearch.dsl.utils import AttrDict + + +@fixture +def agg_response(aggs_search: Search, aggs_data: Dict[str, Any]) -> response.Response: + return response.Response(aggs_search, aggs_data) + + +def test_agg_response_is_pickleable(agg_response: response.Response) -> None: + agg_response.hits + r = pickle.loads(pickle.dumps(agg_response)) + + assert r == agg_response + assert r._search == agg_response._search + assert r.hits == agg_response.hits + + +def test_response_is_pickleable(dummy_response: Dict[str, Any]) -> None: + res = response.Response(Search(), dummy_response.body) # type: ignore[attr-defined] + res.hits + r = pickle.loads(pickle.dumps(res)) + + assert r == res + assert r._search == res._search + assert r.hits == res.hits + + +def test_hit_is_pickleable(dummy_response: Dict[str, Any]) -> None: + res = response.Response(Search(), dummy_response) + hits = pickle.loads(pickle.dumps(res.hits)) + + assert hits == res.hits + assert hits[0].meta == res.hits[0].meta + + +def test_response_stores_search(dummy_response: Dict[str, Any]) -> None: + s = Search() + r = response.Response(s, dummy_response) + + assert r._search is s + + +def test_attribute_error_in_hits_is_not_hidden(dummy_response: Dict[str, Any]) -> None: + def f(hit: AttrDict[Any]) -> Any: + raise AttributeError() + + s = Search().doc_type(employee=f) + r = response.Response(s, dummy_response) + with raises(TypeError): + r.hits + + +def test_interactive_helpers(dummy_response: Dict[str, Any]) -> None: + res = response.Response(Search(), dummy_response) + hits = res.hits + h = hits[0] + + rhits = ( + "[<Hit(test-index/elasticsearch): {}>, <Hit(test-index/42): {}...}}>, " + "<Hit(test-index/47): {}...}}>, <Hit(test-index/53): {{}}>]" + ).format( + repr(dummy_response["hits"]["hits"][0]["_source"]), + repr(dummy_response["hits"]["hits"][1]["_source"])[:60], + repr(dummy_response["hits"]["hits"][2]["_source"])[:60], + ) + + assert res + assert f"<Response: {rhits}>" == repr(res) + assert rhits == repr(hits) + assert {"meta", "city", "name"} == set(dir(h)) + assert "<Hit(test-index/elasticsearch): %r>" % dummy_response["hits"]["hits"][0][ + "_source" + ] == repr(h) + + +def test_empty_response_is_false(dummy_response: Dict[str, Any]) -> None: + dummy_response["hits"]["hits"] = [] + res = response.Response(Search(), dummy_response) + + assert not res + + +def test_len_response(dummy_response: Dict[str, Any]) -> None: + res = response.Response(Search(), dummy_response) + assert len(res) == 4 + + +def test_iterating_over_response_gives_you_hits(dummy_response: Dict[str, Any]) -> None: + res = response.Response(Search(), dummy_response) + hits = list(h 
for h in res) + + assert res.success() + assert 123 == res.took + assert 4 == len(hits) + assert all(isinstance(h, response.Hit) for h in hits) + h = hits[0] + + assert "test-index" == h.meta.index + assert "company" == h.meta.doc_type + assert "elasticsearch" == h.meta.id + assert 12 == h.meta.score + + assert hits[1].meta.routing == "elasticsearch" + + +def test_hits_get_wrapped_to_contain_additional_attrs( + dummy_response: Dict[str, Any] +) -> None: + res = response.Response(Search(), dummy_response) + hits = res.hits + + assert 123 == hits.total # type: ignore[attr-defined] + assert 12.0 == hits.max_score # type: ignore[attr-defined] + + +def test_hits_provide_dot_and_bracket_access_to_attrs( + dummy_response: Dict[str, Any] +) -> None: + res = response.Response(Search(), dummy_response) + h = res.hits[0] + + assert "Elasticsearch" == h.name + assert "Elasticsearch" == h["name"] + + assert "Honza" == res.hits[2].name.first + + with raises(KeyError): + h["not_there"] + + with raises(AttributeError): + h.not_there + + +def test_slicing_on_response_slices_on_hits(dummy_response: Dict[str, Any]) -> None: + res = response.Response(Search(), dummy_response) + + assert res[0] is res.hits[0] + assert res[::-1] == res.hits[::-1] + + +def test_aggregation_base(agg_response: response.Response) -> None: + assert agg_response.aggs is agg_response.aggregations + assert isinstance(agg_response.aggs, response.AggResponse) + + +def test_metric_agg_works(agg_response: response.Response) -> None: + assert 25052.0 == agg_response.aggs.sum_lines.value + + +def test_aggregations_can_be_iterated_over(agg_response: response.Response) -> None: + aggs = [a for a in agg_response.aggs] + + assert len(aggs) == 3 + assert all(map(lambda a: isinstance(a, AggResponse), aggs)) + + +def test_aggregations_can_be_retrieved_by_name( + agg_response: response.Response, aggs_search: Search +) -> None: + a = agg_response.aggs["popular_files"] + + assert isinstance(a, BucketData) + assert isinstance(a._meta["aggs"], Terms) + assert a._meta["aggs"] is aggs_search.aggs.aggs["popular_files"] + + +def test_bucket_response_can_be_iterated_over(agg_response: response.Response) -> None: + popular_files = agg_response.aggregations.popular_files + + buckets = [b for b in popular_files] + assert all(isinstance(b, Bucket) for b in buckets) + assert buckets == popular_files.buckets + + +def test_bucket_keys_get_deserialized( + aggs_data: Dict[str, Any], aggs_search: Search +) -> None: + class Commit(Document): + info = Object(properties={"committed_date": Date()}) + + class Index: + name = "test-commit" + + aggs_search = aggs_search.doc_type(Commit) + agg_response = response.Response(aggs_search, aggs_data) + + per_month = agg_response.aggregations.per_month + for b in per_month: + assert isinstance(b.key, date) diff --git a/test_elasticsearch/test_dsl/test_utils.py b/test_elasticsearch/test_dsl/test_utils.py new file mode 100644 index 000000000..ac4d6df6e --- /dev/null +++ b/test_elasticsearch/test_dsl/test_utils.py @@ -0,0 +1,136 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import pickle +from typing import Any, Dict, Tuple + +from pytest import raises + +from elasticsearch.dsl import Q, serializer, utils + + +def test_attrdict_pickle() -> None: + ad: utils.AttrDict[str] = utils.AttrDict({}) + + pickled_ad = pickle.dumps(ad) + assert ad == pickle.loads(pickled_ad) + + +def test_attrlist_pickle() -> None: + al = utils.AttrList[Any]([]) + + pickled_al = pickle.dumps(al) + assert al == pickle.loads(pickled_al) + + +def test_attrlist_slice() -> None: + class MyAttrDict(utils.AttrDict[str]): + pass + + l = utils.AttrList[Any]([{}, {}], obj_wrapper=MyAttrDict) + assert isinstance(l[:][0], MyAttrDict) + + +def test_attrlist_with_type_argument() -> None: + a = utils.AttrList[str](["a", "b"]) + assert list(a) == ["a", "b"] + + +def test_attrdict_keys_items() -> None: + a = utils.AttrDict({"a": {"b": 42, "c": 47}, "d": "e"}) + assert list(a.keys()) == ["a", "d"] + assert list(a.items()) == [("a", {"b": 42, "c": 47}), ("d", "e")] + + +def test_attrdict_with_type_argument() -> None: + a = utils.AttrDict[str]({"a": "b"}) + assert list(a.keys()) == ["a"] + assert list(a.items()) == [("a", "b")] + + +def test_merge() -> None: + a: utils.AttrDict[Any] = utils.AttrDict({"a": {"b": 42, "c": 47}}) + b = {"a": {"b": 123, "d": -12}, "e": [1, 2, 3]} + + utils.merge(a, b) + + assert a == {"a": {"b": 123, "c": 47, "d": -12}, "e": [1, 2, 3]} + + +def test_merge_conflict() -> None: + data: Tuple[Dict[str, Any], ...] 
= ( + {"a": 42}, + {"a": {"b": 47}}, + ) + for d in data: + utils.merge({"a": {"b": 42}}, d) + with raises(ValueError): + utils.merge({"a": {"b": 42}}, d, True) + + +def test_attrdict_bool() -> None: + d: utils.AttrDict[str] = utils.AttrDict({}) + + assert not d + d.title = "Title" + assert d + + +def test_attrlist_items_get_wrapped_during_iteration() -> None: + al = utils.AttrList([1, object(), [1], {}]) + + l = list(iter(al)) + + assert isinstance(l[2], utils.AttrList) + assert isinstance(l[3], utils.AttrDict) + + +def test_serializer_deals_with_Attr_versions() -> None: + d = utils.AttrDict({"key": utils.AttrList([1, 2, 3])}) + + assert serializer.serializer.dumps(d) == serializer.serializer.dumps( + {"key": [1, 2, 3]} + ) + + +def test_serializer_deals_with_objects_with_to_dict() -> None: + class MyClass: + def to_dict(self) -> int: + return 42 + + assert serializer.serializer.dumps(MyClass()) == b"42" + + +def test_recursive_to_dict() -> None: + assert utils.recursive_to_dict({"k": [1, (1.0, {"v": Q("match", key="val")})]}) == { + "k": [1, (1.0, {"v": {"match": {"key": "val"}}})] + } + + +def test_attrlist_to_list() -> None: + l = utils.AttrList[Any]([{}, {}]).to_list() + assert isinstance(l, list) + assert l == [{}, {}] + + +def test_attrdict_with_reserved_keyword() -> None: + d = utils.AttrDict({"from": 10, "size": 20}) + assert d.from_ == 10 + assert d.size == 20 + d = utils.AttrDict({}) + d.from_ = 10 + assert {"from": 10} == d.to_dict() diff --git a/test_elasticsearch/test_dsl/test_validation.py b/test_elasticsearch/test_dsl/test_validation.py new file mode 100644 index 000000000..e14550eba --- /dev/null +++ b/test_elasticsearch/test_dsl/test_validation.py @@ -0,0 +1,162 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
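The validation tests added below in test_validation.py exercise the `clean()` / `full_clean()` hooks on `Document` and `InnerDoc`. A minimal sketch of that pattern, using only the public `elasticsearch.dsl` API that these tests themselves import (the class names here are illustrative and are not part of the diff):

from elasticsearch.dsl import Document, InnerDoc, Nested, Text
from elasticsearch.dsl.exceptions import ValidationException

class Reviewer(InnerDoc):
    name = Text(required=True)

    def clean(self) -> None:
        # custom per-document validation runs as part of full_clean()
        if not self.name:
            raise ValidationException("name is missing")

class Review(Document):
    body = Text(required=True)
    reviewers = Nested(Reviewer)

doc = Review(reviewers=[{"name": ""}])
try:
    # fails client-side: 'body' is required and the reviewer's name is empty
    doc.full_clean()
except ValidationException as exc:
    print(exc)

No connection to a cluster is needed; `full_clean()` runs entirely on the client, which is why these tests live alongside the other unit tests.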
+ +from datetime import datetime +from typing import Any + +from pytest import raises + +from elasticsearch.dsl import ( + Date, + Document, + InnerDoc, + Integer, + Nested, + Object, + Text, + mapped_field, +) +from elasticsearch.dsl.exceptions import ValidationException + + +class Author(InnerDoc): + name: str + email: str + + def clean(self) -> None: + if not self.name: + raise ValidationException("name is missing") + if not self.email: + raise ValidationException("email is missing") + elif self.name.lower() not in self.email: + raise ValidationException("Invalid email!") + + +class BlogPost(Document): + authors = Nested(Author, required=True) + created = Date() + inner = Object() + + +class BlogPostWithStatus(Document): + published: bool = mapped_field(init=False) + + +class AutoNowDate(Date): + def clean(self, data: Any) -> Any: + if data is None: + data = datetime.now() + return super().clean(data) + + +class Log(Document): + timestamp = AutoNowDate(required=True) + data = Text() + + +def test_required_int_can_be_0() -> None: + class DT(Document): + i = Integer(required=True) + + dt = DT(i=0) + dt.full_clean() + + +def test_required_field_cannot_be_empty_list() -> None: + class DT(Document): + i = Integer(required=True) + + dt = DT(i=[]) + with raises(ValidationException): + dt.full_clean() + + +def test_validation_works_for_lists_of_values() -> None: + class DT(Document): + i = Date(required=True) + + dt = DT(i=[datetime.now(), "not date"]) + with raises(ValidationException): + dt.full_clean() + + dt = DT(i=[datetime.now(), datetime.now()]) + dt.full_clean() + + +def test_field_with_custom_clean() -> None: + l = Log() + l.full_clean() + + assert isinstance(l.timestamp, datetime) + + +def test_empty_object() -> None: + d = BlogPost(authors=[{"name": "Honza", "email": "honza@elastic.co"}]) + d.inner = {} # type: ignore[assignment] + + d.full_clean() + + +def test_missing_required_field_raises_validation_exception() -> None: + d = BlogPost() + with raises(ValidationException): + d.full_clean() + + d = BlogPost() + d.authors.append({"name": "Honza"}) + with raises(ValidationException): + d.full_clean() + + d = BlogPost() + d.authors.append({"name": "Honza", "email": "honza@elastic.co"}) + d.full_clean() + + +def test_boolean_doesnt_treat_false_as_empty() -> None: + d = BlogPostWithStatus() + with raises(ValidationException): + d.full_clean() + d.published = False + d.full_clean() + d.published = True + d.full_clean() + + +def test_custom_validation_on_nested_gets_run() -> None: + d = BlogPost(authors=[Author(name="Honza", email="king@example.com")], created=None) + + assert isinstance(d.authors[0], Author) # type: ignore[index] + + with raises(ValidationException): + d.full_clean() + + +def test_accessing_known_fields_returns_empty_value() -> None: + d = BlogPost() + + assert [] == d.authors + + d.authors.append({}) + assert None is d.authors[0].name # type: ignore[index] + assert None is d.authors[0].email + + +def test_empty_values_are_not_serialized() -> None: + d = BlogPost(authors=[{"name": "Honza", "email": "honza@elastic.co"}], created=None) + + d.full_clean() + assert d.to_dict() == {"authors": [{"name": "Honza", "email": "honza@elastic.co"}]} diff --git a/test_elasticsearch/test_dsl/test_wrappers.py b/test_elasticsearch/test_dsl/test_wrappers.py new file mode 100644 index 000000000..8af6652a8 --- /dev/null +++ b/test_elasticsearch/test_dsl/test_wrappers.py @@ -0,0 +1,111 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from datetime import datetime, timedelta +from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence + +if TYPE_CHECKING: + from _operator import _SupportsComparison + +import pytest + +from elasticsearch.dsl import Range + + +@pytest.mark.parametrize( + "kwargs, item", + [ + ({}, 1), + ({}, -1), + ({"gte": -1}, -1), + ({"lte": 4}, 4), + ({"lte": 4, "gte": 2}, 4), + ({"lte": 4, "gte": 2}, 2), + ({"gt": datetime.now() - timedelta(seconds=10)}, datetime.now()), + ], +) +def test_range_contains( + kwargs: Mapping[str, "_SupportsComparison"], item: "_SupportsComparison" +) -> None: + assert item in Range(**kwargs) + + +@pytest.mark.parametrize( + "kwargs, item", + [ + ({"gt": -1}, -1), + ({"lt": 4}, 4), + ({"lt": 4}, 42), + ({"lte": 4, "gte": 2}, 1), + ({"lte": datetime.now() - timedelta(seconds=10)}, datetime.now()), + ], +) +def test_range_not_contains( + kwargs: Mapping[str, "_SupportsComparison"], item: "_SupportsComparison" +) -> None: + assert item not in Range(**kwargs) + + +@pytest.mark.parametrize( + "args,kwargs", + [ + (({},), {"lt": 42}), + ((), {"not_lt": 42}), + ((object(),), {}), + ((), {"lt": 1, "lte": 1}), + ((), {"gt": 1, "gte": 1}), + ], +) +def test_range_raises_value_error_on_wrong_params( + args: Sequence[Any], kwargs: Mapping[str, "_SupportsComparison"] +) -> None: + with pytest.raises(ValueError): + Range(*args, **kwargs) + + +@pytest.mark.parametrize( + "range,lower,inclusive", + [ + (Range(gt=1), 1, False), + (Range(gte=1), 1, True), + (Range(), None, False), + (Range(lt=42), None, False), + ], +) +def test_range_lower( + range: Range["_SupportsComparison"], + lower: Optional["_SupportsComparison"], + inclusive: bool, +) -> None: + assert (lower, inclusive) == range.lower + + +@pytest.mark.parametrize( + "range,upper,inclusive", + [ + (Range(lt=1), 1, False), + (Range(lte=1), 1, True), + (Range(), None, False), + (Range(gt=42), None, False), + ], +) +def test_range_upper( + range: Range["_SupportsComparison"], + upper: Optional["_SupportsComparison"], + inclusive: bool, +) -> None: + assert (upper, inclusive) == range.upper diff --git a/test_elasticsearch/test_server/test_vectorstore/test_vectorstore.py b/test_elasticsearch/test_server/test_vectorstore/test_vectorstore.py index 3e17442eb..f44334fc4 100644 --- a/test_elasticsearch/test_server/test_vectorstore/test_vectorstore.py +++ b/test_elasticsearch/test_server/test_vectorstore/test_vectorstore.py @@ -485,7 +485,7 @@ def assert_query( ) store.add_texts(texts) - ## without fetch_k parameter + # without fetch_k parameter output = store.search( query="foo", k=3, @@ -551,7 +551,7 @@ def assert_query( ) store.add_texts(texts) - ## with fetch_k parameter + # with fetch_k parameter output = store.search( query="foo", k=3, diff --git a/utils/dsl-generator.py b/utils/dsl-generator.py new file mode 100644 index 
000000000..cc905705a --- /dev/null +++ b/utils/dsl-generator.py @@ -0,0 +1,855 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import json +import re +import textwrap +from urllib.error import HTTPError +from urllib.request import urlopen + +from jinja2 import Environment, PackageLoader, select_autoescape + +from elasticsearch import VERSION + +jinja_env = Environment( + loader=PackageLoader("utils"), + autoescape=select_autoescape(), + trim_blocks=True, + lstrip_blocks=True, +) +query_py = jinja_env.get_template("query.py.tpl") +aggs_py = jinja_env.get_template("aggs.py.tpl") +response_init_py = jinja_env.get_template("response.__init__.py.tpl") +types_py = jinja_env.get_template("types.py.tpl") + +# map with name replacements for Elasticsearch attributes +PROP_REPLACEMENTS = {"from": "from_"} + +# map with Elasticsearch type replacements +# keys and values are in given in "{namespace}:{name}" format +TYPE_REPLACEMENTS = { + "_types.query_dsl:DistanceFeatureQuery": "_types.query_dsl:DistanceFeatureQueryBase", +} + +# some aggregation types are complicated to determine from the schema, so they +# have their correct type here +AGG_TYPES = { + "bucket_count_ks_test": "Pipeline", + "bucket_correlation": "Pipeline", + "bucket_sort": "Bucket", + "categorize_text": "Bucket", + "filter": "Bucket", + "moving_avg": "Pipeline", + "variable_width_histogram": "Bucket", +} + + +def property_to_class_name(name): + return "".join([w.title() if w != "ip" else "IP" for w in name.split("_")]) + + +def wrapped_doc(text, width=70, initial_indent="", subsequent_indent=""): + """Formats a docstring as a list of lines of up to the request width.""" + return textwrap.wrap( + text.replace("\n", " "), + width=width, + initial_indent=initial_indent, + subsequent_indent=subsequent_indent, + ) + + +def add_dict_type(type_): + """Add Dict[str, Any] to a Python type hint.""" + if type_.startswith("Union["): + type_ = f"{type_[:-1]}, Dict[str, Any]]" + else: + type_ = f"Union[{type_}, Dict[str, Any]]" + return type_ + + +def add_seq_dict_type(type_): + """Add Sequence[Dict[str, Any]] to a Python type hint.""" + if type_.startswith("Union["): + type_ = f"{type_[:-1]}, Sequence[Dict[str, Any]]]" + else: + type_ = f"Union[{type_}, Sequence[Dict[str, Any]]]" + return type_ + + +def add_not_set(type_): + """Add DefaultType to a Python type hint.""" + if type_.startswith("Union["): + type_ = f'{type_[:-1]}, "DefaultType"]' + else: + type_ = f'Union[{type_}, "DefaultType"]' + return type_ + + +def type_for_types_py(type_): + """Converts a type rendered in a generic way to the format needed in the + types.py module. 
+ """ + type_ = type_.replace('"DefaultType"', "DefaultType") + type_ = type_.replace('"InstrumentedField"', "InstrumentedField") + type_ = re.sub(r'"(function\.[a-zA-Z0-9_]+)"', r"\1", type_) + type_ = re.sub(r'"types\.([a-zA-Z0-9_]+)"', r'"\1"', type_) + type_ = re.sub(r'"(wrappers\.[a-zA-Z0-9_]+)"', r"\1", type_) + return type_ + + +class ElasticsearchSchema: + """Operations related to the Elasticsearch schema.""" + + def __init__(self): + response = None + for branch in [f"{VERSION[0]}.{VERSION[1]}", "main"]: + url = f"https://raw.githubusercontent.com/elastic/elasticsearch-specification/{branch}/output/schema/schema.json" + try: + response = urlopen(url) + print(f"Initializing code generation with '{branch}' specification.") + break + except HTTPError: + continue + if not response: + raise RuntimeError("Could not download Elasticsearch schema") + self.schema = json.loads(response.read()) + + # Interfaces collects interfaces that are seen while traversing the schema. + # Any interfaces collected here are then rendered as Python in the + # types.py module. + self.interfaces = [] + self.response_interfaces = [] + + def find_type(self, name, namespace=None): + for t in self.schema["types"]: + if t["name"]["name"] == name and ( + namespace is None or t["name"]["namespace"] == namespace + ): + return t + + def inherits_from(self, type_, name, namespace=None): + while "inherits" in type_: + type_ = self.find_type( + type_["inherits"]["type"]["name"], + type_["inherits"]["type"]["namespace"], + ) + if type_["name"]["name"] == name and ( + namespace is None or type_["name"]["namespace"] == namespace + ): + return True + return False + + def get_python_type(self, schema_type, for_response=False): + """Obtain Python typing details for a given schema type + + This method returns a tuple. The first element is a string with the + Python type hint. The second element is a dictionary with Python DSL + specific typing details to be stored in the DslBase._param_defs + attribute (or None if the type does not need to be in _param_defs). + + When `for_response` is `False`, any new interfaces that are discovered + are registered to be generated in "request" style, with alternative + Dict type hints and default values. If `for_response` is `True`, + interfaces are generated just with their declared type, without + Dict alternative and without defaults, to help type checkers be more + effective at parsing response expressions. 
+ """ + if schema_type["kind"] == "instance_of": + type_name = schema_type["type"] + if type_name["namespace"] in ["_types", "internal", "_builtins"]: + if type_name["name"] in ["integer", "uint", "long", "ulong"]: + return "int", None + elif type_name["name"] in ["number", "float", "double"]: + return "float", None + elif type_name["name"] == "string": + return "str", None + elif type_name["name"] == "boolean": + return "bool", None + elif type_name["name"] == "binary": + return "bytes", None + elif type_name["name"] == "null": + return "None", None + elif type_name["name"] == "Field": + if for_response: + return "str", None + else: + return 'Union[str, "InstrumentedField"]', None + else: + # not an instance of a native type, so we get the type and try again + return self.get_python_type( + self.find_type(type_name["name"], type_name["namespace"]), + for_response=for_response, + ) + elif ( + type_name["namespace"] == "_types.query_dsl" + and type_name["name"] == "QueryContainer" + ): + # QueryContainer maps to the DSL's Query class + return "Query", {"type": "query"} + elif ( + type_name["namespace"] == "_types.query_dsl" + and type_name["name"] == "FunctionScoreContainer" + ): + # FunctionScoreContainer maps to the DSL's ScoreFunction class + return "ScoreFunction", {"type": "score_function"} + elif ( + type_name["namespace"] == "_types.aggregations" + and type_name["name"] == "Buckets" + ): + if for_response: + return "Union[Sequence[Any], Dict[str, Any]]", None + else: + return "Dict[str, Query]", {"type": "query", "hash": True} + elif ( + type_name["namespace"] == "_types.aggregations" + and type_name["name"] == "CompositeAggregationSource" + ): + # QueryContainer maps to the DSL's Query class + return "Agg[_R]", None + else: + # for any other instances we get the type and recurse + type_ = self.find_type(type_name["name"], type_name["namespace"]) + if type_: + return self.get_python_type(type_, for_response=for_response) + + elif schema_type["kind"] == "type_alias": + # for an alias, we use the aliased type + return self.get_python_type(schema_type["type"], for_response=for_response) + + elif schema_type["kind"] == "array_of": + # for arrays we use Sequence[element_type] + type_, param = self.get_python_type( + schema_type["value"], for_response=for_response + ) + return f"Sequence[{type_}]", {**param, "multi": True} if param else None + + elif schema_type["kind"] == "dictionary_of": + # for dicts we use Mapping[key_type, value_type] + key_type, key_param = self.get_python_type( + schema_type["key"], for_response=for_response + ) + value_type, value_param = self.get_python_type( + schema_type["value"], for_response=for_response + ) + return f"Mapping[{key_type}, {value_type}]", ( + {**value_param, "hash": True} if value_param else None + ) + + elif schema_type["kind"] == "union_of": + if ( + len(schema_type["items"]) == 2 + and schema_type["items"][0]["kind"] == "instance_of" + and schema_type["items"][1]["kind"] == "array_of" + and schema_type["items"][0] == schema_type["items"][1]["value"] + ): + # special kind of unions in the form Union[type, Sequence[type]] + type_, param = self.get_python_type( + schema_type["items"][0], for_response=for_response + ) + if schema_type["items"][0]["type"]["name"] in [ + "CompletionSuggestOption", + "PhraseSuggestOption", + "TermSuggestOption", + ]: + # for suggest types we simplify this type and return just the array form + return ( + f"Sequence[{type_}]", + ({"type": param["type"], "multi": True} if param else None), + ) + else: + # for every 
other types we produce an union with the two alternatives + return ( + f"Union[{type_}, Sequence[{type_}]]", + ({"type": param["type"], "multi": True} if param else None), + ) + elif ( + len(schema_type["items"]) == 2 + and schema_type["items"][0]["kind"] == "instance_of" + and schema_type["items"][1]["kind"] == "instance_of" + and schema_type["items"][0]["type"] + == {"name": "T", "namespace": "_spec_utils.PipeSeparatedFlags"} + and schema_type["items"][1]["type"] + == {"name": "string", "namespace": "_builtins"} + ): + # for now we treat PipeSeparatedFlags as a special case + if "PipeSeparatedFlags" not in self.interfaces: + self.interfaces.append("PipeSeparatedFlags") + return '"types.PipeSeparatedFlags"', None + else: + # generic union type + types = list( + dict.fromkeys( # eliminate duplicates + [ + self.get_python_type(t, for_response=for_response) + for t in schema_type["items"] + ] + ) + ) + return "Union[" + ", ".join([type_ for type_, _ in types]) + "]", None + + elif schema_type["kind"] == "enum": + # enums are mapped to Literal[member, ...] + return ( + "Literal[" + + ", ".join( + [f"\"{member['name']}\"" for member in schema_type["members"]] + ) + + "]", + None, + ) + + elif schema_type["kind"] == "interface": + if schema_type["name"]["namespace"] == "_types.query_dsl": + # handle specific DSL classes explicitly to map to existing + # Python DSL classes + if schema_type["name"]["name"].endswith("RangeQuery"): + return '"wrappers.Range[Any]"', None + elif schema_type["name"]["name"].endswith("ScoreFunction"): + # When dropping Python 3.8, use `removesuffix("Function")` instead + name = schema_type["name"]["name"][:-8] + return f'"function.{name}"', None + elif schema_type["name"]["name"].endswith("DecayFunction"): + return '"function.DecayFunction"', None + elif schema_type["name"]["name"].endswith("Function"): + return f"\"function.{schema_type['name']['name']}\"", None + elif schema_type["name"]["namespace"] == "_types.analysis" and schema_type[ + "name" + ]["name"].endswith("Analyzer"): + # not expanding analyzers at this time, maybe in the future + return "str, Dict[str, Any]", None + + # to handle other interfaces we generate a type of the same name + # and add the interface to the interfaces.py module + if schema_type["name"]["name"] not in self.interfaces: + self.interfaces.append(schema_type["name"]["name"]) + if for_response: + self.response_interfaces.append(schema_type["name"]["name"]) + return f"\"types.{schema_type['name']['name']}\"", None + elif schema_type["kind"] == "user_defined_value": + # user_defined_value maps to Python's Any type + return "Any", None + + raise RuntimeError(f"Cannot find Python type for {schema_type}") + + def add_attribute(self, k, arg, for_types_py=False, for_response=False): + """Add an attribute to the internal representation of a class. + + This method adds the argument `arg` to the data structure for a class + stored in `k`. In particular, the argument is added to the `k["args"]` + list, making sure required arguments are first in the list. If the + argument is of a type that needs Python DSL specific typing details to + be stored in the DslBase._param_defs attribute, then this is added to + `k["params"]`. + + When `for_types_py` is `True`, type hints are formatted in the most + convenient way for the types.py file. When possible, double quotes are + removed from types, and for types that are in the same file the quotes + are kept to prevent forward references, but the "types." namespace is + removed. 
When `for_types_py` is `False`, all non-native types use + quotes and are namespaced. + + When `for_response` is `True`, type hints are not given the optional + dictionary representation, nor the `DefaultType` used for omitted + attributes. + """ + try: + type_, param = self.get_python_type(arg["type"], for_response=for_response) + except RuntimeError: + type_ = "Any" + param = None + if not for_response: + if type_ != "Any": + if 'Sequence["types.' in type_: + type_ = add_seq_dict_type(type_) # interfaces can be given as dicts + elif "types." in type_: + type_ = add_dict_type(type_) # interfaces can be given as dicts + type_ = add_not_set(type_) + if for_types_py: + type_ = type_for_types_py(type_) + required = "(required) " if arg["required"] else "" + server_default = ( + f" Defaults to `{arg['serverDefault']}` if omitted." + if arg.get("serverDefault") + else "" + ) + doc = wrapped_doc( + f":arg {arg['name']}: {required}{arg.get('description', '')}{server_default}", + subsequent_indent=" ", + ) + arg = { + "name": PROP_REPLACEMENTS.get(arg["name"], arg["name"]), + "type": type_, + "doc": doc, + "required": arg["required"], + } + if param is not None: + param = {"name": arg["name"], "param": param} + if arg["required"]: + # insert in the right place so that all required arguments + # appear at the top of the argument list + i = 0 + for i in range(len(k["args"]) + 1): + if i == len(k["args"]): + break + if k["args"][i].get("positional"): + continue + if k["args"][i]["required"] is False: + break + k["args"].insert(i, arg) + else: + k["args"].append(arg) + if param and "params" in k: + k["params"].append(param) + + def add_behaviors(self, type_, k, for_types_py=False, for_response=False): + """Add behaviors reported in the specification of the given type to the + class representation. + """ + if "behaviors" in type_: + for behavior in type_["behaviors"]: + if ( + behavior["type"]["name"] != "AdditionalProperty" + or behavior["type"]["namespace"] != "_spec_utils" + ): + # we do not support this behavior, so we ignore it + continue + key_type, _ = self.get_python_type( + behavior["generics"][0], for_response=for_response + ) + if "InstrumentedField" in key_type: + value_type, _ = self.get_python_type( + behavior["generics"][1], for_response=for_response + ) + if for_types_py: + value_type = value_type.replace('"DefaultType"', "DefaultType") + value_type = value_type.replace( + '"InstrumentedField"', "InstrumentedField" + ) + value_type = re.sub( + r'"(function\.[a-zA-Z0-9_]+)"', r"\1", value_type + ) + value_type = re.sub( + r'"types\.([a-zA-Z0-9_]+)"', r'"\1"', value_type + ) + value_type = re.sub( + r'"(wrappers\.[a-zA-Z0-9_]+)"', r"\1", value_type + ) + k["args"].append( + { + "name": "_field", + "type": add_not_set(key_type), + "doc": [":arg _field: The field to use in this query."], + "required": False, + "positional": True, + } + ) + k["args"].append( + { + "name": "_value", + "type": add_not_set(add_dict_type(value_type)), + "doc": [":arg _value: The query value for the field."], + "required": False, + "positional": True, + } + ) + k["is_single_field"] = True + else: + raise RuntimeError( + f"Non-field AdditionalProperty are not supported for interface {type_['name']['namespace']}:{type_['name']['name']}." + ) + + def property_to_python_class(self, p): + """Return a dictionary with template data necessary to render a schema + property as a Python class. + + Used for "container" sub-classes such as `QueryContainer`, where each + sub-class is represented by a Python DSL class. 
+ + The format is as follows: + + ```python + { + "property_name": "the name of the property", + "name": "the class name to use for the property", + "docstring": "the formatted docstring as a list of strings", + "args": [ # a Python description of each class attribute + "name": "the name of the attribute", + "type": "the Python type hint for the attribute", + "doc": ["formatted lines of documentation to add to class docstring"], + "required": bool, + "positional": bool, + ], + "params": [ + "name": "the attribute name", + "param": "the param dictionary to include in `_param_defs` for the class", + ], # a DSL-specific description of interesting attributes + "is_single_field": bool # True for single-key dicts with field key + "is_multi_field": bool # True for multi-key dicts with field keys + } + ``` + """ + k = { + "property_name": p["name"], + "name": property_to_class_name(p["name"]), + } + k["docstring"] = wrapped_doc(p.get("description") or "") + other_classes = [] + kind = p["type"]["kind"] + if kind == "instance_of": + namespace = p["type"]["type"]["namespace"] + name = p["type"]["type"]["name"] + if f"{namespace}:{name}" in TYPE_REPLACEMENTS: + namespace, name = TYPE_REPLACEMENTS[f"{namespace}:{name}"].split(":") + if name == "QueryContainer" and namespace == "_types.query_dsl": + type_ = { + "kind": "interface", + "properties": [p], + } + else: + type_ = self.find_type(name, namespace) + if p["name"] in AGG_TYPES: + k["parent"] = AGG_TYPES[p["name"]] + + if type_["kind"] == "interface": + # set the correct parent for bucket and pipeline aggregations + if self.inherits_from( + type_, "PipelineAggregationBase", "_types.aggregations" + ): + k["parent"] = "Pipeline" + elif self.inherits_from( + type_, "BucketAggregationBase", "_types.aggregations" + ): + k["parent"] = "Bucket" + + # generate class attributes + k["args"] = [] + k["params"] = [] + self.add_behaviors(type_, k) + while True: + for arg in type_["properties"]: + self.add_attribute(k, arg) + if "inherits" in type_ and "type" in type_["inherits"]: + type_ = self.find_type( + type_["inherits"]["type"]["name"], + type_["inherits"]["type"]["namespace"], + ) + else: + break + + elif type_["kind"] == "type_alias": + if type_["type"]["kind"] == "union_of": + # for unions we create sub-classes + for other in type_["type"]["items"]: + other_class = self.interface_to_python_class( + other["type"]["name"], + other["type"]["namespace"], + for_types_py=False, + ) + other_class["parent"] = k["name"] + other_classes.append(other_class) + else: + raise RuntimeError( + "Cannot generate code for instances of type_alias instances that are not unions." 
+ ) + + else: + raise RuntimeError( + f"Cannot generate code for instances of kind '{type_['kind']}'" + ) + + elif kind == "dictionary_of": + key_type, _ = self.get_python_type(p["type"]["key"]) + if "InstrumentedField" in key_type: + value_type, _ = self.get_python_type(p["type"]["value"]) + if p["type"]["singleKey"]: + # special handling for single-key dicts with field key + k["args"] = [ + { + "name": "_field", + "type": add_not_set(key_type), + "doc": [":arg _field: The field to use in this query."], + "required": False, + "positional": True, + }, + { + "name": "_value", + "type": add_not_set(add_dict_type(value_type)), + "doc": [":arg _value: The query value for the field."], + "required": False, + "positional": True, + }, + ] + k["is_single_field"] = True + else: + # special handling for multi-key dicts with field keys + k["args"] = [ + { + "name": "_fields", + "type": f"Optional[Mapping[{key_type}, {value_type}]]", + "doc": [ + ":arg _fields: A dictionary of fields with their values." + ], + "required": False, + "positional": True, + }, + ] + k["is_multi_field"] = True + else: + raise RuntimeError(f"Cannot generate code for type {p['type']}") + + else: + raise RuntimeError(f"Cannot generate code for type {p['type']}") + return [k] + other_classes + + def interface_to_python_class( + self, + interface, + namespace=None, + *, + for_types_py=True, + for_response=False, + ): + """Return a dictionary with template data necessary to render an + interface a Python class. + + This is used to render interfaces that are referenced by container + classes. The current list of rendered interfaces is passed as a second + argument to allow this method to add more interfaces to it as they are + discovered. + + The returned format is as follows: + + ```python + { + "name": "the class name to use for the interface class", + "parent": "the parent class name", + "args": [ # a Python description of each class attribute + "name": "the name of the attribute", + "type": "the Python type hint for the attribute", + "doc": ["formatted lines of documentation to add to class docstring"], + "required": bool, + "positional": bool, + ], + "buckets_as_dict": "type" # optional, only present in aggregation response + # classes that have buckets that can have a list + # or dict representation + } + ``` + """ + type_ = self.find_type(interface, namespace) + if type_["kind"] not in ["interface", "response"]: + raise RuntimeError(f"Type {interface} is not an interface") + if type_["kind"] == "response": + # we consider responses as interfaces because they also have properties + # but the location of the properties is different + type_ = type_["body"] + k = {"name": interface, "for_response": for_response, "args": []} + k["docstring"] = wrapped_doc(type_.get("description") or "") + self.add_behaviors( + type_, k, for_types_py=for_types_py, for_response=for_response + ) + generics = [] + while True: + for arg in type_["properties"]: + if interface == "ResponseBody" and arg["name"] == "hits": + k["args"].append( + { + "name": "hits", + "type": "Sequence[_R]", + "doc": [":arg hits: search results"], + "required": arg["required"], + } + ) + elif interface == "ResponseBody" and arg["name"] == "aggregations": + # Aggregations are tricky because the DSL client uses a + # flexible representation that is difficult to generate + # from the schema. + # To handle this we let the generator do its work by calling + # `add_attribute()`, but then we save the generated attribute + # apart and replace it with the DSL's `AggResponse` class. 
+ # The generated type is then used in type hints in variables + # and methods of this class. + self.add_attribute( + k, arg, for_types_py=for_types_py, for_response=for_response + ) + k["aggregate_type"] = ( + k["args"][-1]["type"] + .split("Mapping[str, ")[1] + .rsplit("]", 1)[0] + ) + k["args"][-1] = { + "name": "aggregations", + "type": '"AggResponse[_R]"', + "doc": [":arg aggregations: aggregation results"], + "required": arg["required"], + } + elif ( + "name" in type_ + and type_["name"]["name"] == "MultiBucketAggregateBase" + and arg["name"] == "buckets" + ): + # Also during aggregation response generation, the "buckets" + # attribute that many aggregation responses have is very + # complex, supporting over a dozen different aggregation + # types via generics, each in array or object configurations. + # Typing this attribute proved very difficult. A solution + # that worked with mypy and pyright is to type "buckets" + # for the list form, and create a `buckets_as_dict` + # property that is typed appropriately for accessing the + # buckets in dictionary form. + # The generic type is assumed to be the first in the list, + # which is a simplification that should be improved when a + # more complete implementation of generics is added. + if generics[0]["type"]["name"] == "Void": + generic_type = "Any" + else: + _g = self.find_type( + generics[0]["type"]["name"], + generics[0]["type"]["namespace"], + ) + generic_type, _ = self.get_python_type( + _g, for_response=for_response + ) + generic_type = type_for_types_py(generic_type) + k["args"].append( + { + "name": arg["name"], + # for the type we only include the array form, since + # this client does not request the dict form + "type": f"Sequence[{generic_type}]", + "doc": [ + ":arg buckets: (required) the aggregation buckets as a list" + ], + "required": True, + } + ) + k["buckets_as_dict"] = generic_type + else: + if interface == "Hit" and arg["name"].startswith("_"): + # Python DSL removes the undersore prefix from all the + # properties of the hit, so we do the same + arg["name"] = arg["name"][1:] + + self.add_attribute( + k, arg, for_types_py=for_types_py, for_response=for_response + ) + + if "inherits" not in type_ or "type" not in type_["inherits"]: + break + + if "generics" in type_["inherits"]: + # Generics are only supported for certain specific cases at this + # time. Here we just save them so that they can be recalled later + # while traversing over to parent classes to find inherited + # attributes. + for generic_type in type_["inherits"]["generics"]: + generics.append(generic_type) + + type_ = self.find_type( + type_["inherits"]["type"]["name"], + type_["inherits"]["type"]["namespace"], + ) + return k + + +def generate_query_py(schema, filename): + """Generate query.py with all the properties of `QueryContainer` as Python + classes. + """ + classes = [] + query_container = schema.find_type("QueryContainer", "_types.query_dsl") + for p in query_container["properties"]: + classes += schema.property_to_python_class(p) + + with open(filename, "wt") as f: + f.write(query_py.render(classes=classes, parent="Query")) + print(f"Generated {filename}.") + + +def generate_aggs_py(schema, filename): + """Generate aggs.py with all the properties of `AggregationContainer` as + Python classes. 
+ """ + classes = [] + aggs_container = schema.find_type("AggregationContainer", "_types.aggregations") + for p in aggs_container["properties"]: + if "containerProperty" not in p or not p["containerProperty"]: + classes += schema.property_to_python_class(p) + + with open(filename, "wt") as f: + f.write(aggs_py.render(classes=classes, parent="Agg")) + print(f"Generated {filename}.") + + +def generate_response_init_py(schema, filename): + """Generate response/__init__.py with all the response properties + documented and typed. + """ + search_response = schema.interface_to_python_class( + "ResponseBody", + "_global.search", + for_types_py=False, + for_response=True, + ) + ubq_response = schema.interface_to_python_class( + "Response", + "_global.update_by_query", + for_types_py=False, + for_response=True, + ) + with open(filename, "wt") as f: + f.write( + response_init_py.render(response=search_response, ubq_response=ubq_response) + ) + print(f"Generated {filename}.") + + +def generate_types_py(schema, filename): + """Generate types.py""" + classes = {} + for interface in schema.interfaces: + if interface == "PipeSeparatedFlags": + continue # handled as a special case + for_response = interface in schema.response_interfaces + k = schema.interface_to_python_class( + interface, for_types_py=True, for_response=for_response + ) + classes[k["name"]] = k + + # sort classes by being request/response and then by name + sorted_classes = sorted( + list(classes.keys()), + key=lambda i: str(int(i in schema.response_interfaces)) + i, + ) + classes_list = [] + for n in sorted_classes: + k = classes[n] + if k in classes_list: + continue + classes_list.append(k) + + with open(filename, "wt") as f: + f.write(types_py.render(classes=classes_list)) + print(f"Generated {filename}.") + + +if __name__ == "__main__": + schema = ElasticsearchSchema() + generate_query_py(schema, "elasticsearch/dsl/query.py") + generate_aggs_py(schema, "elasticsearch/dsl/aggs.py") + generate_response_init_py(schema, "elasticsearch/dsl/response/__init__.py") + generate_types_py(schema, "elasticsearch/dsl/types.py") diff --git a/utils/run-unasync-dsl.py b/utils/run-unasync-dsl.py new file mode 100644 index 000000000..d089f0e3e --- /dev/null +++ b/utils/run-unasync-dsl.py @@ -0,0 +1,151 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import os +import subprocess +import sys +from glob import glob +from pathlib import Path + +import unasync + + +def main(check=False): + # the list of directories that need to be processed with unasync + # each entry has two paths: + # - the source path with the async sources + # - the destination path where the sync sources should be written + source_dirs = [ + ( + "elasticsearch/dsl/_async/", + "elasticsearch/dsl/_sync/", + ), + ("test_elasticsearch/test_dsl/_async/", "test_elasticsearch/test_dsl/_sync/"), + ( + "test_elasticsearch/test_dsl/test_integration/_async/", + "test_elasticsearch/test_dsl/test_integration/_sync/", + ), + ( + "test_elasticsearch/test_dsl/test_integration/test_examples/_async/", + "test_elasticsearch/test_dsl/test_integration/test_examples/_sync/", + ), + ("examples/dsl/async/", "examples/dsl/"), + ] + + # Unasync all the generated async code + additional_replacements = { + "_async": "_sync", + "AsyncElasticsearch": "Elasticsearch", + "AsyncSearch": "Search", + "AsyncMultiSearch": "MultiSearch", + "AsyncEmptySearch": "EmptySearch", + "AsyncDocument": "Document", + "AsyncIndexMeta": "IndexMeta", + "AsyncIndexTemplate": "IndexTemplate", + "AsyncIndex": "Index", + "AsyncComposableIndexTemplate": "ComposableIndexTemplate", + "AsyncUpdateByQuery": "UpdateByQuery", + "AsyncMapping": "Mapping", + "AsyncFacetedSearch": "FacetedSearch", + "AsyncUsingType": "UsingType", + "async_connections": "connections", + "async_scan": "scan", + "async_simulate": "simulate", + "async_bulk": "bulk", + "async_mock_client": "mock_client", + "async_client": "client", + "async_data_client": "data_client", + "async_write_client": "write_client", + "async_pull_request": "pull_request", + "async_examples": "examples", + "async_sleep": "sleep", + "assert_awaited_once_with": "assert_called_once_with", + "pytest_asyncio": "pytest", + "asynccontextmanager": "contextmanager", + } + rules = [ + unasync.Rule( + fromdir=dir[0], + todir=f"{dir[0]}_sync_check/" if check else dir[1], + additional_replacements=additional_replacements, + ) + for dir in source_dirs + ] + + filepaths = [] + for root, _, filenames in os.walk(Path(__file__).absolute().parent.parent): + if "/site-packages" in root or "/." 
in root or "__pycache__" in root: + continue + for filename in filenames: + if filename.rpartition(".")[-1] in ( + "py", + "pyi", + ) and not filename.startswith("utils.py"): + filepaths.append(os.path.join(root, filename)) + + unasync.unasync_files(filepaths, rules) + for dir in source_dirs: + output_dir = f"{dir[0]}_sync_check/" if check else dir[1] + subprocess.check_call(["black", "--target-version=py38", output_dir]) + subprocess.check_call(["isort", output_dir]) + for file in glob("*.py", root_dir=dir[0]): + # remove asyncio from sync files + subprocess.check_call( + ["sed", "-i.bak", "/^import asyncio$/d", f"{output_dir}{file}"] + ) + subprocess.check_call( + [ + "sed", + "-i.bak", + "s/asyncio\\.run(main())/main()/", + f"{output_dir}{file}", + ] + ) + subprocess.check_call( + [ + "sed", + "-i.bak", + "s/elasticsearch\\[async\\]/elasticsearch/", + f"{output_dir}{file}", + ] + ) + subprocess.check_call( + [ + "sed", + "-i.bak", + "s/pytest.mark.asyncio/pytest.mark.sync/", + f"{output_dir}{file}", + ] + ) + subprocess.check_call(["rm", f"{output_dir}{file}.bak"]) + + if check: + # make sure there are no differences between _sync and _sync_check + subprocess.check_call( + [ + "diff", + f"{dir[1]}{file}", + f"{output_dir}{file}", + ] + ) + + if check: + subprocess.check_call(["rm", "-rf", output_dir]) + + +if __name__ == "__main__": + main(check="--check" in sys.argv) diff --git a/utils/templates/aggs.py.tpl b/utils/templates/aggs.py.tpl new file mode 100644 index 000000000..d4ba4f4cd --- /dev/null +++ b/utils/templates/aggs.py.tpl @@ -0,0 +1,320 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import collections.abc +from copy import deepcopy +from typing import ( + TYPE_CHECKING, + Any, + ClassVar, + Dict, + Generic, + Iterable, + Literal, + Mapping, + MutableMapping, + Optional, + Sequence, + Union, + cast, +) + +from elastic_transport.client_utils import DEFAULT + +from .query import Query +from .response.aggs import AggResponse, BucketData, FieldBucketData, TopHitsData +from .utils import _R, AttrDict, DslBase + +if TYPE_CHECKING: + from elastic_transport.client_utils import DefaultType + from . import types + from .document_base import InstrumentedField + from .search_base import SearchBase + + +def A( + name_or_agg: Union[MutableMapping[str, Any], "Agg[_R]", str], + filter: Optional[Union[str, "Query"]] = None, + **params: Any, +) -> "Agg[_R]": + if filter is not None: + if name_or_agg != "filter": + raise ValueError( + "Aggregation %r doesn't accept positional argument 'filter'." 
+ % name_or_agg + ) + params["filter"] = filter + + # {"terms": {"field": "tags"}, "aggs": {...}} + if isinstance(name_or_agg, collections.abc.MutableMapping): + if params: + raise ValueError("A() cannot accept parameters when passing in a dict.") + # copy to avoid modifying in-place + agg = deepcopy(name_or_agg) + # pop out nested aggs + aggs = agg.pop("aggs", None) + # pop out meta data + meta = agg.pop("meta", None) + # should be {"terms": {"field": "tags"}} + if len(agg) != 1: + raise ValueError( + 'A() can only accept dict with an aggregation ({"terms": {...}}). ' + "Instead it got (%r)" % name_or_agg + ) + agg_type, params = agg.popitem() + if aggs: + params = params.copy() + params["aggs"] = aggs + if meta: + params = params.copy() + params["meta"] = meta + return Agg[_R].get_dsl_class(agg_type)(_expand__to_dot=False, **params) + + # Terms(...) just return the nested agg + elif isinstance(name_or_agg, Agg): + if params: + raise ValueError( + "A() cannot accept parameters when passing in an Agg object." + ) + return name_or_agg + + # "terms", field="tags" + return Agg[_R].get_dsl_class(name_or_agg)(**params) + + +class Agg(DslBase, Generic[_R]): + _type_name = "agg" + _type_shortcut = staticmethod(A) + name = "" + + def __contains__(self, key: str) -> bool: + return False + + def to_dict(self) -> Dict[str, Any]: + d = super().to_dict() + if isinstance(d[self.name], dict): + n = cast(Dict[str, Any], d[self.name]) + if "meta" in n: + d["meta"] = n.pop("meta") + return d + + def result(self, search: "SearchBase[_R]", data: Dict[str, Any]) -> AttrDict[Any]: + return AggResponse[_R](self, search, data) + + +class AggBase(Generic[_R]): + aggs: Dict[str, Agg[_R]] + _base: Agg[_R] + _params: Dict[str, Any] + _param_defs: ClassVar[Dict[str, Any]] = { + "aggs": {"type": "agg", "hash": True}, + } + + def __contains__(self, key: str) -> bool: + return key in self._params.get("aggs", {}) + + def __getitem__(self, agg_name: str) -> Agg[_R]: + agg = cast( + Agg[_R], self._params.setdefault("aggs", {})[agg_name] + ) # propagate KeyError + + # make sure we're not mutating a shared state - whenever accessing a + # bucket, return a shallow copy of it to be safe + if isinstance(agg, Bucket): + agg = A(agg.name, **agg._params) + # be sure to store the copy so any modifications to it will affect us + self._params["aggs"][agg_name] = agg + + return agg + + def __setitem__(self, agg_name: str, agg: Agg[_R]) -> None: + self.aggs[agg_name] = A(agg) + + def __iter__(self) -> Iterable[str]: + return iter(self.aggs) + + def _agg( + self, + bucket: bool, + name: str, + agg_type: Union[Dict[str, Any], Agg[_R], str], + *args: Any, + **params: Any, + ) -> Agg[_R]: + agg = self[name] = A(agg_type, *args, **params) + + # For chaining - when creating new buckets return them... 
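+        # usage sketch (names below are illustrative, not from this module):
+        # bucket() returns the newly created bucket so further aggregations can
+        # be nested under it, while metric() returns the parent level, e.g.
+        #   s.aggs.bucket('per_tag', 'terms', field='tags') \
+        #       .metric('max_lines', 'max', field='lines')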
+ if bucket: + return agg + # otherwise return self._base so we can keep chaining + else: + return self._base + + def metric( + self, + name: str, + agg_type: Union[Dict[str, Any], Agg[_R], str], + *args: Any, + **params: Any, + ) -> Agg[_R]: + return self._agg(False, name, agg_type, *args, **params) + + def bucket( + self, + name: str, + agg_type: Union[Dict[str, Any], Agg[_R], str], + *args: Any, + **params: Any, + ) -> "Bucket[_R]": + return cast("Bucket[_R]", self._agg(True, name, agg_type, *args, **params)) + + def pipeline( + self, + name: str, + agg_type: Union[Dict[str, Any], Agg[_R], str], + *args: Any, + **params: Any, + ) -> "Pipeline[_R]": + return cast("Pipeline[_R]", self._agg(False, name, agg_type, *args, **params)) + + def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]: + return BucketData(self, search, data) # type: ignore[arg-type] + + +class Bucket(AggBase[_R], Agg[_R]): + def __init__(self, **params: Any): + super().__init__(**params) + # remember self for chaining + self._base = self + + def to_dict(self) -> Dict[str, Any]: + d = super(AggBase, self).to_dict() + if isinstance(d[self.name], dict): + n = cast(AttrDict[Any], d[self.name]) + if "aggs" in n: + d["aggs"] = n.pop("aggs") + return d + + +class Pipeline(Agg[_R]): + pass + + +{% for k in classes %} +class {{ k.name }}({{ k.parent if k.parent else parent }}[_R]): + """ + {% for line in k.docstring %} + {{ line }} + {% endfor %} + {% if k.args %} + {% if k.docstring %} + + {% endif %} + {% for kwarg in k.args %} + {% for line in kwarg.doc %} + {{ line }} + {% endfor %} + {% endfor %} + {% endif %} + """ + {% if k.property_name %} + name = "{{ k.property_name }}" + {% endif %} + {% if k.params %} + _param_defs = { + {% for param in k.params %} + "{{ param.name }}": {{ param.param }}, + {% endfor %} + {% if k.name == "Filter" or k.name == "Filters" or k.name == "Composite" %} + {# Some #} + "aggs": {"type": "agg", "hash": True}, + {% endif %} + } + {% endif %} + + def __init__( + self, + {% if k.args | length != 1 %} + {% for arg in k.args %} + {% if arg.positional %} + {{ arg.name }}: {{ arg.type }} = DEFAULT, + {% endif %} + {% endfor %} + {% if k.args and not k.args[-1].positional %} + *, + {% endif %} + {% for arg in k.args %} + {% if not arg.positional %} + {{ arg.name }}: {{ arg.type }} = DEFAULT, + {% endif %} + {% endfor %} + {% else %} + {# when we have just one argument, we allow it as positional or keyword #} + {% for arg in k.args %} + {{ arg.name }}: {{ arg.type }} = DEFAULT, + {% endfor %} + {% endif %} + **kwargs: Any + ): + {% if k.name == "FunctionScore" %} + {# continuation of the FunctionScore shortcut property support from above #} + if functions is DEFAULT: + functions = [] + for name in ScoreFunction._classes: + if name in kwargs: + functions.append({name: kwargs.pop(name)}) # type: ignore + {% elif k.is_single_field %} + if _field is not DEFAULT: + kwargs[str(_field)] = _value + {% elif k.is_multi_field %} + if _fields is not DEFAULT: + for field, value in _fields.items(): + kwargs[str(field)] = value + {% endif %} + super().__init__( + {% for arg in k.args %} + {% if not arg.positional %} + {{ arg.name }}={{ arg.name }}, + {% endif %} + {% endfor %} + **kwargs + ) + + {# what follows is a set of Pythonic enhancements to some of the query classes + which are outside the scope of the code generator #} + {% if k.name == "Filter" %} + def to_dict(self) -> Dict[str, Any]: + d = super().to_dict() + if isinstance(d[self.name], dict): + n = cast(AttrDict[Any], d[self.name]) + 
n.update(n.pop("filter", {})) + return d + + {% elif k.name == "Histogram" or k.name == "DateHistogram" or k.name == "AutoDateHistogram" or k.name == "VariableWidthHistogram" %} + def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]: + return FieldBucketData(self, search, data) + + {% elif k.name == "Terms" %} + def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]: + return FieldBucketData(self, search, data) + + {% elif k.name == "TopHits" %} + def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]: + return TopHitsData(self, search, data) + + {% endif %} +{% endfor %} diff --git a/utils/templates/query.py.tpl b/utils/templates/query.py.tpl new file mode 100644 index 000000000..6816f2d07 --- /dev/null +++ b/utils/templates/query.py.tpl @@ -0,0 +1,373 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import collections.abc +from copy import deepcopy +from itertools import chain +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ClassVar, + Dict, + List, + Literal, + Mapping, + MutableMapping, + Optional, + Protocol, + Sequence, + TypeVar, + Union, + cast, + overload, +) + +from elastic_transport.client_utils import DEFAULT + +# 'SF' looks unused but the test suite assumes it's available +# from this module so others are liable to do so as well. +from .function import SF # noqa: F401 +from .function import ScoreFunction +from .utils import DslBase + +if TYPE_CHECKING: + from elastic_transport.client_utils import DefaultType + from . import types, wrappers + from .document_base import InstrumentedField + +_T = TypeVar("_T") +_M = TypeVar("_M", bound=Mapping[str, Any]) + + +class QProxiedProtocol(Protocol[_T]): + _proxied: _T + + +@overload +def Q(name_or_query: MutableMapping[str, _M]) -> "Query": ... + + +@overload +def Q(name_or_query: "Query") -> "Query": ... + + +@overload +def Q(name_or_query: QProxiedProtocol[_T]) -> _T: ... + + +@overload +def Q(name_or_query: str = "match_all", **params: Any) -> "Query": ... + + +def Q( + name_or_query: Union[ + str, + "Query", + QProxiedProtocol[_T], + MutableMapping[str, _M], + ] = "match_all", + **params: Any, +) -> Union["Query", _T]: + # {"match": {"title": "python"}} + if isinstance(name_or_query, collections.abc.MutableMapping): + if params: + raise ValueError("Q() cannot accept parameters when passing in a dict.") + if len(name_or_query) != 1: + raise ValueError( + 'Q() can only accept dict with a single query ({"match": {...}}). ' + "Instead it got (%r)" % name_or_query + ) + name, q_params = deepcopy(name_or_query).popitem() + return Query.get_dsl_class(name)(_expand__to_dot=False, **q_params) + + # MatchAll() + if isinstance(name_or_query, Query): + if params: + raise ValueError( + "Q() cannot accept parameters when passing in a Query object." 
+ ) + return name_or_query + + # s.query = Q('filtered', query=s.query) + if hasattr(name_or_query, "_proxied"): + return cast(QProxiedProtocol[_T], name_or_query)._proxied + + # "match", title="python" + return Query.get_dsl_class(name_or_query)(**params) + + +class Query(DslBase): + _type_name = "query" + _type_shortcut = staticmethod(Q) + name: ClassVar[Optional[str]] = None + + # Add type annotations for methods not defined in every subclass + __ror__: ClassVar[Callable[["Query", "Query"], "Query"]] + __radd__: ClassVar[Callable[["Query", "Query"], "Query"]] + __rand__: ClassVar[Callable[["Query", "Query"], "Query"]] + + def __add__(self, other: "Query") -> "Query": + # make sure we give queries that know how to combine themselves + # preference + if hasattr(other, "__radd__"): + return other.__radd__(self) + return Bool(must=[self, other]) + + def __invert__(self) -> "Query": + return Bool(must_not=[self]) + + def __or__(self, other: "Query") -> "Query": + # make sure we give queries that know how to combine themselves + # preference + if hasattr(other, "__ror__"): + return other.__ror__(self) + return Bool(should=[self, other]) + + def __and__(self, other: "Query") -> "Query": + # make sure we give queries that know how to combine themselves + # preference + if hasattr(other, "__rand__"): + return other.__rand__(self) + return Bool(must=[self, other]) + + +{% for k in classes %} +class {{ k.name }}({{ parent }}): + """ + {% for line in k.docstring %} + {{ line }} + {% endfor %} + {% if k.args %} + {% if k.docstring %} + + {% endif %} + {% for kwarg in k.args %} + {% for line in kwarg.doc %} + {{ line }} + {% endfor %} + {% endfor %} + {% endif %} + """ + name = "{{ k.property_name }}" + {% if k.params %} + _param_defs = { + {% for param in k.params %} + "{{ param.name }}": {{ param.param }}, + {% endfor %} + {% if k.name == "FunctionScore" %} + {# The FunctionScore class implements a custom solution for the `functions` + shortcut property. 
Until the code generator can support shortcut + properties directly that solution is added here #} + "filter": {"type": "query"}, + {% endif %} + } + {% endif %} + + def __init__( + self, + {% for arg in k.args %} + {% if arg.positional %} + {{ arg.name }}: {{ arg.type }} = DEFAULT, + {% endif %} + {% endfor %} + {% if k.args and not k.args[-1].positional %} + *, + {% endif %} + {% for arg in k.args %} + {% if not arg.positional %} + {{ arg.name }}: {{ arg.type }} = DEFAULT, + {% endif %} + {% endfor %} + **kwargs: Any + ): + {% if k.name == "FunctionScore" %} + {# continuation of the FunctionScore shortcut property support from above #} + if functions is DEFAULT: + functions = [] + for name in ScoreFunction._classes: + if name in kwargs: + functions.append({name: kwargs.pop(name)}) # type: ignore[arg-type] + {% elif k.is_single_field %} + if _field is not DEFAULT: + kwargs[str(_field)] = _value + {% elif k.is_multi_field %} + if _fields is not DEFAULT: + for field, value in _fields.items(): + kwargs[str(field)] = value + {% endif %} + super().__init__( + {% for arg in k.args %} + {% if not arg.positional %} + {{ arg.name }}={{ arg.name }}, + {% endif %} + {% endfor %} + **kwargs + ) + + {# what follows is a set of Pythonic enhancements to some of the query classes + which are outside the scope of the code generator #} + {% if k.name == "MatchAll" %} + def __add__(self, other: "Query") -> "Query": + return other._clone() + + __and__ = __rand__ = __radd__ = __add__ + + def __or__(self, other: "Query") -> "MatchAll": + return self + + __ror__ = __or__ + + def __invert__(self) -> "MatchNone": + return MatchNone() + + +EMPTY_QUERY = MatchAll() + + {% elif k.name == "MatchNone" %} + def __add__(self, other: "Query") -> "MatchNone": + return self + + __and__ = __rand__ = __radd__ = __add__ + + def __or__(self, other: "Query") -> "Query": + return other._clone() + + __ror__ = __or__ + + def __invert__(self) -> MatchAll: + return MatchAll() + + {% elif k.name == "Bool" %} + def __add__(self, other: Query) -> "Bool": + q = self._clone() + if isinstance(other, Bool): + q.must += other.must + q.should += other.should + q.must_not += other.must_not + q.filter += other.filter + else: + q.must.append(other) + return q + + __radd__ = __add__ + + def __or__(self, other: Query) -> Query: + for q in (self, other): + if isinstance(q, Bool) and not any( + (q.must, q.must_not, q.filter, getattr(q, "minimum_should_match", None)) + ): + other = self if q is other else other + q = q._clone() + if isinstance(other, Bool) and not any( + ( + other.must, + other.must_not, + other.filter, + getattr(other, "minimum_should_match", None), + ) + ): + q.should.extend(other.should) + else: + q.should.append(other) + return q + + return Bool(should=[self, other]) + + __ror__ = __or__ + + @property + def _min_should_match(self) -> int: + return getattr( + self, + "minimum_should_match", + 0 if not self.should or (self.must or self.filter) else 1, + ) + + def __invert__(self) -> Query: + # Because an empty Bool query is treated like + # MatchAll the inverse should be MatchNone + if not any(chain(self.must, self.filter, self.should, self.must_not)): + return MatchNone() + + negations: List[Query] = [] + for q in chain(self.must, self.filter): + negations.append(~q) + + for q in self.must_not: + negations.append(q) + + if self.should and self._min_should_match: + negations.append(Bool(must_not=self.should[:])) + + if len(negations) == 1: + return negations[0] + return Bool(should=negations) + + def __and__(self, other: Query) -> 
Query: + q = self._clone() + if isinstance(other, Bool): + q.must += other.must + q.must_not += other.must_not + q.filter += other.filter + q.should = [] + + # reset minimum_should_match as it will get calculated below + if "minimum_should_match" in q._params: + del q._params["minimum_should_match"] + + for qx in (self, other): + min_should_match = qx._min_should_match + # TODO: percentages or negative numbers will fail here + # for now we report an error + if not isinstance(min_should_match, int) or min_should_match < 0: + raise ValueError( + "Can only combine queries with positive integer values for minimum_should_match" + ) + # all subqueries are required + if len(qx.should) <= min_should_match: + q.must.extend(qx.should) + # not all of them are required, use it and remember min_should_match + elif not q.should: + q.minimum_should_match = min_should_match + q.should = qx.should + # all queries are optional, just extend should + elif q._min_should_match == 0 and min_should_match == 0: + q.should.extend(qx.should) + # not all are required, add a should list to the must with proper min_should_match + else: + q.must.append( + Bool(should=qx.should, minimum_should_match=min_should_match) + ) + else: + if not (q.must or q.filter) and q.should: + q._params.setdefault("minimum_should_match", 1) + q.must.append(other) + return q + + __rand__ = __and__ + + {% elif k.name == "Terms" %} + def _setattr(self, name: str, value: Any) -> None: + # here we convert any iterables that are not strings to lists + if hasattr(value, "__iter__") and not isinstance(value, (str, list, dict)): + value = list(value) + super()._setattr(name, value) + + {% endif %} + +{% endfor %} diff --git a/utils/templates/response.__init__.py.tpl b/utils/templates/response.__init__.py.tpl new file mode 100644 index 000000000..991249227 --- /dev/null +++ b/utils/templates/response.__init__.py.tpl @@ -0,0 +1,225 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Generic, + Iterator, + List, + Mapping, + Optional, + Sequence, + Tuple, + Union, + cast, +) + +from ..utils import _R, AttrDict, AttrList, _wrap +from .hit import Hit, HitMeta + +if TYPE_CHECKING: + from ..aggs import Agg + from ..faceted_search_base import FacetedSearchBase + from ..search_base import Request, SearchBase + from ..update_by_query_base import UpdateByQueryBase + from .. import types + +__all__ = ["Response", "AggResponse", "UpdateByQueryResponse", "Hit", "HitMeta", "AggregateResponseType"] + + +class Response(AttrDict[Any], Generic[_R]): + """An Elasticsearch search response. 
+ + {% for arg in response.args %} + {% for line in arg.doc %} + {{ line }} + {% endfor %} + {% endfor %} + """ + _search: "SearchBase[_R]" + _faceted_search: "FacetedSearchBase[_R]" + _doc_class: Optional[_R] + _hits: List[_R] + + {% for arg in response.args %} + {% if arg.name not in ["hits", "aggregations"] %} + {{ arg.name }}: {{ arg.type }} + {% endif %} + {% endfor %} + + def __init__( + self, + search: "Request[_R]", + response: Dict[str, Any], + doc_class: Optional[_R] = None, + ): + super(AttrDict, self).__setattr__("_search", search) + super(AttrDict, self).__setattr__("_doc_class", doc_class) + super().__init__(response) + + def __iter__(self) -> Iterator[_R]: # type: ignore[override] + return iter(self.hits) + + def __getitem__(self, key: Union[slice, int, str]) -> Any: + if isinstance(key, (slice, int)): + # for slicing etc + return self.hits[key] + return super().__getitem__(key) + + def __nonzero__(self) -> bool: + return bool(self.hits) + + __bool__ = __nonzero__ + + def __repr__(self) -> str: + return "" % (self.hits or self.aggregations) + + def __len__(self) -> int: + return len(self.hits) + + def __getstate__(self) -> Tuple[Dict[str, Any], "Request[_R]", Optional[_R]]: # type: ignore[override] + return self._d_, self._search, self._doc_class + + def __setstate__( + self, state: Tuple[Dict[str, Any], "Request[_R]", Optional[_R]] # type: ignore[override] + ) -> None: + super(AttrDict, self).__setattr__("_d_", state[0]) + super(AttrDict, self).__setattr__("_search", state[1]) + super(AttrDict, self).__setattr__("_doc_class", state[2]) + + def success(self) -> bool: + return self._shards.total == self._shards.successful and not self.timed_out + + @property + def hits(self) -> List[_R]: + if not hasattr(self, "_hits"): + h = cast(AttrDict[Any], self._d_["hits"]) + + try: + hits = AttrList(list(map(self._search._get_result, h["hits"]))) + except AttributeError as e: + # avoid raising AttributeError since it will be hidden by the property + raise TypeError("Could not parse hits.", e) + + # avoid assigning _hits into self._d_ + super(AttrDict, self).__setattr__("_hits", hits) + for k in h: + setattr(self._hits, k, _wrap(h[k])) + return self._hits + + @property + def aggregations(self) -> "AggResponse[_R]": + return self.aggs + + @property + def aggs(self) -> "AggResponse[_R]": + if not hasattr(self, "_aggs"): + aggs = AggResponse[_R]( + cast("Agg[_R]", self._search.aggs), + self._search, + cast(Dict[str, Any], self._d_.get("aggregations", {})), + ) + + # avoid assigning _aggs into self._d_ + super(AttrDict, self).__setattr__("_aggs", aggs) + return cast("AggResponse[_R]", self._aggs) + + def search_after(self) -> "SearchBase[_R]": + """ + Return a ``Search`` instance that retrieves the next page of results. + + This method provides an easy way to paginate a long list of results using + the ``search_after`` option. For example:: + + page_size = 20 + s = Search()[:page_size].sort("date") + + while True: + # get a page of results + r = await s.execute() + + # do something with this page of results + + # exit the loop if we reached the end + if len(r.hits) < page_size: + break + + # get a search object with the next page of results + s = r.search_after() + + Note that the ``search_after`` option requires the search to have an + explicit ``sort`` order. 
+ """ + if len(self.hits) == 0: + raise ValueError("Cannot use search_after when there are no search results") + if not hasattr(self.hits[-1].meta, "sort"): # type: ignore[attr-defined] + raise ValueError("Cannot use search_after when results are not sorted") + return self._search.extra(search_after=self.hits[-1].meta.sort) # type: ignore[attr-defined] + + +AggregateResponseType = {{ response["aggregate_type"] }} + + +class AggResponse(AttrDict[Any], Generic[_R]): + """An Elasticsearch aggregation response.""" + _meta: Dict[str, Any] + + def __init__(self, aggs: "Agg[_R]", search: "Request[_R]", data: Dict[str, Any]): + super(AttrDict, self).__setattr__("_meta", {"search": search, "aggs": aggs}) + super().__init__(data) + + def __getitem__(self, attr_name: str) -> AggregateResponseType: + if attr_name in self._meta["aggs"]: + # don't do self._meta['aggs'][attr_name] to avoid copying + agg = self._meta["aggs"].aggs[attr_name] + return cast(AggregateResponseType, agg.result(self._meta["search"], self._d_[attr_name])) + return super().__getitem__(attr_name) # type: ignore[no-any-return] + + def __iter__(self) -> Iterator[AggregateResponseType]: # type: ignore[override] + for name in self._meta["aggs"]: + yield self[name] + + +class UpdateByQueryResponse(AttrDict[Any], Generic[_R]): + """An Elasticsearch update by query response. + + {% for arg in ubq_response.args %} + {% for line in arg.doc %} + {{ line }} + {% endfor %} + {% endfor %} + """ + _search: "UpdateByQueryBase[_R]" + + {% for arg in ubq_response.args %} + {{ arg.name }}: {{ arg.type }} + {% endfor %} + + def __init__( + self, + search: "Request[_R]", + response: Dict[str, Any], + doc_class: Optional[_R] = None, + ): + super(AttrDict, self).__setattr__("_search", search) + super(AttrDict, self).__setattr__("_doc_class", doc_class) + super().__init__(response) + + def success(self) -> bool: + return not self.timed_out and not self.failures diff --git a/utils/templates/types.py.tpl b/utils/templates/types.py.tpl new file mode 100644 index 000000000..4ee80d5cb --- /dev/null +++ b/utils/templates/types.py.tpl @@ -0,0 +1,107 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from typing import Any, Dict, Literal, Mapping, Sequence, Union + +from elastic_transport.client_utils import DEFAULT, DefaultType + +from . 
import Query +from .document_base import InstrumentedField +from .utils import AttrDict + +PipeSeparatedFlags = str + + +{% for k in classes %} +class {{ k.name }}({{ k.parent if k.parent else "AttrDict[Any]" }}): + {% if k.docstring or k.args %} + """ + {% for line in k.docstring %} + {{ line }} + {% endfor %} + {% if k.args %} + {% if k.docstring %} + + {% endif %} + {% endif %} + {% for arg in k.args %} + {% for line in arg.doc %} + {{ line }} + {% endfor %} + {% endfor %} + """ + {% for arg in k.args %} + {% if arg.name not in ["keys", "items"] %} + {{ arg.name }}: {{ arg.type }} + {% else %} + {{ arg.name }}: {{ arg.type }} # type: ignore[assignment] + {% endif %} + {% endfor %} + {% if not k.for_response %} + + def __init__( + self, + {% for arg in k.args %} + {% if arg.positional %} + {{ arg.name }}: {{ arg.type }} = DEFAULT, + {% endif %} + {% endfor %} + {% if k.args and not k.args[-1].positional %} + *, + {% endif %} + {% for arg in k.args %} + {% if not arg.positional %} + {{ arg.name }}: {{ arg.type }} = DEFAULT, + {% endif %} + {% endfor %} + **kwargs: Any + ): + {% if k.is_single_field %} + if _field is not DEFAULT: + kwargs[str(_field)] = _value + {% elif k.is_multi_field %} + if _fields is not DEFAULT: + for field, value in _fields.items(): + kwargs[str(field)] = value + {% endif %} + {% for arg in k.args %} + {% if not arg.positional %} + if {{ arg.name }} is not DEFAULT: + {% if "InstrumentedField" in arg.type %} + kwargs["{{ arg.name }}"] = str({{ arg.name }}) + {% else %} + kwargs["{{ arg.name }}"] = {{ arg.name }} + {% endif %} + {% endif %} + {% endfor %} + {% if k.parent %} + super().__init__(**kwargs) + {% else %} + super().__init__(kwargs) + {% endif %} + {% endif %} + {% if k.buckets_as_dict %} + + @property + def buckets_as_dict(self) -> Mapping[str, {{ k.buckets_as_dict }}]: + return self.buckets # type: ignore[return-value] + {% endif %} + {% else %} + pass + {% endif %} + +{% endfor %} From 9b3fcf763741bd7204d30b933c2d67a9767e38f2 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 11 Feb 2025 15:12:09 +0000 Subject: [PATCH 28/65] fix flaky integration test (#2775) (#2785) (cherry picked from commit 8aee9495dbcb69703931814f116abce3ebf4e3f6) Co-authored-by: Miguel Grinberg --- .../test_integration/test_examples/_async/test_vectors.py | 8 ++------ .../test_integration/test_examples/_sync/test_vectors.py | 8 ++------ 2 files changed, 4 insertions(+), 12 deletions(-) diff --git a/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_vectors.py b/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_vectors.py index 49c2c01da..dc45ceb52 100644 --- a/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_vectors.py +++ b/test_elasticsearch/test_dsl/test_integration/test_examples/_async/test_vectors.py @@ -22,7 +22,6 @@ import pytest from elasticsearch import AsyncElasticsearch -from test_elasticsearch.test_dsl.async_sleep import sleep from ..async_examples import vectors @@ -48,9 +47,6 @@ def encode(self, text: str) -> List[float]: mocker.patch.object(vectors, "SentenceTransformer", new=MockModel) await vectors.create() - for i in range(10): - results = await (await vectors.search("Welcome to our team!")).execute() - if len(results.hits) > 0: - break - await sleep(0.1) + await vectors.WorkplaceDoc._index.refresh() + results = await (await vectors.search("Welcome to our team!")).execute() assert results[0].name == "New Employee Onboarding Guide" diff 
--git a/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_vectors.py b/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_vectors.py index 2bfdce8c5..4b14c89a0 100644 --- a/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_vectors.py +++ b/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/test_vectors.py @@ -22,7 +22,6 @@ import pytest from elasticsearch import Elasticsearch -from test_elasticsearch.test_dsl.sleep import sleep from ..examples import vectors @@ -48,9 +47,6 @@ def encode(self, text: str) -> List[float]: mocker.patch.object(vectors, "SentenceTransformer", new=MockModel) vectors.create() - for i in range(10): - results = (vectors.search("Welcome to our team!")).execute() - if len(results.hits) > 0: - break - sleep(0.1) + vectors.WorkplaceDoc._index.refresh() + results = (vectors.search("Welcome to our team!")).execute() assert results[0].name == "New Employee Onboarding Guide" From ab6b69dfc6be2ea5305aa5b9660e71b58ad6f4a1 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 11 Feb 2025 16:09:01 +0000 Subject: [PATCH 29/65] Added DSL documentation to Guide (#2761) (#2784) * Added DSL documentation to Guide * clarify that this is a Python DSL (cherry picked from commit 8a270803fdcf258ae6a3b791ca309e4a89f57ffa) Co-authored-by: Miguel Grinberg --- docs/guide/dsl/asyncio.asciidoc | 103 ++++ docs/guide/dsl/configuration.asciidoc | 125 ++++ docs/guide/dsl/examples.asciidoc | 5 + docs/guide/dsl/faceted_search.asciidoc | 145 +++++ docs/guide/dsl/howto.asciidoc | 7 + docs/guide/dsl/persistence.asciidoc | 761 ++++++++++++++++++++++++ docs/guide/dsl/search_dsl.asciidoc | 704 ++++++++++++++++++++++ docs/guide/dsl/tutorials.asciidoc | 251 ++++++++ docs/guide/dsl/update_by_query.asciidoc | 168 ++++++ docs/guide/elasticsearch-dsl.asciidoc | 48 ++ docs/guide/index-custom-title-page.html | 5 +- docs/guide/index.asciidoc | 4 +- docs/guide/overview.asciidoc | 22 +- 13 files changed, 2329 insertions(+), 19 deletions(-) create mode 100644 docs/guide/dsl/asyncio.asciidoc create mode 100644 docs/guide/dsl/configuration.asciidoc create mode 100644 docs/guide/dsl/examples.asciidoc create mode 100644 docs/guide/dsl/faceted_search.asciidoc create mode 100644 docs/guide/dsl/howto.asciidoc create mode 100644 docs/guide/dsl/persistence.asciidoc create mode 100644 docs/guide/dsl/search_dsl.asciidoc create mode 100644 docs/guide/dsl/tutorials.asciidoc create mode 100644 docs/guide/dsl/update_by_query.asciidoc create mode 100644 docs/guide/elasticsearch-dsl.asciidoc diff --git a/docs/guide/dsl/asyncio.asciidoc b/docs/guide/dsl/asyncio.asciidoc new file mode 100644 index 000000000..ff7799dfb --- /dev/null +++ b/docs/guide/dsl/asyncio.asciidoc @@ -0,0 +1,103 @@ +[[asyncio]] +==== Using asyncio with Elasticsearch Python DSL + +The DSL module supports async/await with +https://docs.python.org/3/library/asyncio.html[asyncio]. To ensure that +you have all the required dependencies, install the `[async]` +extra: + +[source,bash] +---- +$ python -m pip install "elasticsearch[async]" +---- + +===== Connections + +Use the `async_connections` module to manage your asynchronous +connections. + +[source,python] +---- +from elasticsearch.dsl import async_connections + +async_connections.create_connection(hosts=['localhost'], timeout=20) +---- + +All the options available in the `connections` module can be used with +`async_connections`. 
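+
+For example, connection aliases work the same way as with the synchronous
+`connections` module; the sketch below assumes that interface, and the alias
+and host name are purely illustrative:
+
+[source,python]
+----
+from elasticsearch.dsl import async_connections
+
+# register a second cluster under an explicit alias
+async_connections.create_connection('qa', hosts=['esqa1.example.com'], timeout=60)
+
+# retrieve it later by its alias
+es = async_connections.get_connection('qa')
+----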
+ +====== How to avoid 'Unclosed client session / connector' warnings on exit + +These warnings come from the `aiohttp` package, which is used internally +by the `AsyncElasticsearch` client. They appear often when the +application exits and are caused by HTTP connections that are open when +they are garbage collected. To avoid these warnings, make sure that you +close your connections. + +[source,python] +---- +es = async_connections.get_connection() +await es.close() +---- + +===== Search DSL + +Use the `AsyncSearch` class to perform asynchronous searches. + +[source,python] +---- +from elasticsearch.dsl import AsyncSearch + +s = AsyncSearch().query("match", title="python") +async for hit in s: + print(hit.title) +---- + +Instead of using the `AsyncSearch` object as an asynchronous iterator, +you can explicitly call the `execute()` method to get a `Response` +object. + +[source,python] +---- +s = AsyncSearch().query("match", title="python") +response = await s.execute() +for hit in response: + print(hit.title) +---- + +An `AsyncMultiSearch` is available as well. + +[source,python] +---- +from elasticsearch.dsl import AsyncMultiSearch + +ms = AsyncMultiSearch(index='blogs') + +ms = ms.add(AsyncSearch().filter('term', tags='python')) +ms = ms.add(AsyncSearch().filter('term', tags='elasticsearch')) + +responses = await ms.execute() + +for response in responses: + print("Results for query %r." % response.search.query) + for hit in response: + print(hit.title) +---- + +===== Asynchronous Documents, Indexes, and more + +The `Document`, `Index`, `IndexTemplate`, `Mapping`, `UpdateByQuery` and +`FacetedSearch` classes all have asynchronous versions that use the same +name with an `Async` prefix. These classes expose the same interfaces as +the synchronous versions, but any methods that perform I/O are defined +as coroutines. + +Auxiliary classes that do not perform I/O do not have asynchronous +versions. The same classes can be used in synchronous and asynchronous +applications. + +When using a custom analyzer in an asynchronous +application, use the `async_simulate()` method to invoke the Analyze +API on it. + +Consult the `api` section for details about each specific method. diff --git a/docs/guide/dsl/configuration.asciidoc b/docs/guide/dsl/configuration.asciidoc new file mode 100644 index 000000000..c8cb2b4f0 --- /dev/null +++ b/docs/guide/dsl/configuration.asciidoc @@ -0,0 +1,125 @@ +=== Configuration + +There are several ways to configure connections for the library. The +easiest and most useful approach is to define one default connection +that can be used every time an API call is made without explicitly +passing in other connections. + +[NOTE] +==== +Unless you want to access multiple clusters from your application, it is +highly recommended that you use the `create_connection` method and +all operations will use that connection automatically. +==== + +==== Default connection + +To define a default connection that can be used globally, use the +`connections` module and the `create_connection` method like this: + +[source,python] +---- +from elasticsearch.dsl import connections + +connections.create_connection(hosts=['localhost'], timeout=20) +---- + +===== Single connection with an alias + +You can define the `alias` or name of a connection so you can easily +refer to it later. The default value for `alias` is `default`. 
+ +[source,python] +---- +from elasticsearch.dsl import connections + +connections.create_connection(alias='my_new_connection', hosts=['localhost'], timeout=60) +---- + +Additional keyword arguments (`hosts` and `timeout` in our example) will +be passed to the `Elasticsearch` class from `elasticsearch-py`. + +To see all possible configuration options refer to the +https://elasticsearch-py.readthedocs.io/en/latest/api/elasticsearch.html[documentation]. + +==== Multiple clusters + +You can define multiple connections to multiple clusters at the same +time using the `configure` method: + +[source,python] +---- +from elasticsearch.dsl import connections + +connections.configure( + default={'hosts': 'localhost'}, + dev={ + 'hosts': ['esdev1.example.com:9200'], + 'sniff_on_start': True + } +) +---- + +Such connections will be constructed lazily when requested for the first +time. + +You can alternatively define multiple connections by adding them one by +one as shown in the following example: + +[source,python] +---- +# if you have configuration options to be passed to Elasticsearch.__init__ +# this also shows creating a connection with the alias 'qa' +connections.create_connection('qa', hosts=['esqa1.example.com'], sniff_on_start=True) + +# if you already have an Elasticsearch instance ready +connections.add_connection('another_qa', my_client) +---- + +===== Using aliases + +When using multiple connections, you can refer to them using the string +alias specified when you created the connection. + +This example shows how to use an alias to a connection: + +[source,python] +---- +s = Search(using='qa') +---- + +A `KeyError` will be raised if there is no connection registered with +that alias. + +==== Manual + +If you don't want to supply a global configuration, you can always pass +in your own connection as an instance of `elasticsearch.Elasticsearch` +with the parameter `using` wherever it is accepted like this: + +[source,python] +---- +s = Search(using=Elasticsearch('localhost')) +---- + +You can even use this approach to override any connection the object +might be already associated with: + +[source,python] +---- +s = s.using(Elasticsearch('otherhost:9200')) +---- + +[NOTE] +==== +When using the `dsl` module, it is highly recommended that you +use the built-in serializer +(`elasticsearch.dsl.serializer.serializer`) to ensure your objects +are correctly serialized into `JSON` every time. The +`create_connection` method that is described here (and that the +`configure` method uses under the hood) will do that automatically for +you, unless you explicitly specify your own serializer. The built-in +serializer also allows you to serialize your own objects - just define a +`to_dict()` method on your objects and that method will be +automatically called when serializing your custom objects to `JSON`. +==== diff --git a/docs/guide/dsl/examples.asciidoc b/docs/guide/dsl/examples.asciidoc new file mode 100644 index 000000000..5d22f84e5 --- /dev/null +++ b/docs/guide/dsl/examples.asciidoc @@ -0,0 +1,5 @@ +=== Examples + +Please see the +https://github.com/elastic/elasticsearch-py/tree/master/examples/dsl[DSL examples] +directory to see some complex examples using the DSL module. 
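+
+As a small, self-contained sketch of the custom-serialization hook described
+in the Configuration documentation (the `GeoPoint` class, index and field
+names are illustrative, not part of the library):
+
+[source,python]
+----
+from elasticsearch.dsl import Search, connections
+
+connections.create_connection(hosts=['localhost'])
+
+class GeoPoint:
+    # a plain application object, not a DSL class
+    def __init__(self, lat, lon):
+        self.lat = lat
+        self.lon = lon
+
+    def to_dict(self):
+        # the built-in serializer calls this when the request body is
+        # turned into JSON
+        return {'lat': self.lat, 'lon': self.lon}
+
+s = Search(index='places').filter(
+    'geo_distance', distance='10km', location=GeoPoint(40.7, -74.0)
+)
+# GeoPoint is converted via to_dict() when s.execute() sends the request
+----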
diff --git a/docs/guide/dsl/faceted_search.asciidoc b/docs/guide/dsl/faceted_search.asciidoc new file mode 100644 index 000000000..6d05cae2d --- /dev/null +++ b/docs/guide/dsl/faceted_search.asciidoc @@ -0,0 +1,145 @@ +[[faceted_search]] +==== Faceted Search + +The library comes with a simple abstraction aimed at helping you develop +faceted navigation for your data. + +[NOTE] +==== +This API is experimental and will be subject to change. Any feedback is +welcome. +==== + +===== Configuration + +You can provide several configuration options (as class attributes) when +declaring a `FacetedSearch` subclass: + +- `index`: + the name of the index (as string) to search through, defaults to + `'_all'`. +- `doc_types`: + list of `Document` subclasses or strings to be used, defaults to + `['_all']`. +- `fields`: + list of fields on the document type to search through. The list will + be passes to `MultiMatch` query so can contain boost values + (`'title^5'`), defaults to `['*']`. +- `facets`: + dictionary of facets to display/filter on. The key is the name + displayed and values should be instances of any `Facet` subclass, for + example: `{'tags': TermsFacet(field='tags')}` + +====== Facets + +There are several different facets available: + +- `TermsFacet`: + provides an option to split documents into groups based on a value of + a field, for example `TermsFacet(field='category')` +- `DateHistogramFacet`: + split documents into time intervals, example: + `DateHistogramFacet(field="published_date", calendar_interval="day")` +- `HistogramFacet`: + similar to `DateHistogramFacet` but for numerical values: + `HistogramFacet(field="rating", interval=2)` +- `RangeFacet`: + allows you to define your own ranges for a numerical fields: + `RangeFacet(field="comment_count", ranges=[("few", (None, 2)), ("lots", (2, None))])` +- `NestedFacet`: + is just a simple facet that wraps another to provide access to nested + documents: + `NestedFacet('variants', TermsFacet(field='variants.color'))` + +By default facet results will only calculate document count, if you wish +for a different metric you can pass in any single value metric +aggregation as the `metric` kwarg +(`TermsFacet(field='tags', metric=A('max', field=timestamp))`). When +specifying `metric` the results will be, by default, sorted in +descending order by that metric. To change it to ascending specify +`metric_sort="asc"` and to just sort by document count use +`metric_sort=False`. + +====== Advanced + +If you require any custom behavior or modifications simply override one +or more of the methods responsible for the class' functions: + +- `search(self)`: + is responsible for constructing the `Search` object used. Override + this if you want to customize the search object (for example by adding + a global filter for published articles only). +- `query(self, search)`: + adds the query position of the search (if search input specified), by + default using `MultiField` query. Override this if you want to modify + the query type used. +- `highlight(self, search)`: + defines the highlighting on the `Search` object and returns a new one. + Default behavior is to highlight on all fields specified for search. + +===== Usage + +The custom subclass can be instantiated empty to provide an empty search +(matching everything) or with `query`, `filters` and `sort`. + +- `query`: + is used to pass in the text of the query to be performed. If `None` is + passed in (default) a `MatchAll` query will be used. 
For example + `'python web'` +- `filters`: + is a dictionary containing all the facet filters that you wish to + apply. Use the name of the facet (from `.facets` attribute) as the key + and one of the possible values as value. For example + `{'tags': 'python'}`. +- `sort`: + is a tuple or list of fields on which the results should be sorted. + The format of the individual fields are to be the same as those passed + to `~elasticsearch.dsl.Search.sort`. + +====== Response + +the response returned from the `FacetedSearch` object (by calling +`.execute()`) is a subclass of the standard `Response` class that adds a +property called `facets` which contains a dictionary with lists of +buckets -each represented by a tuple of key, document count and a flag +indicating whether this value has been filtered on. + +===== Example + +[source,python] +---- +from datetime import date + +from elasticsearch.dsl import FacetedSearch, TermsFacet, DateHistogramFacet + +class BlogSearch(FacetedSearch): + doc_types = [Article, ] + # fields that should be searched + fields = ['tags', 'title', 'body'] + + facets = { + # use bucket aggregations to define facets + 'tags': TermsFacet(field='tags'), + 'publishing_frequency': DateHistogramFacet(field='published_from', interval='month') + } + + def search(self): + # override methods to add custom pieces + s = super().search() + return s.filter('range', publish_from={'lte': 'now/h'}) + +bs = BlogSearch('python web', {'publishing_frequency': date(2015, 6)}) +response = bs.execute() + +# access hits and other attributes as usual +total = response.hits.total +print('total hits', total.relation, total.value) +for hit in response: + print(hit.meta.score, hit.title) + +for (tag, count, selected) in response.facets.tags: + print(tag, ' (SELECTED):' if selected else ':', count) + +for (month, count, selected) in response.facets.publishing_frequency: + print(month.strftime('%B %Y'), ' (SELECTED):' if selected else ':', count) +---- diff --git a/docs/guide/dsl/howto.asciidoc b/docs/guide/dsl/howto.asciidoc new file mode 100644 index 000000000..3328234bb --- /dev/null +++ b/docs/guide/dsl/howto.asciidoc @@ -0,0 +1,7 @@ +=== How-To Guides + +include::search_dsl.asciidoc[] +include::persistence.asciidoc[] +include::faceted_search.asciidoc[] +include::update_by_query.asciidoc[] +include::asyncio.asciidoc[] diff --git a/docs/guide/dsl/persistence.asciidoc b/docs/guide/dsl/persistence.asciidoc new file mode 100644 index 000000000..ff478dadf --- /dev/null +++ b/docs/guide/dsl/persistence.asciidoc @@ -0,0 +1,761 @@ +==== Persistence + +You can use the DSL module to define your mappings and a basic +persistent layer for your application. + +For more comprehensive examples have a look at the +https://github.com/elastic/elasticsearch-py/tree/main/examples/dsl[DSL examples] +directory in the repository. + +[[doc_type]] +===== Document + +If you want to create a model-like wrapper around your documents, use +the `Document` class. It can also be used to create all the necessary +mappings and settings in elasticsearch (see `life-cycle` for details). 
+ +[source,python] +---- +from datetime import datetime +from elasticsearch.dsl import Document, Date, Nested, Boolean, \ + analyzer, InnerDoc, Completion, Keyword, Text + +html_strip = analyzer('html_strip', + tokenizer="standard", + filter=["standard", "lowercase", "stop", "snowball"], + char_filter=["html_strip"] +) + +class Comment(InnerDoc): + author = Text(fields={'raw': Keyword()}) + content = Text(analyzer='snowball') + created_at = Date() + + def age(self): + return datetime.now() - self.created_at + +class Post(Document): + title = Text() + title_suggest = Completion() + created_at = Date() + published = Boolean() + category = Text( + analyzer=html_strip, + fields={'raw': Keyword()} + ) + + comments = Nested(Comment) + + class Index: + name = 'blog' + + def add_comment(self, author, content): + self.comments.append( + Comment(author=author, content=content, created_at=datetime.now())) + + def save(self, ** kwargs): + self.created_at = datetime.now() + return super().save(** kwargs) +---- + +====== Data types + +The `Document` instances use native python types like `str` and +`datetime`. In case of `Object` or `Nested` fields an instance of the +`InnerDoc` subclass is used, as in the `add_comment` method in the +above example where we are creating an instance of the `Comment` class. + +There are some specific types that were created as part of this library +to make working with some field types easier, for example the `Range` +object used in any of the +https://www.elastic.co/guide/en/elasticsearch/reference/current/range.html[range +fields]: + +[source,python] +---- +from elasticsearch.dsl import Document, DateRange, Keyword, Range + +class RoomBooking(Document): + room = Keyword() + dates = DateRange() + + +rb = RoomBooking( + room='Conference Room II', + dates=Range( + gte=datetime(2018, 11, 17, 9, 0, 0), + lt=datetime(2018, 11, 17, 10, 0, 0) + ) +) + +# Range supports the in operator correctly: +datetime(2018, 11, 17, 9, 30, 0) in rb.dates # True + +# you can also get the limits and whether they are inclusive or exclusive: +rb.dates.lower # datetime(2018, 11, 17, 9, 0, 0), True +rb.dates.upper # datetime(2018, 11, 17, 10, 0, 0), False + +# empty range is unbounded +Range().lower # None, False +---- + +====== Python Type Hints + +Document fields can be defined using standard Python type hints if +desired. Here are some simple examples: + +[source,python] +---- +from typing import Optional + +class Post(Document): + title: str # same as title = Text(required=True) + created_at: Optional[datetime] # same as created_at = Date(required=False) + published: bool # same as published = Boolean(required=True) +---- + +It is important to note that when using `Field` subclasses such as +`Text`, `Date` and `Boolean`, they must be given in the right-side of an +assignment, as shown in examples above. Using these classes as type +hints will result in errors. + +Python types are mapped to their corresponding field type according to +the following table: + +.Python type to DSL field mappings +[cols=",",options="header",] +|=== +|Python type |DSL field +|`str` |`Text(required=True)` +|`bool` |`Boolean(required=True)` +|`int` |`Integer(required=True)` +|`float` |`Float(required=True)` +|`bytes` |`Binary(required=True)` +|`datetime` |`Date(required=True)` +|`date` |`Date(format="yyyy-MM-dd", required=True)` +|=== + +To type a field as optional, the standard `Optional` modifier from the +Python `typing` package can be used. 
When using Python 3.10 or newer, +"pipe" syntax can also be used, by adding `| None` to a type. The +`List` modifier can be added to a field to convert it to an array, +similar to using the `multi=True` argument on the field object. + +[source,python] +---- +from typing import Optional, List + +class MyDoc(Document): + pub_date: Optional[datetime] # same as pub_date = Date() + middle_name: str | None # same as middle_name = Text() + authors: List[str] # same as authors = Text(multi=True, required=True) + comments: Optional[List[str]] # same as comments = Text(multi=True) +---- + +A field can also be given a type hint of an `InnerDoc` subclass, in +which case it becomes an `Object` field of that class. When the +`InnerDoc` subclass is wrapped with `List`, a `Nested` field is created +instead. + +[source,python] +---- +from typing import List + +class Address(InnerDoc): + ... + +class Comment(InnerDoc): + ... + +class Post(Document): + address: Address # same as address = Object(Address, required=True) + comments: List[Comment] # same as comments = Nested(Comment, required=True) +---- + +Unfortunately it is impossible to have Python type hints that uniquely +identify every possible Elasticsearch field type. To choose a field type +that is different than the ones in the table above, the field instance +can be added explicitly as a right-side assignment in the field +declaration. The next example creates a field that is typed as +`Optional[str]`, but is mapped to `Keyword` instead of `Text`: + +[source,python] +---- +class MyDocument(Document): + category: Optional[str] = Keyword() +---- + +This form can also be used when additional options need to be given to +initialize the field, such as when using custom analyzer settings or +changing the `required` default: + +[source,python] +---- +class Comment(InnerDoc): + content: str = Text(analyzer='snowball', required=True) +---- + +When using type hints as above, subclasses of `Document` and `InnerDoc` +inherit some of the behaviors associated with Python dataclasses, as +defined by https://peps.python.org/pep-0681/[PEP 681] and the +https://typing.readthedocs.io/en/latest/spec/dataclasses.html#dataclass-transform[dataclass_transform +decorator]. To add per-field dataclass options such as `default` or +`default_factory`, the `mapped_field()` wrapper can be used on +the right side of a typed field declaration: + +[source,python] +---- +class MyDocument(Document): + title: str = mapped_field(default="no title") + created_at: datetime = mapped_field(default_factory=datetime.now) + published: bool = mapped_field(default=False) + category: str = mapped_field(Keyword(required=True), default="general") +---- + +When using the `mapped_field()` wrapper function, an explicit field +type instance can be passed as a first positional argument, as the +`category` field does in the example above. + +Static type checkers such as https://mypy-lang.org/[mypy] and +https://github.com/microsoft/pyright[pyright] can use the type hints and +the dataclass-specific options added to the `mapped_field()` +function to improve type inference and provide better real-time +suggestions in IDEs. + +One situation in which type checkers can't infer the correct type is +when using fields as class attributes. 
Consider the following example: + +[source,python] +---- +class MyDocument(Document): + title: str + +doc = MyDocument() +# doc.title is typed as "str" (correct) +# MyDocument.title is also typed as "str" (incorrect) +---- + +To help type checkers correctly identify class attributes as such, the +`M` generic must be used as a wrapper to the type hint, as shown in the +next examples: + +[source,python] +---- +from elasticsearch.dsl import M + +class MyDocument(Document): + title: M[str] + created_at: M[datetime] = mapped_field(default_factory=datetime.now) + +doc = MyDocument() +# doc.title is typed as "str" +# doc.created_at is typed as "datetime" +# MyDocument.title is typed as "InstrumentedField" +# MyDocument.created_at is typed as "InstrumentedField" +---- + +Note that the `M` type hint does not provide any runtime behavior and +its use is not required, but it can be useful to eliminate spurious type +errors in IDEs or type checking builds. + +The `InstrumentedField` objects returned when fields are accessed as +class attributes are proxies for the field instances that can be used +anywhere a field needs to be referenced, such as when specifying sort +options in a `Search` object: + +[source,python] +---- +# sort by creation date descending, and title ascending +s = MyDocument.search().sort(-MyDocument.created_at, MyDocument.title) +---- + +When specifying sorting order, the `{plus}` and `-` unary operators can +be used on the class field attributes to indicate ascending and +descending order. + +Finally, the `ClassVar` annotation can be used to define a regular class +attribute that should not be mapped to the Elasticsearch index: + +[source,python] +---- +from typing import ClassVar + +class MyDoc(Document): + title: M[str] created_at: M[datetime] = + mapped_field(default_factory=datetime.now) my_var: + ClassVar[str] # regular class variable, ignored by Elasticsearch +---- + +====== Note on dates + +The DSL module will always respect the timezone information (or +lack thereof) on the `datetime` objects passed in or stored in +Elasticsearch. Elasticsearch itself interprets all datetimes with no +timezone information as `UTC`. If you wish to reflect this in your +python code, you can specify `default_timezone` when instantiating a +`Date` field: + +[source,python] +---- +class Post(Document): + created_at = Date(default_timezone='UTC') +---- + +In that case any `datetime` object passed in (or parsed from +elasticsearch) will be treated as if it were in `UTC` timezone. + +[[life-cycle]] +====== Document life cycle + +Before you first use the `Post` document type, you need to create the +mappings in Elasticsearch. For that you can either use the `index` +object or create the mappings directly by calling the `init` class +method: + +[source,python] +---- +# create the mappings in Elasticsearch +Post.init() +---- + +This code will typically be run in the setup for your application during +a code deploy, similar to running database migrations. + +To create a new `Post` document just instantiate the class and pass in +any fields you wish to set, you can then use standard attribute setting +to change/add more fields. 
Note that you are not limited to the fields +defined explicitly: + +[source,python] +---- +# instantiate the document +first = Post(title='My First Blog Post, yay!', published=True) +# assign some field values, can be values or lists of values +first.category = ['everything', 'nothing'] +# every document has an id in meta +first.meta.id = 47 + + +# save the document into the cluster +first.save() +---- + +All the metadata fields (`id`, `routing`, `index` etc) can be accessed +(and set) via a `meta` attribute or directly using the underscored +variant: + +[source,python] +---- +post = Post(meta={'id': 42}) + +# prints 42 +print(post.meta.id) + +# override default index +post.meta.index = 'my-blog' +---- + +[NOTE] +==== +Having all metadata accessible through `meta` means that this name is +reserved and you shouldn't have a field called `meta` on your document. +If you, however, need it you can still access the data using the get +item (as opposed to attribute) syntax: `post['meta']`. +==== + +To retrieve an existing document use the `get` class method: + +[source,python] +---- +# retrieve the document +first = Post.get(id=42) +# now we can call methods, change fields, ... +first.add_comment('me', 'This is nice!') +# and save the changes into the cluster again +first.save() +---- + +The +https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html[Update +API] can also be used via the `update` method. By default any keyword +arguments, beyond the parameters of the API, will be considered fields +with new values. Those fields will be updated on the local copy of the +document and then sent over as partial document to be updated: + +[source,python] +---- +# retrieve the document +first = Post.get(id=42) +# you can update just individual fields which will call the update API +# and also update the document in place +first.update(published=True, published_by='me') +---- + +In case you wish to use a `painless` script to perform the update you +can pass in the script string as `script` or the `id` of a +https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting-using.html#script-stored-scripts[stored script] +via `script_id`. All additional keyword arguments to the +`update` method will then be passed in as parameters of the script. The +document will not be updated in place. + +[source,python] +---- +# retrieve the document +first = Post.get(id=42) +# we execute a script in elasticsearch with additional kwargs being passed +# as params into the script +first.update(script='ctx._source.category.add(params.new_category)', + new_category='testing') +---- + +If the document is not found in elasticsearch an exception +(`elasticsearch.NotFoundError`) will be raised. If you wish to return +`None` instead just pass in `ignore=404` to suppress the exception: + +[source,python] +---- +p = Post.get(id='not-in-es', ignore=404) +p is None +---- + +When you wish to retrieve multiple documents at the same time by their +`id` you can use the `mget` method: + +[source,python] +---- +posts = Post.mget([42, 47, 256]) +---- + +`mget` will, by default, raise a `NotFoundError` if any of the documents +wasn't found and `RequestError` if any of the document had resulted in +error. You can control this behavior by setting parameters: + +- `raise_on_error`: + If `True` (default) then any error will cause an exception to be + raised. Otherwise all documents containing errors will be treated as + missing. 
+- `missing`: + Can have three possible values: `'none'` (default), `'raise'` and + `'skip'`. If a document is missing or errored it will either be + replaced with `None`, an exception will be raised or the document will + be skipped in the output list entirely. + +The index associated with the `Document` is accessible via the +`_index` class property which gives you access to the `index` class. + +The `_index` attribute is also home to the `load_mappings` +method which will update the mapping on the `Index` from elasticsearch. +This is very useful if you use dynamic mappings and want the class to be +aware of those fields (for example if you wish the `Date` fields to be +properly (de)serialized): + +[source,python] +---- +Post._index.load_mappings() +---- + +To delete a document just call its `delete` method: + +[source,python] +---- +first = Post.get(id=42) +first.delete() +---- + +====== Analysis + +To specify `analyzer` values for `Text` fields you can just use the name +of the analyzer (as a string) and either rely on the analyzer being +defined (like built-in analyzers) or define the analyzer yourself +manually. + +Alternatively you can create your own analyzer and have the persistence +layer handle its creation, from our example earlier: + +[source,python] +---- +from elasticsearch.dsl import analyzer, tokenizer + +my_analyzer = analyzer('my_analyzer', + tokenizer=tokenizer('trigram', 'nGram', min_gram=3, max_gram=3), + filter=['lowercase'] +) +---- + +Each analysis object needs to have a name (`my_analyzer` and +`trigram` in our example) and tokenizers, token filters and char filters +also need to specify type (`nGram` in our example). + +Once you have an instance of a custom `analyzer` you can also call the +https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-analyze.html[analyze +API] on it by using the `simulate` method: + +[source,python] +---- +response = my_analyzer.simulate('Hello World!') + +# ['hel', 'ell', 'llo', 'lo ', 'o w', ' wo', 'wor', 'orl', 'rld', 'ld!'] +tokens = [t.token for t in response.tokens] +---- + +[NOTE] +==== +When creating a mapping which relies on a custom analyzer the index must +either not exist or be closed. To create multiple `Document`-defined +mappings you can use the `index` object. +==== + +====== Search + +To search for this document type, use the `search` class method: + +[source,python] +---- +# by calling .search we get back a standard Search object +s = Post.search() +# the search is already limited to the index and doc_type of our document +s = s.filter('term', published=True).query('match', title='first') + + +results = s.execute() + +# when you execute the search the results are wrapped in your document class (Post) +for post in results: + print(post.meta.score, post.title) +---- + +Alternatively you can just take a `Search` object and restrict it to +return our document type, wrapped in correct class: + +[source,python] +---- +s = Search() +s = s.doc_type(Post) +---- + +You can also combine document classes with standard doc types (just +strings), which will be treated as before. You can also pass in multiple +`Document` subclasses and each document in the response will be wrapped +in it's class. 
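+
+As a minimal sketch (assuming `Post` and `Article` are both `Document`
+subclasses defined elsewhere in your application):
+
+[source,python]
+----
+s = Search(index='blog')
+# each hit is wrapped in whichever of the registered classes matches it
+s = s.doc_type(Post, Article)
+----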
+ +If you want to run suggestions, just use the `suggest` method on the +`Search` object: + +[source,python] +---- +s = Post.search() +s = s.suggest('title_suggestions', 'pyth', completion={'field': 'title_suggest'}) + +response = s.execute() + +for result in response.suggest.title_suggestions: + print('Suggestions for %s:' % result.text) + for option in result.options: + print(' %s (%r)' % (option.text, option.payload)) +---- + +====== `class Meta` options + +In the `Meta` class inside your document definition you can define +various metadata for your document: + +- `mapping`: + optional instance of `Mapping` class to use as base for the mappings + created from the fields on the document class itself. + +Any attributes on the `Meta` class that are instance of `MetaField` will +be used to control the mapping of the meta fields (`_all`, `dynamic` +etc). Just name the parameter (without the leading underscore) as the +field you wish to map and pass any parameters to the `MetaField` class: + +[source,python] +---- +class Post(Document): + title = Text() + + class Meta: + all = MetaField(enabled=False) + dynamic = MetaField('strict') +---- + +====== `class Index` options + +This section of the `Document` definition can contain any information +about the index, its name, settings and other attributes: + +- `name`: + name of the index to use, if it contains a wildcard (`*`) then it + cannot be used for any write operations and an `index` kwarg will have + to be passed explicitly when calling methods like `.save()`. +- `using`: + default connection alias to use, defaults to `'default'` +- `settings`: + dictionary containing any settings for the `Index` object like + `number_of_shards`. +- `analyzers`: + additional list of analyzers that should be defined on an index (see + `analysis` for details). +- `aliases`: + dictionary with any aliases definitions + +====== Document Inheritance + +You can use standard Python inheritance to extend models, this can be +useful in a few scenarios. For example if you want to have a +`BaseDocument` defining some common fields that several different +`Document` classes should share: + +[source,python] +---- +class User(InnerDoc): + username = Text(fields={'keyword': Keyword()}) + email = Text() + +class BaseDocument(Document): + created_by = Object(User) + created_date = Date() + last_updated = Date() + + def save(**kwargs): + if not self.created_date: + self.created_date = datetime.now() + self.last_updated = datetime.now() + return super(BaseDocument, self).save(**kwargs) + +class BlogPost(BaseDocument): + class Index: + name = 'blog' +---- + +Another use case would be using the +https://www.elastic.co/guide/en/elasticsearch/reference/current/parent-join.html[join +type] to have multiple different entities in a single index. You can see +an +https://github.com/elastic/elasticsearch-py/blob/master/examples/dsl/parent_child.py[example] +of this approach. Note that in this case, if the subclasses don't define +their own [.title-ref]#Index# classes, the mappings are merged and +shared between all the subclasses. + +===== Index + +In typical scenario using `class Index` on a `Document` class is +sufficient to perform any action. In a few cases though it can be useful +to manipulate an `Index` object directly. + +`Index` is a class responsible for holding all the metadata related to +an index in elasticsearch - mappings and settings. It is most useful +when defining your mappings since it allows for easy creation of +multiple mappings at the same time. 
This is especially useful when +setting up your elasticsearch objects in a migration: + +[source,python] +---- +from elasticsearch.dsl import Index, Document, Text, analyzer + +blogs = Index('blogs') + +# define custom settings +blogs.settings( + number_of_shards=1, + number_of_replicas=0 +) + +# define aliases +blogs.aliases( + old_blogs={} +) + +# register a document with the index +blogs.document(Post) + +# can also be used as class decorator when defining the Document +@blogs.document +class Post(Document): + title = Text() + +# You can attach custom analyzers to the index + +html_strip = analyzer('html_strip', + tokenizer="standard", + filter=["standard", "lowercase", "stop", "snowball"], + char_filter=["html_strip"] +) + +blogs.analyzer(html_strip) + +# delete the index, ignore if it doesn't exist +blogs.delete(ignore=404) + +# create the index in elasticsearch +blogs.create() +---- + +You can also set up a template for your indices and use the `clone` +method to create specific copies: + +[source,python] +---- +blogs = Index('blogs', using='production') +blogs.settings(number_of_shards=2) +blogs.document(Post) + +# create a copy of the index with different name +company_blogs = blogs.clone('company-blogs') + +# create a different copy on different cluster +dev_blogs = blogs.clone('blogs', using='dev') +# and change its settings +dev_blogs.setting(number_of_shards=1) +---- + +[[index-template]] +====== IndexTemplate + +The DSL module also exposes an option to manage +https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html[index +templates] in elasticsearch using the `IndexTemplate` class which has +very similar API to `Index`. + +Once an index template is saved in elasticsearch it's contents will be +automatically applied to new indices (existing indices are completely +unaffected by templates) that match the template pattern (any index +starting with `blogs-` in our example), even if the index is created +automatically upon indexing a document into that index. + +Potential workflow for a set of time based indices governed by a single +template: + +[source,python] +---- +from datetime import datetime + +from elasticsearch.dsl import Document, Date, Text + + +class Log(Document): + content = Text() + timestamp = Date() + + class Index: + name = "logs-*" + settings = { + "number_of_shards": 2 + } + + def save(self, **kwargs): + # assign now if no timestamp given + if not self.timestamp: + self.timestamp = datetime.now() + + # override the index to go to the proper timeslot + kwargs['index'] = self.timestamp.strftime('logs-%Y%m%d') + return super().save(**kwargs) + +# once, as part of application setup, during deploy/migrations: +logs = Log._index.as_template('logs', order=0) +logs.save() + +# to perform search across all logs: +search = Log.search() +---- + diff --git a/docs/guide/dsl/search_dsl.asciidoc b/docs/guide/dsl/search_dsl.asciidoc new file mode 100644 index 000000000..dfc3b256a --- /dev/null +++ b/docs/guide/dsl/search_dsl.asciidoc @@ -0,0 +1,704 @@ +[[search_dsl]] +==== Search DSL + +===== The `Search` object + +The `Search` object represents the entire search request: + +* queries +* filters +* aggregations +* k-nearest neighbor searches +* sort +* pagination +* highlighting +* suggestions +* collapsing +* additional parameters +* associated client + +The API is designed to be chainable. 
With the exception of the +aggregations functionality this means that the `Search` object is +immutable -all changes to the object will result in a shallow copy being +created which contains the changes. This means you can safely pass the +`Search` object to foreign code without fear of it modifying your +objects as long as it sticks to the `Search` object APIs. + +You can pass an instance of the +https://elasticsearch-py.readthedocs.io/[elasticsearch client] when +instantiating the `Search` object: + +[source,python] +---- +from elasticsearch import Elasticsearch +from elasticsearch.dsl import Search + +client = Elasticsearch() + +s = Search(using=client) +---- + +You can also define the client at a later time (for more options see the +`configuration` chapter): + +[source,python] +---- +s = s.using(client) +---- + +[NOTE] +==== +All methods return a _copy_ of the object, making it safe to pass to +outside code. +==== + +The API is chainable, allowing you to combine multiple method calls in +one statement: + +[source,python] +---- +s = Search().using(client).query("match", title="python") +---- + +To send the request to Elasticsearch: + +[source,python] +---- +response = s.execute() +---- + +If you just want to iterate over the hits returned by your search you +can iterate over the `Search` object: + +[source,python] +---- +for hit in s: + print(hit.title) +---- + +Search results will be cached. Subsequent calls to `execute` or trying +to iterate over an already executed `Search` object will not trigger +additional requests being sent to Elasticsearch. To force a request +specify `ignore_cache=True` when calling `execute`. + +For debugging purposes you can serialize the `Search` object to a `dict` +explicitly: + +[source,python] +---- +print(s.to_dict()) +---- + +====== Delete By Query + +You can delete the documents matching a search by calling `delete` on +the `Search` object instead of `execute` like this: + +[source,python] +---- +s = Search(index='i').query("match", title="python") +response = s.delete() +---- + +====== Queries + +The library provides classes for all Elasticsearch query types. Pass all +the parameters as keyword arguments. The classes accept any keyword +arguments, the dsl then takes all arguments passed to the constructor +and serializes them as top-level keys in the resulting dictionary (and +thus the resulting json being sent to elasticsearch). This means that +there is a clear one-to-one mapping between the raw query and its +equivalent in the DSL: + +[source,python] +---- +from elasticsearch.dsl.query import MultiMatch, Match + +# {"multi_match": {"query": "python django", "fields": ["title", "body"]}} +MultiMatch(query='python django', fields=['title', 'body']) + +# {"match": {"title": {"query": "web framework", "type": "phrase"}}} +Match(title={"query": "web framework", "type": "phrase"}) +---- + +[NOTE] +==== +In some cases this approach is not possible due to python's restriction +on identifiers - for example if your field is called `@timestamp`. 
In +that case you have to fall back to unpacking a dictionary: +`Range(*+ {'@timestamp': {'lt': 'now'}})` +==== + +You can use the `Q` shortcut to construct the instance using a name with +parameters or the raw `dict`: + +[source,python] +---- +from elasticsearch.dsl import Q + +Q("multi_match", query='python django', fields=['title', 'body']) +Q({"multi_match": {"query": "python django", "fields": ["title", "body"]}}) +---- + +To add the query to the `Search` object, use the `.query()` method: + +[source,python] +---- +q = Q("multi_match", query='python django', fields=['title', 'body']) +s = s.query(q) +---- + +The method also accepts all the parameters as the `Q` shortcut: + +[source,python] +---- +s = s.query("multi_match", query='python django', fields=['title', 'body']) +---- + +If you already have a query object, or a `dict` representing one, you +can just override the query used in the `Search` object: + +[source,python] +---- +s.query = Q('bool', must=[Q('match', title='python'), Q('match', body='best')]) +---- + +====== Dotted fields + +Sometimes you want to refer to a field within another field, either as a +multi-field (`title.keyword`) or in a structured `json` document like +`address.city`. To make it easier, the `Q` shortcut (as well as the +`query`, `filter`, and `exclude` methods on `Search` class) allows you +to use `_+` (double underscore) in place of a dot in a keyword +argument: + +[source,python] +---- +s = Search() +s = s.filter('term', category__keyword='Python') +s = s.query('match', address__city='prague') +---- + +Alternatively you can always fall back to python's kwarg unpacking if +you prefer: + +[source,python] +---- +s = Search() +s = s.filter('term', **{'category.keyword': 'Python'}) +s = s.query('match', **{'address.city': 'prague'}) +---- + +====== Query combination + +Query objects can be combined using logical operators: + +[source,python] +---- +Q("match", title='python') | Q("match", title='django') +# {"bool": {"should": [...]}} + +Q("match", title='python') & Q("match", title='django') +# {"bool": {"must": [...]}} + +~Q("match", title="python") +# {"bool": {"must_not": [...]}} +---- + +When you call the `.query()` method multiple times, the `&` operator +will be used internally: + +[source,python] +---- +s = s.query().query() +print(s.to_dict()) +# {"query": {"bool": {...}}} +---- + +If you want to have precise control over the query form, use the `Q` +shortcut to directly construct the combined query: + +[source,python] +---- +q = Q('bool', + must=[Q('match', title='python')], + should=[Q(...), Q(...)], + minimum_should_match=1 +) +s = Search().query(q) +---- + +====== Filters + +If you want to add a query in a +https://www.elastic.co/guide/en/elasticsearch/reference/2.0/query-filter-context.html[filter +context] you can use the `filter()` method to make things easier: + +[source,python] +---- +s = Search() +s = s.filter('terms', tags=['search', 'python']) +---- + +Behind the scenes this will produce a `Bool` query and place the +specified `terms` query into its `filter` branch, making it equivalent +to: + +[source,python] +---- +s = Search() +s = s.query('bool', filter=[Q('terms', tags=['search', 'python'])]) +---- + +If you want to use the post_filter element for faceted navigation, +use the `.post_filter()` method. 
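+
+For example, a minimal sketch (assuming `.post_filter()` accepts the same
+arguments as `.filter()` and `.query()`):
+
+[source,python]
+----
+s = Search()
+s = s.query('match', title='python')
+# hits are filtered after aggregations are computed, which is what
+# faceted navigation needs
+s = s.post_filter('terms', tags=['search', 'python'])
+----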
+ +You can also `exclude()` items from your query like this: + +[source,python] +---- +s = Search() +s = s.exclude('terms', tags=['search', 'python']) +---- + +which is shorthand for: +`s = s.query('bool', filter=[~Q('terms', tags=['search', 'python'])])` + +====== Aggregations + +To define an aggregation, you can use the `A` shortcut: + +[source,python] +---- +from elasticsearch.dsl import A + +A('terms', field='tags') +# {"terms": {"field": "tags"}} +---- + +To nest aggregations, you can use the `.bucket()`, `.metric()` and +`.pipeline()` methods: + +[source,python] +---- +a = A('terms', field='category') +# {'terms': {'field': 'category'}} + +a.metric('clicks_per_category', 'sum', field='clicks')\ + .bucket('tags_per_category', 'terms', field='tags') +# { +# 'terms': {'field': 'category'}, +# 'aggs': { +# 'clicks_per_category': {'sum': {'field': 'clicks'}}, +# 'tags_per_category': {'terms': {'field': 'tags'}} +# } +# } +---- + +To add aggregations to the `Search` object, use the `.aggs` property, +which acts as a top-level aggregation: + +[source,python] +---- +s = Search() +a = A('terms', field='category') +s.aggs.bucket('category_terms', a) +# { +# 'aggs': { +# 'category_terms': { +# 'terms': { +# 'field': 'category' +# } +# } +# } +# } +---- + +or + +[source,python] +---- +s = Search() +s.aggs.bucket('articles_per_day', 'date_histogram', field='publish_date', interval='day')\ + .metric('clicks_per_day', 'sum', field='clicks')\ + .pipeline('moving_click_average', 'moving_avg', buckets_path='clicks_per_day')\ + .bucket('tags_per_day', 'terms', field='tags') + +s.to_dict() +# { +# "aggs": { +# "articles_per_day": { +# "date_histogram": { "interval": "day", "field": "publish_date" }, +# "aggs": { +# "clicks_per_day": { "sum": { "field": "clicks" } }, +# "moving_click_average": { "moving_avg": { "buckets_path": "clicks_per_day" } }, +# "tags_per_day": { "terms": { "field": "tags" } } +# } +# } +# } +# } +---- + +You can access an existing bucket by its name: + +[source,python] +---- +s = Search() + +s.aggs.bucket('per_category', 'terms', field='category') +s.aggs['per_category'].metric('clicks_per_category', 'sum', field='clicks') +s.aggs['per_category'].bucket('tags_per_category', 'terms', field='tags') +---- + +[NOTE] +==== +When chaining multiple aggregations, there is a difference between what +`.bucket()` and `.metric()` methods return - `.bucket()` returns the +newly defined bucket while `.metric()` returns its parent bucket to +allow further chaining. +==== + +As opposed to other methods on the `Search` objects, defining +aggregations is done in-place (does not return a copy). + +====== K-Nearest Neighbor Searches + +To issue a kNN search, use the `.knn()` method: + +[source,python] +---- +s = Search() +vector = get_embedding("search text") + +s = s.knn( + field="embedding", + k=5, + num_candidates=10, + query_vector=vector +) +---- + +The `field`, `k` and `num_candidates` arguments can be given as +positional or keyword arguments and are required. In addition to these, +`query_vector` or `query_vector_builder` must be given as +well. + +The `.knn()` method can be invoked multiple times to include multiple +kNN searches in the request. + +====== Sorting + +To specify sorting order, use the `.sort()` method: + +[source,python] +---- +s = Search().sort( + 'category', + '-title', + {"lines" : {"order" : "asc", "mode" : "avg"}} +) +---- + +It accepts positional arguments which can be either strings or +dictionaries. 
String value is a field name, optionally prefixed by the +`-` sign to specify a descending order. + +To reset the sorting, just call the method with no arguments: + +[source,python] +---- +s = s.sort() +---- + +====== Pagination + +To specify the from/size parameters, use the Python slicing API: + +[source,python] +---- +s = s[10:20] +# {"from": 10, "size": 10} + +s = s[:20] +# {"size": 20} + +s = s[10:] +# {"from": 10} + +s = s[10:20][2:] +# {"from": 12, "size": 8} +---- + +If you want to access all the documents matched by your query you can +use the `scan` method which uses the scan/scroll elasticsearch API: + +[source,python] +---- +for hit in s.scan(): + print(hit.title) +---- + +Note that in this case the results won't be sorted. + +====== Highlighting + +To set common attributes for highlighting use the +`highlight_options` method: + +[source,python] +---- +s = s.highlight_options(order='score') +---- + +Enabling highlighting for individual fields is done using the +`highlight` method: + +[source,python] +---- +s = s.highlight('title') +# or, including parameters: +s = s.highlight('title', fragment_size=50) +---- + +The fragments in the response will then be available on each `Result` +object as `.meta.highlight.FIELD` which will contain the list of +fragments: + +[source,python] +---- +response = s.execute() +for hit in response: + for fragment in hit.meta.highlight.title: + print(fragment) +---- + +====== Suggestions + +To specify a suggest request on your `Search` object use the `suggest` +method: + +[source,python] +---- +# check for correct spelling +s = s.suggest('my_suggestion', 'pyhton', term={'field': 'title'}) +---- + +The first argument is the name of the suggestions (name under which it +will be returned), second is the actual text you wish the suggester to +work on and the keyword arguments will be added to the suggest's json +as-is which means that it should be one of `term`, `phrase` or +`completion` to indicate which type of suggester should be used. + +====== Collapsing + +To collapse search results use the `collapse` method on your `Search` +object: + +[source,python] +---- +s = Search().query("match", message="GET /search") +# collapse results by user_id +s = s.collapse("user_id") +---- + +The top hits will only include one result per `user_id`. You can +also expand each collapsed top hit with the `inner_hits` parameter, +`max_concurrent_group_searches` being the number of +concurrent requests allowed to retrieve the inner hits per group: + +[source,python] +---- +inner_hits = {"name": "recent_search", "size": 5, "sort": [{"@timestamp": "desc"}]} +s = s.collapse("user_id", inner_hits=inner_hits, max_concurrent_group_searches=4) +---- + +====== More Like This Query + +To use Elasticsearch's `more_like_this` functionality, you can use +the MoreLikeThis query type. + +A simple example is below + +[source,python] +---- +from elasticsearch.dsl.query import MoreLikeThis +from elasticsearch.dsl import Search + +my_text = 'I want to find something similar' + +s = Search() +# We're going to match based only on two fields, in this case text and title +s = s.query(MoreLikeThis(like=my_text, fields=['text', 'title'])) +# You can also exclude fields from the result to make the response quicker in the normal way +s = s.source(exclude=["text"]) +response = s.execute() + +for hit in response: + print(hit.title) +---- + +====== Extra properties and parameters + +To set extra properties of the search request, use the `.extra()` +method. 
This can be used to define keys in the body that cannot be +defined via a specific API method like `explain` or `search_after`: + +[source,python] +---- +s = s.extra(explain=True) +---- + +To set query parameters, use the `.params()` method: + +[source,python] +---- +s = s.params(routing="42") +---- + +If you need to limit the fields being returned by elasticsearch, use the +`source()` method: + +[source,python] +---- +# only return the selected fields +s = s.source(['title', 'body']) +# don't return any fields, just the metadata +s = s.source(False) +# explicitly include/exclude fields +s = s.source(includes=["title"], excludes=["user.*"]) +# reset the field selection +s = s.source(None) +---- + +====== Serialization and Deserialization + +The search object can be serialized into a dictionary by using the +`.to_dict()` method. + +You can also create a `Search` object from a `dict` using the +`from_dict` class method. This will create a new `Search` object and +populate it using the data from the dict: + +[source,python] +---- +s = Search.from_dict({"query": {"match": {"title": "python"}}}) +---- + +If you wish to modify an existing `Search` object, overriding it's +properties, instead use the `update_from_dict` method that +alters an instance *in-place*: + +[source,python] +---- +s = Search(index='i') +s.update_from_dict({"query": {"match": {"title": "python"}}, "size": 42}) +---- + +===== Response + +You can execute your search by calling the `.execute()` method that will +return a `Response` object. The `Response` object allows you access to +any key from the response dictionary via attribute access. It also +provides some convenient helpers: + +[source,python] +---- +response = s.execute() + +print(response.success()) +# True + +print(response.took) +# 12 + +print(response.hits.total.relation) +# eq +print(response.hits.total.value) +# 142 + +print(response.suggest.my_suggestions) +---- + +If you want to inspect the contents of the `response` objects, just use +its `to_dict` method to get access to the raw data for pretty +printing. + +====== Hits + +To access to the hits returned by the search, access the `hits` property +or just iterate over the `Response` object: + +[source,python] +---- +response = s.execute() +print('Total %d hits found.' % response.hits.total) +for h in response: + print(h.title, h.body) +---- + +[NOTE] +==== +If you are only seeing partial results (e.g. 10000 or even 10 results), +consider using the option `s.extra(track_total_hits=True)` to +get a full hit count. +==== + +====== Result + +The individual hits is wrapped in a convenience class that allows +attribute access to the keys in the returned dictionary. All the +metadata for the results are accessible via `meta` (without the leading +`_`): + +[source,python] +---- +response = s.execute() +h = response.hits[0] +print('/%s/%s/%s returned with score %f' % ( + h.meta.index, h.meta.doc_type, h.meta.id, h.meta.score)) +---- + +[NOTE] +==== +If your document has a field called `meta` you have to access it using +the get item syntax: `hit['meta']`. 
+==== + +====== Aggregations + +Aggregations are available through the `aggregations` property: + +[source,python] +---- +for tag in response.aggregations.per_tag.buckets: + print(tag.key, tag.max_lines.value) +---- + +===== `MultiSearch` + +If you need to execute multiple searches at the same time you can use +the `MultiSearch` class which will use the `_msearch` API: + +[source,python] +---- +from elasticsearch.dsl import MultiSearch, Search + +ms = MultiSearch(index='blogs') + +ms = ms.add(Search().filter('term', tags='python')) +ms = ms.add(Search().filter('term', tags='elasticsearch')) + +responses = ms.execute() + +for response in responses: + print("Results for query %r." % response._search.query) + for hit in response: + print(hit.title) +---- + +===== `EmptySearch` + +The `EmptySearch` class can be used as a fully compatible version of +`Search` that will return no results, regardless of any queries +configured. + diff --git a/docs/guide/dsl/tutorials.asciidoc b/docs/guide/dsl/tutorials.asciidoc new file mode 100644 index 000000000..1b5ff0e2c --- /dev/null +++ b/docs/guide/dsl/tutorials.asciidoc @@ -0,0 +1,251 @@ +=== Tutorials + +==== Search + +Let's have a typical search request written directly as a `dict`: + +[source,python] +---- +from elasticsearch import Elasticsearch +client = Elasticsearch("https://localhost:9200") + +response = client.search( + index="my-index", + body={ + "query": { + "bool": { + "must": [{"match": {"title": "python"}}], + "must_not": [{"match": {"description": "beta"}}], + "filter": [{"term": {"category": "search"}}] + } + }, + "aggs" : { + "per_tag": { + "terms": {"field": "tags"}, + "aggs": { + "max_lines": {"max": {"field": "lines"}} + } + } + } + } +) + +for hit in response['hits']['hits']: + print(hit['_score'], hit['_source']['title']) + +for tag in response['aggregations']['per_tag']['buckets']: + print(tag['key'], tag['max_lines']['value']) +---- + +The problem with this approach is that it is very verbose, prone to +syntax mistakes like incorrect nesting, hard to modify (eg. adding +another filter) and definitely not fun to write. + +Let's rewrite the example using the DSL module: + +[source,python] +---- +from elasticsearch import Elasticsearch +from elasticsearch.dsl import Search + +client = Elasticsearch("https://localhost:9200") + +s = Search(using=client, index="my-index") \ + .filter("term", category="search") \ + .query("match", title="python") \ + .exclude("match", description="beta") + +s.aggs.bucket('per_tag', 'terms', field='tags') \ + .metric('max_lines', 'max', field='lines') + +response = s.execute() + +for hit in response: + print(hit.meta.score, hit.title) + +for tag in response.aggregations.per_tag.buckets: + print(tag.key, tag.max_lines.value) +---- + +As you see, the library took care of: + +* creating appropriate `Query` objects by name (eq. 
"match") +* composing queries into a compound `bool` query +* putting the `term` query in a filter context of the `bool` query +* providing a convenient access to response data +* no curly or square brackets everywhere + +==== Persistence + +Let's have a simple Python class representing an article in a blogging +system: + +[source,python] +---- +from datetime import datetime +from elasticsearch.dsl import Document, Date, Integer, Keyword, Text, connections + +# Define a default Elasticsearch client +connections.create_connection(hosts="https://localhost:9200") + +class Article(Document): + title = Text(analyzer='snowball', fields={'raw': Keyword()}) + body = Text(analyzer='snowball') + tags = Keyword() + published_from = Date() + lines = Integer() + + class Index: + name = 'blog' + settings = { + "number_of_shards": 2, + } + + def save(self, ** kwargs): + self.lines = len(self.body.split()) + return super(Article, self).save(** kwargs) + + def is_published(self): + return datetime.now() > self.published_from + +# create the mappings in elasticsearch +Article.init() + +# create and save and article +article = Article(meta={'id': 42}, title='Hello world!', tags=['test']) +article.body = ''' looong text ''' +article.published_from = datetime.now() +article.save() + +article = Article.get(id=42) +print(article.is_published()) + +# Display cluster health +print(connections.get_connection().cluster.health()) +---- + +In this example you can see: + +* providing a default connection +* defining fields with mapping configuration +* setting index name +* defining custom methods +* overriding the built-in `.save()` method to hook into the persistence +life cycle +* retrieving and saving the object into Elasticsearch +* accessing the underlying client for other APIs + +You can see more in the `persistence` chapter. + +==== Pre-built Faceted Search + +If you have your `Document`s defined you can very easily create a +faceted search class to simplify searching and filtering. + +[NOTE] +==== +This feature is experimental and may be subject to change. +==== + +[source,python] +---- +from elasticsearch.dsl import FacetedSearch, TermsFacet, DateHistogramFacet + +class BlogSearch(FacetedSearch): + doc_types = [Article, ] + # fields that should be searched + fields = ['tags', 'title', 'body'] + + facets = { + # use bucket aggregations to define facets + 'tags': TermsFacet(field='tags'), + 'publishing_frequency': DateHistogramFacet(field='published_from', interval='month') + } + +# empty search +bs = BlogSearch() +response = bs.execute() + +for hit in response: + print(hit.meta.score, hit.title) + +for (tag, count, selected) in response.facets.tags: + print(tag, ' (SELECTED):' if selected else ':', count) + +for (month, count, selected) in response.facets.publishing_frequency: + print(month.strftime('%B %Y'), ' (SELECTED):' if selected else ':', count) +---- + +You can find more details in the `faceted_search` chapter. + +==== Update By Query + +Let's resume the simple example of articles on a blog, and let's assume +that each article has a number of likes. For this example, imagine we +want to increment the number of likes by 1 for all articles that match a +certain tag and do not match a certain description. 
Writing this as a +`dict`, we would have the following code: + +[source,python] +---- +from elasticsearch import Elasticsearch +client = Elasticsearch() + +response = client.update_by_query( + index="my-index", + body={ + "query": { + "bool": { + "must": [{"match": {"tag": "python"}}], + "must_not": [{"match": {"description": "beta"}}] + } + }, + "script"={ + "source": "ctx._source.likes++", + "lang": "painless" + } + }, + ) +---- + +Using the DSL, we can now express this query as such: + +[source,python] +---- +from elasticsearch import Elasticsearch +from elasticsearch.dsl import Search, UpdateByQuery + +client = Elasticsearch() +ubq = UpdateByQuery(using=client, index="my-index") \ + .query("match", title="python") \ + .exclude("match", description="beta") \ + .script(source="ctx._source.likes++", lang="painless") + +response = ubq.execute() +---- + +As you can see, the `Update By Query` object provides many of the +savings offered by the `Search` object, and additionally allows one to +update the results of the search based on a script assigned in the same +manner. + +==== Migration from the standard client + +You don't have to port your entire application to get the benefits of +the DSL module, you can start gradually by creating a `Search` object +from your existing `dict`, modifying it using the API and serializing it +back to a `dict`: + +[source,python] +---- +body = {...} # insert complicated query here + +# Convert to Search object +s = Search.from_dict(body) + +# Add some filters, aggregations, queries, ... +s.filter("term", tags="python") + +# Convert back to dict to plug back into existing code +body = s.to_dict() +---- diff --git a/docs/guide/dsl/update_by_query.asciidoc b/docs/guide/dsl/update_by_query.asciidoc new file mode 100644 index 000000000..b4a550a77 --- /dev/null +++ b/docs/guide/dsl/update_by_query.asciidoc @@ -0,0 +1,168 @@ +[[update_by_query]] +==== Update By Query + +===== The `Update By Query` object + +The `Update By Query` object enables the use of the +https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html[_update_by_query] +endpoint to perform an update on documents that match a search query. + +The object is implemented as a modification of the `Search` object, +containing a subset of its query methods, as well as a script method, +which is used to make updates. + +The `Update By Query` object implements the following `Search` query +types: + +* queries +* filters +* excludes + +For more information on queries, see the `search_dsl` chapter. + +Like the `Search` object, the API is designed to be chainable. This +means that the `Update By Query` object is immutable: all changes to the +object will result in a shallow copy being created which contains the +changes. This means you can safely pass the `Update By Query` object to +foreign code without fear of it modifying your objects as long as it +sticks to the `Update By Query` object APIs. + +You can define your client in a number of ways, but the preferred method +is to use a global configuration. For more information on defining a +client, see the `configuration` chapter. + +Once your client is defined, you can instantiate a copy of the +`Update By Query` object as seen below: + +[source,python] +---- +from elasticsearch.dsl import UpdateByQuery + +ubq = UpdateByQuery().using(client) +# or +ubq = UpdateByQuery(using=client) +---- + +[NOTE] +==== +All methods return a _copy_ of the object, making it safe to pass to +outside code. 
+==== + +The API is chainable, allowing you to combine multiple method calls in +one statement: + +[source,python] +---- +ubq = UpdateByQuery().using(client).query("match", title="python") +---- + +To send the request to Elasticsearch: + +[source,python] +---- +response = ubq.execute() +---- + +It should be noted, that there are limits to the chaining using the +script method: calling script multiple times will overwrite the previous +value. That is, only a single script can be sent with a call. An attempt +to use two scripts will result in only the second script being stored. + +Given the below example: + +[source,python] +---- +ubq = UpdateByQuery() \ + .using(client) \ + .script(source="ctx._source.likes++") \ + .script(source="ctx._source.likes+=2") +---- + +This means that the stored script by this client will be +`'source': 'ctx._source.likes{plus}=2'` and the previous call will +not be stored. + +For debugging purposes you can serialize the `Update By Query` object to +a `dict` explicitly: + +[source,python] +---- +print(ubq.to_dict()) +---- + +Also, to use variables in script see below example: + +[source,python] +---- +ubq.script( + source="ctx._source.messages.removeIf(x -> x.somefield == params.some_var)", + params={ + 'some_var': 'some_string_val' + } +) +---- + +====== Serialization and Deserialization + +The search object can be serialized into a dictionary by using the +`.to_dict()` method. + +You can also create a `Update By Query` object from a `dict` using the +`from_dict` class method. This will create a new `Update By Query` +object and populate it using the data from the dict: + +[source,python] +---- +ubq = UpdateByQuery.from_dict({"query": {"match": {"title": "python"}}}) +---- + +If you wish to modify an existing `Update By Query` object, overriding +it's properties, instead use the `update_from_dict` method that +alters an instance *in-place*: + +[source,python] +---- +ubq = UpdateByQuery(index='i') +ubq.update_from_dict({"query": {"match": {"title": "python"}}, "size": 42}) +---- + +====== Extra properties and parameters + +To set extra properties of the search request, use the `.extra()` +method. This can be used to define keys in the body that cannot be +defined via a specific API method like `explain`: + +[source,python] +---- +ubq = ubq.extra(explain=True) +---- + +To set query parameters, use the `.params()` method: + +[source,python] +---- +ubq = ubq.params(routing="42") +---- + +===== Response + +You can execute your search by calling the `.execute()` method that will +return a `Response` object. The `Response` object allows you access to +any key from the response dictionary via attribute access. It also +provides some convenient helpers: + +[source,python] +---- +response = ubq.execute() + +print(response.success()) +# True + +print(response.took) +# 12 +---- + +If you want to inspect the contents of the `response` objects, just use +its `to_dict` method to get access to the raw data for pretty +printing. diff --git a/docs/guide/elasticsearch-dsl.asciidoc b/docs/guide/elasticsearch-dsl.asciidoc new file mode 100644 index 000000000..bd3fb5d19 --- /dev/null +++ b/docs/guide/elasticsearch-dsl.asciidoc @@ -0,0 +1,48 @@ +[[elasticsearch-dsl]] +== Elasticsearch Python DSL + +Elasticsearch DSL is a module of the official Python client that aims to help +with writing and running queries against Elasticsearch in a more convenient and +idiomatic way. It stays close to the Elasticsearch JSON DSL, mirroring its +terminology and structure. 
It exposes the whole range of the DSL from +Python either directly using defined classes or a queryset-like +expressions. Here is an example: + +[source,python] +.... +from elasticsearch.dsl import Search + +s = Search(index="my-index") \ + .filter("term", category="search") \ + .query("match", title="python") \ + .exclude("match", description="beta") +for hit in s: + print(hit.title) +.... + +Or with asynchronous Python: + +[source,python] +.... +from elasticsearch.dsl import AsyncSearch + +async def run_query(): + s = AsyncSearch(index="my-index") \ + .filter("term", category="search") \ + .query("match", title="python") \ + .exclude("match", description="beta") + async for hit in s: + print(hit.title) +.... + +It also provides an optional wrapper for working with documents as +Python objects: defining mappings, retrieving and saving documents, +wrapping the document data in user-defined classes. + +To use the other Elasticsearch APIs (eg. cluster health) just use the +regular client. + +include::dsl/configuration.asciidoc[] +include::dsl/tutorials.asciidoc[] +include::dsl/howto.asciidoc[] +include::dsl/examples.asciidoc[] diff --git a/docs/guide/index-custom-title-page.html b/docs/guide/index-custom-title-page.html index bd1d84081..b7fd0f405 100644 --- a/docs/guide/index-custom-title-page.html +++ b/docs/guide/index-custom-title-page.html @@ -135,6 +135,9 @@

  • Integrations
  • +
  • + Elasticsearch Python DSL +
  • Client helpers
  • @@ -186,4 +189,4 @@

    -

    View all Elastic docs

    \ No newline at end of file +

    View all Elastic docs

    diff --git a/docs/guide/index.asciidoc b/docs/guide/index.asciidoc index d2ae4ab63..5607a9f24 100644 --- a/docs/guide/index.asciidoc +++ b/docs/guide/index.asciidoc @@ -22,6 +22,8 @@ include::integrations.asciidoc[] include::examples.asciidoc[] +include::elasticsearch-dsl.asciidoc[] + include::helpers.asciidoc[] -include::release-notes.asciidoc[] \ No newline at end of file +include::release-notes.asciidoc[] diff --git a/docs/guide/overview.asciidoc b/docs/guide/overview.asciidoc index af7581eb0..2b331e08c 100644 --- a/docs/guide/overview.asciidoc +++ b/docs/guide/overview.asciidoc @@ -82,20 +82,8 @@ some of the more engaging tasks like bulk indexing and reindexing. [discrete] -=== Elasticsearch DSL - -For a more high level client library with more limited scope, have a look at -https://elasticsearch-dsl.readthedocs.org/[elasticsearch-dsl] - a more Pythonic library -sitting on top of `elasticsearch-py`. - -It provides a more convenient and idiomatic way to write and manipulate -https://elasticsearch-dsl.readthedocs.org/en/latest/search_dsl.html[queries]. It -stays close to the Elasticsearch JSON DSL, mirroring its terminology and -structure while exposing the whole range of the DSL from Python either directly -using defined classes or a queryset-like expressions. - -It also provides an optional -https://elasticsearch-dsl.readthedocs.org/en/latest/persistence.html#doctype[persistence -layer] for working with documents as Python objects in an ORM-like fashion: -defining mappings, retrieving and saving documents, wrapping the document data -in user-defined classes. +=== Elasticsearch Python DSL + +For a higher level access with more limited scope, have a look at the DSL module, +which provides a more convenient and idiomatic way to write and manipulate +queries. From 1be0de9fe0a9467ba553d9a26a588985813a8f7b Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 12 Feb 2025 19:41:53 +0400 Subject: [PATCH 30/65] [Backport 8.x] Switch to Black 2025 and isort 6 (#2787) * Switch to Black 2025 and isort 6 (#2779) * Switch to Black 2025 code style * Run format with isort 6 (cherry picked from commit 68f80128b61a8162b58e3082e35564e7aa7b341a) * Run nox -s format --------- Co-authored-by: Quentin Pradet --- elasticsearch/dsl/_sync/search.py | 10 ++- elasticsearch/dsl/response/__init__.py | 68 ++++++++++++++----- elasticsearch/dsl/search_base.py | 6 +- elasticsearch/dsl/types.py | 8 +-- .../helpers/vectorstore/_sync/vectorstore.py | 5 +- examples/dsl/search_as_you_type.py | 7 +- noxfile.py | 8 ++- .../test_dsl/_sync/test_index.py | 9 ++- .../test_dsl/_sync/test_search.py | 10 ++- .../test_integration/_sync/test_search.py | 10 ++- test_elasticsearch/test_dsl/test_result.py | 4 +- 11 files changed, 110 insertions(+), 35 deletions(-) diff --git a/elasticsearch/dsl/_sync/search.py b/elasticsearch/dsl/_sync/search.py index f46364a67..ae826a12f 100644 --- a/elasticsearch/dsl/_sync/search.py +++ b/elasticsearch/dsl/_sync/search.py @@ -16,7 +16,15 @@ # under the License. 
import contextlib -from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, cast +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Iterator, + List, + Optional, + cast, +) from typing_extensions import Self diff --git a/elasticsearch/dsl/response/__init__.py b/elasticsearch/dsl/response/__init__.py index f6f3d551d..2ae863fff 100644 --- a/elasticsearch/dsl/response/__init__.py +++ b/elasticsearch/dsl/response/__init__.py @@ -53,9 +53,19 @@ class Response(AttrDict[Any], Generic[_R]): """An Elasticsearch search response. - :arg took: (required) - :arg timed_out: (required) - :arg _shards: (required) + :arg took: (required) The number of milliseconds it took Elasticsearch + to run the request. This value is calculated by measuring the time + elapsed between receipt of a request on the coordinating node and + the time at which the coordinating node is ready to send the + response. It includes: * Communication time between the + coordinating node and data nodes * Time the request spends in the + search thread pool, queued for execution * Actual run time It + does not include: * Time needed to send the request to + Elasticsearch * Time needed to serialize the JSON response * Time + needed to send the response to a client + :arg timed_out: (required) If `true`, the request timed out before + completion; returned results may be partial or empty. + :arg _shards: (required) A count of shards used for the request. :arg hits: search results :arg aggregations: aggregation results :arg _clusters: @@ -64,7 +74,11 @@ class Response(AttrDict[Any], Generic[_R]): :arg num_reduce_phases: :arg profile: :arg pit_id: - :arg _scroll_id: + :arg _scroll_id: The identifier for the search and its search context. + You can use this scroll ID with the scroll API to retrieve the + next batch of search results for the request. This property is + returned only if the `scroll` query parameter is specified in the + request. :arg suggest: :arg terminated_early: """ @@ -303,22 +317,42 @@ def __iter__(self) -> Iterator[AggregateResponseType]: # type: ignore[override] class UpdateByQueryResponse(AttrDict[Any], Generic[_R]): """An Elasticsearch update by query response. - :arg batches: - :arg failures: - :arg noops: - :arg deleted: - :arg requests_per_second: - :arg retries: + :arg batches: The number of scroll responses pulled back by the update + by query. + :arg failures: Array of failures if there were any unrecoverable + errors during the process. If this is non-empty then the request + ended because of those failures. Update by query is implemented + using batches. Any failure causes the entire process to end, but + all failures in the current batch are collected into the array. + You can use the `conflicts` option to prevent reindex from ending + when version conflicts occur. + :arg noops: The number of documents that were ignored because the + script used for the update by query returned a noop value for + `ctx.op`. + :arg deleted: The number of documents that were successfully deleted. + :arg requests_per_second: The number of requests per second + effectively run during the update by query. + :arg retries: The number of retries attempted by update by query. + `bulk` is the number of bulk actions retried. `search` is the + number of search actions retried. :arg task: - :arg timed_out: - :arg took: - :arg total: - :arg updated: - :arg version_conflicts: + :arg timed_out: If true, some requests timed out during the update by + query. 
+ :arg took: The number of milliseconds from start to end of the whole + operation. + :arg total: The number of documents that were successfully processed. + :arg updated: The number of documents that were successfully updated. + :arg version_conflicts: The number of version conflicts that the + update by query hit. :arg throttled: - :arg throttled_millis: + :arg throttled_millis: The number of milliseconds the request slept to + conform to `requests_per_second`. :arg throttled_until: - :arg throttled_until_millis: + :arg throttled_until_millis: This field should always be equal to zero + in an _update_by_query response. It only has meaning when using + the task API, where it indicates the next time (in milliseconds + since epoch) a throttled request will be run again in order to + conform to `requests_per_second`. """ _search: "UpdateByQueryBase[_R]" diff --git a/elasticsearch/dsl/search_base.py b/elasticsearch/dsl/search_base.py index ad4a56059..c513fc78d 100644 --- a/elasticsearch/dsl/search_base.py +++ b/elasticsearch/dsl/search_base.py @@ -698,12 +698,12 @@ def ensure_strings(fields: "InstrumentedField") -> str: ... @overload def ensure_strings( - fields: List[Union[str, "InstrumentedField"]] + fields: List[Union[str, "InstrumentedField"]], ) -> List[str]: ... @overload def ensure_strings( - fields: Dict[str, List[Union[str, "InstrumentedField"]]] + fields: Dict[str, List[Union[str, "InstrumentedField"]]], ) -> Dict[str, List[str]]: ... def ensure_strings( @@ -712,7 +712,7 @@ def ensure_strings( "InstrumentedField", List[Union[str, "InstrumentedField"]], Dict[str, List[Union[str, "InstrumentedField"]]], - ] + ], ) -> Union[str, List[str], Dict[str, List[str]]]: if isinstance(fields, dict): return {k: ensure_strings(v) for k, v in fields.items()} diff --git a/elasticsearch/dsl/types.py b/elasticsearch/dsl/types.py index ce639c4ed..80332ce32 100644 --- a/elasticsearch/dsl/types.py +++ b/elasticsearch/dsl/types.py @@ -406,9 +406,9 @@ class FieldAndFormat(AttrDict[Any]): A reference to a field with formatting instructions on how to return the value - :arg field: (required) Wildcard pattern. The request returns values + :arg field: (required) A wildcard pattern. The request returns values for field names matching this pattern. - :arg format: Format in which the values are returned. + :arg format: The format in which the values are returned. :arg include_unmapped: """ @@ -5630,8 +5630,8 @@ class RateAggregate(AttrDict[Any]): class Retries(AttrDict[Any]): """ - :arg bulk: (required) - :arg search: (required) + :arg bulk: (required) The number of bulk actions retried. + :arg search: (required) The number of search actions retried. 
""" bulk: int diff --git a/elasticsearch/helpers/vectorstore/_sync/vectorstore.py b/elasticsearch/helpers/vectorstore/_sync/vectorstore.py index 3c4a0d51a..6a6a5ee2a 100644 --- a/elasticsearch/helpers/vectorstore/_sync/vectorstore.py +++ b/elasticsearch/helpers/vectorstore/_sync/vectorstore.py @@ -22,7 +22,10 @@ from elasticsearch import Elasticsearch from elasticsearch._version import __versionstr__ as lib_version from elasticsearch.helpers import BulkIndexError, bulk -from elasticsearch.helpers.vectorstore import EmbeddingService, RetrievalStrategy +from elasticsearch.helpers.vectorstore import ( + EmbeddingService, + RetrievalStrategy, +) from elasticsearch.helpers.vectorstore._utils import maximal_marginal_relevance logger = logging.getLogger(__name__) diff --git a/examples/dsl/search_as_you_type.py b/examples/dsl/search_as_you_type.py index c1ebc99a4..a70de8ccb 100644 --- a/examples/dsl/search_as_you_type.py +++ b/examples/dsl/search_as_you_type.py @@ -28,7 +28,12 @@ import os from typing import TYPE_CHECKING, Optional -from elasticsearch.dsl import Document, SearchAsYouType, connections, mapped_field +from elasticsearch.dsl import ( + Document, + SearchAsYouType, + connections, + mapped_field, +) from elasticsearch.dsl.query import MultiMatch diff --git a/noxfile.py b/noxfile.py index 90172d49e..c9e961900 100644 --- a/noxfile.py +++ b/noxfile.py @@ -66,7 +66,9 @@ def test_otel(session): @nox.session() def format(session): - session.install(".", "black~=24.0", "isort", "flynt", "unasync>=0.6.0", "jinja2") + session.install( + ".", "black~=25.0", "isort~=6.0", "flynt", "unasync>=0.6.0", "jinja2" + ) session.run("python", "utils/run-unasync.py") session.run("python", "utils/run-unasync-dsl.py") @@ -88,9 +90,9 @@ def lint(session): session.install( "flake8", - "black~=24.0", + "black~=25.0", "mypy", - "isort", + "isort~=6.0", "types-requests", "types-python-dateutil", "unasync>=0.6.0", diff --git a/test_elasticsearch/test_dsl/_sync/test_index.py b/test_elasticsearch/test_dsl/_sync/test_index.py index c6d1b7904..327efa047 100644 --- a/test_elasticsearch/test_dsl/_sync/test_index.py +++ b/test_elasticsearch/test_dsl/_sync/test_index.py @@ -22,7 +22,14 @@ import pytest from pytest import raises -from elasticsearch.dsl import Date, Document, Index, IndexTemplate, Text, analyzer +from elasticsearch.dsl import ( + Date, + Document, + Index, + IndexTemplate, + Text, + analyzer, +) class Post(Document): diff --git a/test_elasticsearch/test_dsl/_sync/test_search.py b/test_elasticsearch/test_dsl/_sync/test_search.py index 04b0ad53e..1fa7da1c7 100644 --- a/test_elasticsearch/test_dsl/_sync/test_search.py +++ b/test_elasticsearch/test_dsl/_sync/test_search.py @@ -21,7 +21,15 @@ import pytest from pytest import raises -from elasticsearch.dsl import Document, EmptySearch, Q, Search, query, types, wrappers +from elasticsearch.dsl import ( + Document, + EmptySearch, + Q, + Search, + query, + types, + wrappers, +) from elasticsearch.dsl.exceptions import IllegalOperation diff --git a/test_elasticsearch/test_dsl/test_integration/_sync/test_search.py b/test_elasticsearch/test_dsl/test_integration/_sync/test_search.py index 54060d311..41dd720a3 100644 --- a/test_elasticsearch/test_dsl/test_integration/_sync/test_search.py +++ b/test_elasticsearch/test_dsl/test_integration/_sync/test_search.py @@ -20,7 +20,15 @@ from pytest import raises from elasticsearch import ApiError, Elasticsearch -from elasticsearch.dsl import Date, Document, Keyword, MultiSearch, Q, Search, Text +from elasticsearch.dsl import ( + 
Date, + Document, + Keyword, + MultiSearch, + Q, + Search, + Text, +) from elasticsearch.dsl.response import aggs from ..test_data import FLAT_DATA diff --git a/test_elasticsearch/test_dsl/test_result.py b/test_elasticsearch/test_dsl/test_result.py index 46707c715..2acd810d5 100644 --- a/test_elasticsearch/test_dsl/test_result.py +++ b/test_elasticsearch/test_dsl/test_result.py @@ -130,7 +130,7 @@ def test_iterating_over_response_gives_you_hits(dummy_response: Dict[str, Any]) def test_hits_get_wrapped_to_contain_additional_attrs( - dummy_response: Dict[str, Any] + dummy_response: Dict[str, Any], ) -> None: res = response.Response(Search(), dummy_response) hits = res.hits @@ -140,7 +140,7 @@ def test_hits_get_wrapped_to_contain_additional_attrs( def test_hits_provide_dot_and_bracket_access_to_attrs( - dummy_response: Dict[str, Any] + dummy_response: Dict[str, Any], ) -> None: res = response.Response(Search(), dummy_response) h = res.hits[0] From 03be83473f3943daa0062d1e8edf16259f6f1dac Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 12 Feb 2025 21:02:10 +0400 Subject: [PATCH 31/65] Simplify lint and format nox sessions (#2790) (#2791) (cherry picked from commit 4d5060ba2626d8495892b556a4f0b3992063ba73) Co-authored-by: Quentin Pradet --- noxfile.py | 33 ++------------------------------- 1 file changed, 2 insertions(+), 31 deletions(-) diff --git a/noxfile.py b/noxfile.py index c9e961900..66ee963e6 100644 --- a/noxfile.py +++ b/noxfile.py @@ -78,8 +78,6 @@ def format(session): session.run("black", *SOURCE_FILES) session.run("python", "utils/license-headers.py", "fix", *SOURCE_FILES) - lint(session) - @nox.session() def lint(session): @@ -106,8 +104,7 @@ def lint(session): session.install(".[async,requests,orjson,pyarrow,vectorstore_mmr]", env=INSTALL_ENV) - # Run mypy on the package and then the type examples separately for - # the two different mypy use-cases, ourselves and our users. 
+ # Run mypy on the package, the type examples and the DSL examples session.run( "mypy", "--strict", @@ -116,28 +113,7 @@ def lint(session): "--show-error-codes", "--enable-error-code=ignore-without-code", "elasticsearch/", - ) - session.run( - "mypy", - "--strict", - "--show-error-codes", - "test_elasticsearch/test_types/sync_types.py", - ) - session.run( - "mypy", - "--strict", - "--show-error-codes", - "test_elasticsearch/test_types/async_types.py", - ) - - # check typing on the DSL examples - session.run( - "mypy", - "--strict", - "--implicit-reexport", - "--explicit-package-bases", - "--show-error-codes", - "--enable-error-code=ignore-without-code", + "test_elasticsearch/test_types/", "examples/dsl/", ) @@ -151,11 +127,6 @@ def lint(session): "--explicit-package-bases", "--show-error-codes", "elasticsearch/", - ) - session.run( - "mypy", - "--strict", - "--show-error-codes", "test_elasticsearch/test_types/sync_types.py", ) From 25e651969b13028c7ba074b8dc7c7d9c3427c0ba Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 14 Feb 2025 10:41:44 +0000 Subject: [PATCH 32/65] reduce the number of black and isort calls (#2794) (#2795) (cherry picked from commit c8454414e3e4b8ba4da0a69998b7c55804412f2a) Co-authored-by: Miguel Grinberg --- utils/run-unasync-dsl.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/utils/run-unasync-dsl.py b/utils/run-unasync-dsl.py index d089f0e3e..59c0b05bc 100644 --- a/utils/run-unasync-dsl.py +++ b/utils/run-unasync-dsl.py @@ -98,10 +98,12 @@ def main(check=False): filepaths.append(os.path.join(root, filename)) unasync.unasync_files(filepaths, rules) + output_dirs = [] for dir in source_dirs: - output_dir = f"{dir[0]}_sync_check/" if check else dir[1] - subprocess.check_call(["black", "--target-version=py38", output_dir]) - subprocess.check_call(["isort", output_dir]) + output_dirs.append(f"{dir[0]}_sync_check/" if check else dir[1]) + subprocess.check_call(["black", "--target-version=py38", *output_dirs]) + subprocess.check_call(["isort", *output_dirs]) + for dir, output_dir in zip(source_dirs, output_dirs): for file in glob("*.py", root_dir=dir[0]): # remove asyncio from sync files subprocess.check_call( From 6883a78f0941994d137d8eb9b7d57da69fe2fcff Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 14 Feb 2025 14:42:13 +0000 Subject: [PATCH 33/65] autogenerate field classes from schema (#2780) (#2799) * autogenerate field classes from schema * generated code * special case the dynamic field to also be a bool (cherry picked from commit 6e2363569c83da2ef2f107dffb9e619da8105bb5) Co-authored-by: Miguel Grinberg --- elasticsearch/dsl/field.py | 3978 ++++++++++++++++++++++++++++++++-- elasticsearch/dsl/types.py | 135 ++ utils/dsl-generator.py | 158 +- utils/templates/field.py.tpl | 463 ++++ 4 files changed, 4548 insertions(+), 186 deletions(-) create mode 100644 utils/templates/field.py.tpl diff --git a/elasticsearch/dsl/field.py b/elasticsearch/dsl/field.py index 8f9dd2ff7..50f30b405 100644 --- a/elasticsearch/dsl/field.py +++ b/elasticsearch/dsl/field.py @@ -26,7 +26,10 @@ Dict, Iterable, Iterator, + Literal, + Mapping, Optional, + Sequence, Tuple, Type, Union, @@ -34,6 +37,7 @@ ) from dateutil import parser, tz +from elastic_transport.client_utils import DEFAULT, DefaultType from .exceptions import ValidationException from .query import Q @@ -46,7 +50,9 @@ from _operator import _SupportsComparison + 
from . import types from .document import InnerDoc + from .document_base import InstrumentedField from .mapping_base import MappingBase from .query import Query @@ -170,34 +176,314 @@ def to_dict(self) -> Dict[str, Any]: return d +class RangeField(Field): + _coerce = True + _core_field: Optional[Field] = None + + def _deserialize(self, data: Any) -> Range["_SupportsComparison"]: + if isinstance(data, Range): + return data + data = {k: self._core_field.deserialize(v) for k, v in data.items()} # type: ignore[union-attr] + return Range(data) + + def _serialize(self, data: Any) -> Optional[Dict[str, Any]]: + if data is None: + return None + if not isinstance(data, collections.abc.Mapping): + data = data.to_dict() + return {k: self._core_field.serialize(v) for k, v in data.items()} # type: ignore[union-attr] + + +class Float(Field): + """ + :arg null_value: + :arg boost: + :arg coerce: + :arg ignore_malformed: + :arg index: + :arg on_script_error: + :arg script: + :arg time_series_metric: For internal use by Elastic only. Marks the + field as a time series dimension. Defaults to false. + :arg time_series_dimension: For internal use by Elastic only. Marks + the field as a time series dimension. Defaults to false. + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. + :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "float" + _coerce = True + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + + def __init__( + self, + *args: Any, + null_value: Union[float, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + coerce: Union[bool, "DefaultType"] = DEFAULT, + ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, + index: Union[bool, "DefaultType"] = DEFAULT, + on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + time_series_metric: Union[ + Literal["gauge", "counter", "summary", "histogram", "position"], + "DefaultType", + ] = DEFAULT, + time_series_dimension: Union[bool, "DefaultType"] = DEFAULT, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if null_value is not DEFAULT: + kwargs["null_value"] = null_value + if boost is not DEFAULT: + kwargs["boost"] = boost + if coerce is not DEFAULT: + kwargs["coerce"] = coerce + if ignore_malformed is not DEFAULT: + kwargs["ignore_malformed"] = ignore_malformed + if index is not DEFAULT: + kwargs["index"] = index + if on_script_error is not DEFAULT: + kwargs["on_script_error"] = on_script_error + if script is not DEFAULT: + kwargs["script"] = script + if time_series_metric is not DEFAULT: + kwargs["time_series_metric"] = time_series_metric + if time_series_dimension is not DEFAULT: + kwargs["time_series_dimension"] = time_series_dimension + 
if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) + + def _deserialize(self, data: Any) -> float: + return float(data) + + +class Integer(Field): + """ + :arg null_value: + :arg boost: + :arg coerce: + :arg ignore_malformed: + :arg index: + :arg on_script_error: + :arg script: + :arg time_series_metric: For internal use by Elastic only. Marks the + field as a time series dimension. Defaults to false. + :arg time_series_dimension: For internal use by Elastic only. Marks + the field as a time series dimension. Defaults to false. + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. + :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "integer" + _coerce = True + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + + def __init__( + self, + *args: Any, + null_value: Union[int, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + coerce: Union[bool, "DefaultType"] = DEFAULT, + ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, + index: Union[bool, "DefaultType"] = DEFAULT, + on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + time_series_metric: Union[ + Literal["gauge", "counter", "summary", "histogram", "position"], + "DefaultType", + ] = DEFAULT, + time_series_dimension: Union[bool, "DefaultType"] = DEFAULT, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if null_value is not DEFAULT: + kwargs["null_value"] = null_value + if boost is not DEFAULT: + kwargs["boost"] = boost + if coerce is not DEFAULT: + kwargs["coerce"] = coerce + if ignore_malformed is not DEFAULT: + kwargs["ignore_malformed"] = ignore_malformed + if index is not DEFAULT: + kwargs["index"] = index + if on_script_error is not DEFAULT: + kwargs["on_script_error"] = on_script_error + if script is not DEFAULT: + kwargs["script"] = script + if time_series_metric is not DEFAULT: + kwargs["time_series_metric"] = time_series_metric + if time_series_dimension is not DEFAULT: + kwargs["time_series_dimension"] = time_series_dimension + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = 
str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) + + def _deserialize(self, data: Any) -> int: + return int(data) + + class Object(Field): + """ + :arg doc_class: base doc class that handles mapping. + If no `doc_class` is provided, new instance of `InnerDoc` will be created, + populated with `properties` and used. Can not be provided together with `properties` + :arg enabled: + :arg subobjects: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. + :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + name = "object" _coerce = True + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } def __init__( self, - doc_class: Optional[Type["InnerDoc"]] = None, - dynamic: Optional[Union[bool, str]] = None, - properties: Optional[Dict[str, Any]] = None, + doc_class: Union[Type["InnerDoc"], "DefaultType"] = DEFAULT, + *args: Any, + enabled: Union[bool, "DefaultType"] = DEFAULT, + subobjects: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, **kwargs: Any, ): - """ - :arg document.InnerDoc doc_class: base doc class that handles mapping. - If no `doc_class` is provided, new instance of `InnerDoc` will be created, - populated with `properties` and used. Can not be provided together with `properties` - :arg dynamic: whether new properties may be created dynamically. - Valid values are `True`, `False`, `'strict'`. - Can not be provided together with `doc_class`. - See https://www.elastic.co/guide/en/elasticsearch/reference/current/dynamic.html - for more details - :arg dict properties: used to construct underlying mapping if no `doc_class` is provided. 
- Can not be provided together with `doc_class` - """ - if doc_class and (properties or dynamic is not None): + if enabled is not DEFAULT: + kwargs["enabled"] = enabled + if subobjects is not DEFAULT: + kwargs["subobjects"] = subobjects + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + + if doc_class is not DEFAULT and ( + properties is not DEFAULT or dynamic is not DEFAULT + ): raise ValidationException( "doc_class and properties/dynamic should not be provided together" ) - if doc_class: + if doc_class is not DEFAULT: self._doc_class: Type["InnerDoc"] = doc_class else: # FIXME import @@ -205,9 +491,13 @@ def __init__( # no InnerDoc subclass, creating one instead... self._doc_class = type("InnerDoc", (InnerDoc,), {}) - for name, field in (properties or {}).items(): + for name, field in ( + properties if properties is not DEFAULT else {} + ).items(): self._doc_class._doc_type.mapping.field(name, field) - if dynamic is not None: + if "properties" in kwargs: + del kwargs["properties"] + if dynamic is not DEFAULT: self._doc_class._doc_type.mapping.meta("dynamic", dynamic) self._mapping: "MappingBase" = deepcopy(self._doc_class._doc_type.mapping) @@ -279,29 +569,630 @@ def update(self, other: Any, update_only: bool = False) -> None: self._mapping.update(other._mapping, update_only) -class Nested(Object): - name = "nested" +class AggregateMetricDouble(Field): + """ + :arg default_metric: (required) + :arg metrics: (required) + :arg time_series_metric: + :arg meta: Metadata about the field. 
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "aggregate_metric_double" + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } - def __init__(self, *args: Any, **kwargs: Any): - kwargs.setdefault("multi", True) + def __init__( + self, + *args: Any, + default_metric: Union[str, "DefaultType"] = DEFAULT, + metrics: Union[Sequence[str], "DefaultType"] = DEFAULT, + time_series_metric: Union[ + Literal["gauge", "counter", "summary", "histogram", "position"], + "DefaultType", + ] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if default_metric is not DEFAULT: + kwargs["default_metric"] = default_metric + if metrics is not DEFAULT: + kwargs["metrics"] = metrics + if time_series_metric is not DEFAULT: + kwargs["time_series_metric"] = time_series_metric + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) + + +class Alias(Field): + """ + :arg path: + :arg meta: Metadata about the field. + :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "alias" + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + + def __init__( + self, + *args: Any, + path: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if path is not DEFAULT: + kwargs["path"] = str(path) + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) + + +class Binary(Field): + """ + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. 
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "binary" + _coerce = True + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + + def __init__( + self, + *args: Any, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) + + def clean(self, data: str) -> str: + # Binary fields are opaque, so there's not much cleaning + # that can be done. + return data + + def _deserialize(self, data: Any) -> bytes: + return base64.b64decode(data) + + def _serialize(self, data: Any) -> Optional[str]: + if data is None: + return None + return base64.b64encode(data).decode() + + +class Boolean(Field): + """ + :arg boost: + :arg fielddata: + :arg index: + :arg null_value: + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. 
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "boolean" + _coerce = True + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + + def __init__( + self, + *args: Any, + boost: Union[float, "DefaultType"] = DEFAULT, + fielddata: Union[ + "types.NumericFielddata", Dict[str, Any], "DefaultType" + ] = DEFAULT, + index: Union[bool, "DefaultType"] = DEFAULT, + null_value: Union[bool, "DefaultType"] = DEFAULT, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if boost is not DEFAULT: + kwargs["boost"] = boost + if fielddata is not DEFAULT: + kwargs["fielddata"] = fielddata + if index is not DEFAULT: + kwargs["index"] = index + if null_value is not DEFAULT: + kwargs["null_value"] = null_value + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) + + def _deserialize(self, data: Any) -> bool: + if data == "false": + return False + return bool(data) + + def clean(self, data: Any) -> Optional[bool]: + if data is not None: + data = self.deserialize(data) + if data is None and self._required: + raise ValidationException("Value required for this field.") + return data # type: ignore[no-any-return] + + +class Byte(Integer): + """ + :arg null_value: + :arg boost: + :arg coerce: + :arg ignore_malformed: + :arg index: + :arg on_script_error: + :arg script: + :arg time_series_metric: For internal use by Elastic only. Marks the + field as a time series dimension. Defaults to false. + :arg time_series_dimension: For internal use by Elastic only. Marks + the field as a time series dimension. Defaults to false. + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. 
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "byte" + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + + def __init__( + self, + *args: Any, + null_value: Union[float, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + coerce: Union[bool, "DefaultType"] = DEFAULT, + ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, + index: Union[bool, "DefaultType"] = DEFAULT, + on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + time_series_metric: Union[ + Literal["gauge", "counter", "summary", "histogram", "position"], + "DefaultType", + ] = DEFAULT, + time_series_dimension: Union[bool, "DefaultType"] = DEFAULT, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if null_value is not DEFAULT: + kwargs["null_value"] = null_value + if boost is not DEFAULT: + kwargs["boost"] = boost + if coerce is not DEFAULT: + kwargs["coerce"] = coerce + if ignore_malformed is not DEFAULT: + kwargs["ignore_malformed"] = ignore_malformed + if index is not DEFAULT: + kwargs["index"] = index + if on_script_error is not DEFAULT: + kwargs["on_script_error"] = on_script_error + if script is not DEFAULT: + kwargs["script"] = script + if time_series_metric is not DEFAULT: + kwargs["time_series_metric"] = time_series_metric + if time_series_dimension is not DEFAULT: + kwargs["time_series_dimension"] = time_series_dimension + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) + + +class Completion(Field): + """ + :arg analyzer: + :arg contexts: + :arg max_input_length: + :arg preserve_position_increments: + :arg preserve_separators: + :arg search_analyzer: + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. 
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "completion" + _param_defs = { + "analyzer": {"type": "analyzer"}, + "search_analyzer": {"type": "analyzer"}, + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + + def __init__( + self, + *args: Any, + analyzer: Union[str, DslBase, "DefaultType"] = DEFAULT, + contexts: Union[ + Sequence["types.SuggestContext"], Sequence[Dict[str, Any]], "DefaultType" + ] = DEFAULT, + max_input_length: Union[int, "DefaultType"] = DEFAULT, + preserve_position_increments: Union[bool, "DefaultType"] = DEFAULT, + preserve_separators: Union[bool, "DefaultType"] = DEFAULT, + search_analyzer: Union[str, DslBase, "DefaultType"] = DEFAULT, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if analyzer is not DEFAULT: + kwargs["analyzer"] = analyzer + if contexts is not DEFAULT: + kwargs["contexts"] = contexts + if max_input_length is not DEFAULT: + kwargs["max_input_length"] = max_input_length + if preserve_position_increments is not DEFAULT: + kwargs["preserve_position_increments"] = preserve_position_increments + if preserve_separators is not DEFAULT: + kwargs["preserve_separators"] = preserve_separators + if search_analyzer is not DEFAULT: + kwargs["search_analyzer"] = search_analyzer + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) + + +class ConstantKeyword(Field): + """ + :arg value: + :arg meta: Metadata about the field. 
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "constant_keyword" + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + + def __init__( + self, + *args: Any, + value: Any = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if value is not DEFAULT: + kwargs["value"] = value + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class Date(Field): + """ + :arg default_timezone: timezone that will be automatically used for tz-naive values + May be instance of `datetime.tzinfo` or string containing TZ offset + :arg boost: + :arg fielddata: + :arg format: + :arg ignore_malformed: + :arg index: + :arg null_value: + :arg precision_step: + :arg locale: + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. + :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + name = "date" _coerce = True + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } def __init__( self, - default_timezone: Optional[Union[str, "tzinfo"]] = None, + default_timezone: Union[str, "tzinfo", "DefaultType"] = DEFAULT, *args: Any, + boost: Union[float, "DefaultType"] = DEFAULT, + fielddata: Union[ + "types.NumericFielddata", Dict[str, Any], "DefaultType" + ] = DEFAULT, + format: Union[str, "DefaultType"] = DEFAULT, + ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, + index: Union[bool, "DefaultType"] = DEFAULT, + null_value: Any = DEFAULT, + precision_step: Union[int, "DefaultType"] = DEFAULT, + locale: Union[str, "DefaultType"] = DEFAULT, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, **kwargs: Any, ): - """ - :arg default_timezone: timezone that will be automatically used for tz-naive values - May be instance of `datetime.tzinfo` or string containing TZ offset - """ - if isinstance(default_timezone, str): + if boost is not DEFAULT: + kwargs["boost"] = boost + if fielddata is not DEFAULT: + kwargs["fielddata"] = fielddata + if format is 
not DEFAULT: + kwargs["format"] = format + if ignore_malformed is not DEFAULT: + kwargs["ignore_malformed"] = ignore_malformed + if index is not DEFAULT: + kwargs["index"] = index + if null_value is not DEFAULT: + kwargs["null_value"] = null_value + if precision_step is not DEFAULT: + kwargs["precision_step"] = precision_step + if locale is not DEFAULT: + kwargs["locale"] = locale + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + + if default_timezone is DEFAULT: + self._default_timezone = None + elif isinstance(default_timezone, str): self._default_timezone = tz.gettz(default_timezone) else: self._default_timezone = default_timezone @@ -332,72 +1223,241 @@ def _deserialize(self, data: Any) -> Union[datetime, date]: raise ValidationException(f"Could not parse date from the value ({data!r})") -class Text(Field): +class DateNanos(Field): + """ + :arg boost: + :arg format: + :arg ignore_malformed: + :arg index: + :arg null_value: + :arg precision_step: + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. + :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "date_nanos" _param_defs = { + "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, - "analyzer": {"type": "analyzer"}, - "search_analyzer": {"type": "analyzer"}, - "search_quote_analyzer": {"type": "analyzer"}, } - name = "text" + def __init__( + self, + *args: Any, + boost: Union[float, "DefaultType"] = DEFAULT, + format: Union[str, "DefaultType"] = DEFAULT, + ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, + index: Union[bool, "DefaultType"] = DEFAULT, + null_value: Any = DEFAULT, + precision_step: Union[int, "DefaultType"] = DEFAULT, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if boost is not DEFAULT: + kwargs["boost"] = boost + if format is not DEFAULT: + kwargs["format"] = format + if ignore_malformed is not DEFAULT: + kwargs["ignore_malformed"] = ignore_malformed + if index is not DEFAULT: + kwargs["index"] = index + if null_value is not DEFAULT: + kwargs["null_value"] = null_value + if precision_step is not DEFAULT: + kwargs["precision_step"] = precision_step + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + 
kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) -class SearchAsYouType(Field): - _param_defs = { - "analyzer": {"type": "analyzer"}, - "search_analyzer": {"type": "analyzer"}, - "search_quote_analyzer": {"type": "analyzer"}, - } - name = "search_as_you_type" +class DateRange(RangeField): + """ + :arg format: + :arg boost: + :arg coerce: + :arg index: + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. + :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ -class Keyword(Field): + name = "date_range" + _core_field = Date() _param_defs = { + "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, - "search_analyzer": {"type": "analyzer"}, - "normalizer": {"type": "normalizer"}, } - name = "keyword" + def __init__( + self, + *args: Any, + format: Union[str, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + coerce: Union[bool, "DefaultType"] = DEFAULT, + index: Union[bool, "DefaultType"] = DEFAULT, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if format is not DEFAULT: + kwargs["format"] = format + if boost is not DEFAULT: + kwargs["boost"] = boost + if coerce is not DEFAULT: + kwargs["coerce"] = coerce + if index is not DEFAULT: + kwargs["index"] = index + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) -class ConstantKeyword(Keyword): - name = "constant_keyword" +class DenseVector(Field): + """ + :arg element_type: + :arg dims: + :arg similarity: + :arg index: + :arg index_options: + :arg meta: Metadata about the field. 
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ -class Boolean(Field): - name = "boolean" + name = "dense_vector" _coerce = True + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } - def _deserialize(self, data: Any) -> bool: - if data == "false": - return False - return bool(data) - - def clean(self, data: Any) -> Optional[bool]: - if data is not None: - data = self.deserialize(data) - if data is None and self._required: - raise ValidationException("Value required for this field.") - return data # type: ignore[no-any-return] - - -class Float(Field): - name = "float" - _coerce = True - - def _deserialize(self, data: Any) -> float: - return float(data) - - -class DenseVector(Field): - name = "dense_vector" - _coerce = True - - def __init__(self, **kwargs: Any): - self._element_type = kwargs.get("element_type", "float") - if self._element_type in ["float", "byte"]: - kwargs["multi"] = True - super().__init__(**kwargs) + def __init__( + self, + *args: Any, + element_type: Union[str, "DefaultType"] = DEFAULT, + dims: Union[int, "DefaultType"] = DEFAULT, + similarity: Union[str, "DefaultType"] = DEFAULT, + index: Union[bool, "DefaultType"] = DEFAULT, + index_options: Union[ + "types.DenseVectorIndexOptions", Dict[str, Any], "DefaultType" + ] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if element_type is not DEFAULT: + kwargs["element_type"] = element_type + if dims is not DEFAULT: + kwargs["dims"] = dims + if similarity is not DEFAULT: + kwargs["similarity"] = similarity + if index is not DEFAULT: + kwargs["index"] = index + if index_options is not DEFAULT: + kwargs["index_options"] = index_options + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + self._element_type = kwargs.get("element_type", "float") + if self._element_type in ["float", "byte"]: + kwargs["multi"] = True + super().__init__(*args, **kwargs) def _deserialize(self, data: Any) -> Any: if self._element_type == "float": @@ -407,181 +1467,2733 @@ def _deserialize(self, data: Any) -> Any: return data -class SparseVector(Field): - name = "sparse_vector" +class Double(Float): + """ + :arg null_value: + :arg boost: + :arg coerce: + :arg ignore_malformed: + :arg index: + :arg on_script_error: + :arg script: + :arg time_series_metric: For internal use by Elastic only. Marks the + field as a time series dimension. Defaults to false. + :arg time_series_dimension: For internal use by Elastic only. Marks + the field as a time series dimension. Defaults to false. + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. 
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + name = "double" + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } -class HalfFloat(Float): - name = "half_float" + def __init__( + self, + *args: Any, + null_value: Union[float, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + coerce: Union[bool, "DefaultType"] = DEFAULT, + ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, + index: Union[bool, "DefaultType"] = DEFAULT, + on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + time_series_metric: Union[ + Literal["gauge", "counter", "summary", "histogram", "position"], + "DefaultType", + ] = DEFAULT, + time_series_dimension: Union[bool, "DefaultType"] = DEFAULT, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if null_value is not DEFAULT: + kwargs["null_value"] = null_value + if boost is not DEFAULT: + kwargs["boost"] = boost + if coerce is not DEFAULT: + kwargs["coerce"] = coerce + if ignore_malformed is not DEFAULT: + kwargs["ignore_malformed"] = ignore_malformed + if index is not DEFAULT: + kwargs["index"] = index + if on_script_error is not DEFAULT: + kwargs["on_script_error"] = on_script_error + if script is not DEFAULT: + kwargs["script"] = script + if time_series_metric is not DEFAULT: + kwargs["time_series_metric"] = time_series_metric + if time_series_dimension is not DEFAULT: + kwargs["time_series_dimension"] = time_series_dimension + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) -class ScaledFloat(Float): - name = "scaled_float" +class DoubleRange(RangeField): + """ + :arg boost: + :arg coerce: + :arg index: + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. 
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ - def __init__(self, scaling_factor: int, *args: Any, **kwargs: Any): - super().__init__(scaling_factor=scaling_factor, *args, **kwargs) + name = "double_range" + _core_field = Double() + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + def __init__( + self, + *args: Any, + boost: Union[float, "DefaultType"] = DEFAULT, + coerce: Union[bool, "DefaultType"] = DEFAULT, + index: Union[bool, "DefaultType"] = DEFAULT, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if boost is not DEFAULT: + kwargs["boost"] = boost + if coerce is not DEFAULT: + kwargs["coerce"] = coerce + if index is not DEFAULT: + kwargs["index"] = index + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) -class Double(Float): - name = "double" +class Flattened(Field): + """ + :arg boost: + :arg depth_limit: + :arg doc_values: + :arg eager_global_ordinals: + :arg index: + :arg index_options: + :arg null_value: + :arg similarity: + :arg split_queries_on_whitespace: + :arg meta: Metadata about the field. 
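`DoubleRange` keeps `_core_field = Double()` so range bounds are (de)serialized through the scalar field, with values wrapped in the DSL `Range` helper. A small sketch under those assumptions (the document class, index name and bounds are made up):

```python
from elasticsearch.dsl import Document, DoubleRange, Range


class PriceBand(Document):
    band = DoubleRange()

    class Index:
        name = "price-bands"


# Bounds use the usual gte/gt/lte/lt keys; the Range wrapper is also expected
# to support membership checks such as `15.0 in doc.band`.
doc = PriceBand(band=Range(gte=10.0, lt=20.0))
```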
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "flattened" + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } -class RankFeature(Float): - name = "rank_feature" + def __init__( + self, + *args: Any, + boost: Union[float, "DefaultType"] = DEFAULT, + depth_limit: Union[int, "DefaultType"] = DEFAULT, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + eager_global_ordinals: Union[bool, "DefaultType"] = DEFAULT, + index: Union[bool, "DefaultType"] = DEFAULT, + index_options: Union[ + Literal["docs", "freqs", "positions", "offsets"], "DefaultType" + ] = DEFAULT, + null_value: Union[str, "DefaultType"] = DEFAULT, + similarity: Union[str, "DefaultType"] = DEFAULT, + split_queries_on_whitespace: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if boost is not DEFAULT: + kwargs["boost"] = boost + if depth_limit is not DEFAULT: + kwargs["depth_limit"] = depth_limit + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if eager_global_ordinals is not DEFAULT: + kwargs["eager_global_ordinals"] = eager_global_ordinals + if index is not DEFAULT: + kwargs["index"] = index + if index_options is not DEFAULT: + kwargs["index_options"] = index_options + if null_value is not DEFAULT: + kwargs["null_value"] = null_value + if similarity is not DEFAULT: + kwargs["similarity"] = similarity + if split_queries_on_whitespace is not DEFAULT: + kwargs["split_queries_on_whitespace"] = split_queries_on_whitespace + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) -class RankFeatures(Field): - name = "rank_features" +class FloatRange(RangeField): + """ + :arg boost: + :arg coerce: + :arg index: + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. 
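`Flattened` maps an entire JSON object as a single field, which suits loosely structured key/value payloads. A brief sketch (the `Event` document, index name and `depth_limit` value are illustrative):

```python
from elasticsearch.dsl import Document, Flattened


class Event(Document):
    # Leaf values inside `labels` are indexed as keywords by Elasticsearch.
    labels = Flattened(depth_limit=5)

    class Index:
        name = "events"


event = Event(labels={"env": "prod", "team": "search", "severity": "low"})
```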
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + name = "float_range" + _core_field = Float() + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } -class Integer(Field): - name = "integer" - _coerce = True + def __init__( + self, + *args: Any, + boost: Union[float, "DefaultType"] = DEFAULT, + coerce: Union[bool, "DefaultType"] = DEFAULT, + index: Union[bool, "DefaultType"] = DEFAULT, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if boost is not DEFAULT: + kwargs["boost"] = boost + if coerce is not DEFAULT: + kwargs["coerce"] = coerce + if index is not DEFAULT: + kwargs["index"] = index + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) - def _deserialize(self, data: Any) -> int: - return int(data) +class GeoPoint(Field): + """ + :arg ignore_malformed: + :arg ignore_z_value: + :arg null_value: + :arg index: + :arg on_script_error: + :arg script: + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. 
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ -class Byte(Integer): - name = "byte" + name = "geo_point" + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + def __init__( + self, + *args: Any, + ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, + ignore_z_value: Union[bool, "DefaultType"] = DEFAULT, + null_value: Union[ + "types.LatLonGeoLocation", + "types.GeoHashLocation", + Sequence[float], + str, + Dict[str, Any], + "DefaultType", + ] = DEFAULT, + index: Union[bool, "DefaultType"] = DEFAULT, + on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if ignore_malformed is not DEFAULT: + kwargs["ignore_malformed"] = ignore_malformed + if ignore_z_value is not DEFAULT: + kwargs["ignore_z_value"] = ignore_z_value + if null_value is not DEFAULT: + kwargs["null_value"] = null_value + if index is not DEFAULT: + kwargs["index"] = index + if on_script_error is not DEFAULT: + kwargs["on_script_error"] = on_script_error + if script is not DEFAULT: + kwargs["script"] = script + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) -class Short(Integer): - name = "short" +class GeoShape(Field): + """ + The `geo_shape` data type facilitates the indexing of and searching + with arbitrary geo shapes such as rectangles and polygons. + + :arg coerce: + :arg ignore_malformed: + :arg ignore_z_value: + :arg orientation: + :arg strategy: + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. 
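A short sketch of the `geo_point` field in a document mapping (the document class, index name and coordinates are illustrative; Elasticsearch also accepts geohash strings and `[lon, lat]` arrays for this type):

```python
from elasticsearch.dsl import Document, GeoPoint


class Shop(Document):
    location = GeoPoint()

    class Index:
        name = "shops"


# The lat/lon dict is one of the accepted representations and is stored as-is.
shop = Shop(location={"lat": 52.37, "lon": 4.89})
```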
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ -class Long(Integer): - name = "long" + name = "geo_shape" + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + def __init__( + self, + *args: Any, + coerce: Union[bool, "DefaultType"] = DEFAULT, + ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, + ignore_z_value: Union[bool, "DefaultType"] = DEFAULT, + orientation: Union[Literal["right", "left"], "DefaultType"] = DEFAULT, + strategy: Union[Literal["recursive", "term"], "DefaultType"] = DEFAULT, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if coerce is not DEFAULT: + kwargs["coerce"] = coerce + if ignore_malformed is not DEFAULT: + kwargs["ignore_malformed"] = ignore_malformed + if ignore_z_value is not DEFAULT: + kwargs["ignore_z_value"] = ignore_z_value + if orientation is not DEFAULT: + kwargs["orientation"] = orientation + if strategy is not DEFAULT: + kwargs["strategy"] = strategy + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) -class Ip(Field): - name = "ip" - _coerce = True - def _deserialize(self, data: Any) -> Union["IPv4Address", "IPv6Address"]: - # the ipaddress library for pypy only accepts unicode. - return ipaddress.ip_address(unicode(data)) +class HalfFloat(Float): + """ + :arg null_value: + :arg boost: + :arg coerce: + :arg ignore_malformed: + :arg index: + :arg on_script_error: + :arg script: + :arg time_series_metric: For internal use by Elastic only. Marks the + field as a time series dimension. Defaults to false. + :arg time_series_dimension: For internal use by Elastic only. Marks + the field as a time series dimension. Defaults to false. + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. 
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ - def _serialize(self, data: Any) -> Optional[str]: - if data is None: - return None - return str(data) + name = "half_float" + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + def __init__( + self, + *args: Any, + null_value: Union[float, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + coerce: Union[bool, "DefaultType"] = DEFAULT, + ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, + index: Union[bool, "DefaultType"] = DEFAULT, + on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + time_series_metric: Union[ + Literal["gauge", "counter", "summary", "histogram", "position"], + "DefaultType", + ] = DEFAULT, + time_series_dimension: Union[bool, "DefaultType"] = DEFAULT, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if null_value is not DEFAULT: + kwargs["null_value"] = null_value + if boost is not DEFAULT: + kwargs["boost"] = boost + if coerce is not DEFAULT: + kwargs["coerce"] = coerce + if ignore_malformed is not DEFAULT: + kwargs["ignore_malformed"] = ignore_malformed + if index is not DEFAULT: + kwargs["index"] = index + if on_script_error is not DEFAULT: + kwargs["on_script_error"] = on_script_error + if script is not DEFAULT: + kwargs["script"] = script + if time_series_metric is not DEFAULT: + kwargs["time_series_metric"] = time_series_metric + if time_series_dimension is not DEFAULT: + kwargs["time_series_dimension"] = time_series_dimension + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) -class Binary(Field): - name = "binary" - _coerce = True - def clean(self, data: str) -> str: - # Binary fields are opaque, so there's not much cleaning - # that can be done. - return data +class Histogram(Field): + """ + :arg ignore_malformed: + :arg meta: Metadata about the field. 
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ - def _deserialize(self, data: Any) -> bytes: - return base64.b64decode(data) + name = "histogram" + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } - def _serialize(self, data: Any) -> Optional[str]: - if data is None: - return None - return base64.b64encode(data).decode() + def __init__( + self, + *args: Any, + ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if ignore_malformed is not DEFAULT: + kwargs["ignore_malformed"] = ignore_malformed + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) -class Point(Field): - name = "point" +class IcuCollationKeyword(Field): + """ + :arg norms: + :arg index_options: + :arg index: Should the field be searchable? + :arg null_value: Accepts a string value which is substituted for any + explicit null values. Defaults to null, which means the field is + treated as missing. + :arg rules: + :arg language: + :arg country: + :arg variant: + :arg strength: + :arg decomposition: + :arg alternate: + :arg case_level: + :arg case_first: + :arg numeric: + :arg variable_top: + :arg hiragana_quaternary_mode: + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. 
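`histogram` fields hold pre-aggregated data, so the DSL passes the value through untouched. A sketch under that assumption (document class, index name and numbers are illustrative):

```python
from elasticsearch.dsl import Document, Histogram


class LatencySummary(Document):
    latency = Histogram()

    class Index:
        name = "latency-summaries"


# Elasticsearch expects parallel `values`/`counts` arrays for histogram fields.
doc = LatencySummary(latency={"values": [0.1, 0.25, 0.5], "counts": [42, 17, 3]})
```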
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "icu_collation_keyword" + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + def __init__( + self, + *args: Any, + norms: Union[bool, "DefaultType"] = DEFAULT, + index_options: Union[ + Literal["docs", "freqs", "positions", "offsets"], "DefaultType" + ] = DEFAULT, + index: Union[bool, "DefaultType"] = DEFAULT, + null_value: Union[str, "DefaultType"] = DEFAULT, + rules: Union[str, "DefaultType"] = DEFAULT, + language: Union[str, "DefaultType"] = DEFAULT, + country: Union[str, "DefaultType"] = DEFAULT, + variant: Union[str, "DefaultType"] = DEFAULT, + strength: Union[ + Literal["primary", "secondary", "tertiary", "quaternary", "identical"], + "DefaultType", + ] = DEFAULT, + decomposition: Union[Literal["no", "identical"], "DefaultType"] = DEFAULT, + alternate: Union[Literal["shifted", "non-ignorable"], "DefaultType"] = DEFAULT, + case_level: Union[bool, "DefaultType"] = DEFAULT, + case_first: Union[Literal["lower", "upper"], "DefaultType"] = DEFAULT, + numeric: Union[bool, "DefaultType"] = DEFAULT, + variable_top: Union[str, "DefaultType"] = DEFAULT, + hiragana_quaternary_mode: Union[bool, "DefaultType"] = DEFAULT, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if norms is not DEFAULT: + kwargs["norms"] = norms + if index_options is not DEFAULT: + kwargs["index_options"] = index_options + if index is not DEFAULT: + kwargs["index"] = index + if null_value is not DEFAULT: + kwargs["null_value"] = null_value + if rules is not DEFAULT: + kwargs["rules"] = rules + if language is not DEFAULT: + kwargs["language"] = language + if country is not DEFAULT: + kwargs["country"] = country + if variant is not DEFAULT: + kwargs["variant"] = variant + if strength is not DEFAULT: + kwargs["strength"] = strength + if decomposition is not DEFAULT: + kwargs["decomposition"] = decomposition + if alternate is not DEFAULT: + kwargs["alternate"] = alternate + if case_level is not DEFAULT: + kwargs["case_level"] = case_level + if case_first is not DEFAULT: + kwargs["case_first"] = case_first + if numeric is not DEFAULT: + kwargs["numeric"] = numeric + if variable_top is not DEFAULT: + kwargs["variable_top"] = variable_top + if hiragana_quaternary_mode is not DEFAULT: + kwargs["hiragana_quaternary_mode"] = hiragana_quaternary_mode + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + 
kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) -class Shape(Field): - name = "shape" +class IntegerRange(RangeField): + """ + :arg boost: + :arg coerce: + :arg index: + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. + :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ -class GeoPoint(Field): - name = "geo_point" + name = "integer_range" + _core_field = Integer() + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + def __init__( + self, + *args: Any, + boost: Union[float, "DefaultType"] = DEFAULT, + coerce: Union[bool, "DefaultType"] = DEFAULT, + index: Union[bool, "DefaultType"] = DEFAULT, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if boost is not DEFAULT: + kwargs["boost"] = boost + if coerce is not DEFAULT: + kwargs["coerce"] = coerce + if index is not DEFAULT: + kwargs["index"] = index + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) -class GeoShape(Field): - name = "geo_shape" +class Ip(Field): + """ + :arg boost: + :arg index: + :arg ignore_malformed: + :arg null_value: + :arg on_script_error: + :arg script: + :arg time_series_dimension: For internal use by Elastic only. Marks + the field as a time series dimension. Defaults to false. + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. 
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ -class Completion(Field): + name = "ip" + _coerce = True _param_defs = { - "analyzer": {"type": "analyzer"}, - "search_analyzer": {"type": "analyzer"}, + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, } - name = "completion" + def __init__( + self, + *args: Any, + boost: Union[float, "DefaultType"] = DEFAULT, + index: Union[bool, "DefaultType"] = DEFAULT, + ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, + null_value: Union[str, "DefaultType"] = DEFAULT, + on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + time_series_dimension: Union[bool, "DefaultType"] = DEFAULT, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if boost is not DEFAULT: + kwargs["boost"] = boost + if index is not DEFAULT: + kwargs["index"] = index + if ignore_malformed is not DEFAULT: + kwargs["ignore_malformed"] = ignore_malformed + if null_value is not DEFAULT: + kwargs["null_value"] = null_value + if on_script_error is not DEFAULT: + kwargs["on_script_error"] = on_script_error + if script is not DEFAULT: + kwargs["script"] = script + if time_series_dimension is not DEFAULT: + kwargs["time_series_dimension"] = time_series_dimension + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) -class Percolator(Field): - name = "percolator" - _coerce = True - - def _deserialize(self, data: Any) -> "Query": - return Q(data) # type: ignore[no-any-return] + def _deserialize(self, data: Any) -> Union["IPv4Address", "IPv6Address"]: + # the ipaddress library for pypy only accepts unicode. + return ipaddress.ip_address(unicode(data)) - def _serialize(self, data: Any) -> Optional[Dict[str, Any]]: + def _serialize(self, data: Any) -> Optional[str]: if data is None: return None - return data.to_dict() # type: ignore[no-any-return] + return str(data) -class RangeField(Field): - _coerce = True - _core_field: Optional[Field] = None +class IpRange(Field): + """ + :arg boost: + :arg coerce: + :arg index: + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. 
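Since `Ip` keeps `_coerce = True` together with the `ipaddress`-based `_deserialize`/`_serialize` pair shown above, values should round-trip between plain strings and `ipaddress` objects. A small sketch of that behaviour, assuming the public `deserialize`/`serialize` helpers on `Field`:

```python
import ipaddress

from elasticsearch.dsl import Ip

field = Ip()

addr = field.deserialize("192.168.0.1")
assert isinstance(addr, ipaddress.IPv4Address)

# Serialization goes back to the plain string form Elasticsearch stores.
assert field.serialize(addr) == "192.168.0.1"
```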
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ - def _deserialize(self, data: Any) -> Range["_SupportsComparison"]: - if isinstance(data, Range): - return data - data = {k: self._core_field.deserialize(v) for k, v in data.items()} # type: ignore[union-attr] - return Range(data) + name = "ip_range" + _core_field = Ip() + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } - def _serialize(self, data: Any) -> Optional[Dict[str, Any]]: - if data is None: - return None - if not isinstance(data, collections.abc.Mapping): - data = data.to_dict() - return {k: self._core_field.serialize(v) for k, v in data.items()} # type: ignore[union-attr] + def __init__( + self, + *args: Any, + boost: Union[float, "DefaultType"] = DEFAULT, + coerce: Union[bool, "DefaultType"] = DEFAULT, + index: Union[bool, "DefaultType"] = DEFAULT, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if boost is not DEFAULT: + kwargs["boost"] = boost + if coerce is not DEFAULT: + kwargs["coerce"] = coerce + if index is not DEFAULT: + kwargs["index"] = index + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) -class IntegerRange(RangeField): - name = "integer_range" - _core_field = Integer() +class Join(Field): + """ + :arg relations: + :arg eager_global_ordinals: + :arg meta: Metadata about the field. 
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + name = "join" + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } -class FloatRange(RangeField): - name = "float_range" - _core_field = Float() + def __init__( + self, + *args: Any, + relations: Union[ + Mapping[str, Union[str, Sequence[str]]], "DefaultType" + ] = DEFAULT, + eager_global_ordinals: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if relations is not DEFAULT: + kwargs["relations"] = relations + if eager_global_ordinals is not DEFAULT: + kwargs["eager_global_ordinals"] = eager_global_ordinals + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) -class LongRange(RangeField): - name = "long_range" - _core_field = Long() +class Keyword(Field): + """ + :arg boost: + :arg eager_global_ordinals: + :arg index: + :arg index_options: + :arg script: + :arg on_script_error: + :arg normalizer: + :arg norms: + :arg null_value: + :arg similarity: + :arg split_queries_on_whitespace: + :arg time_series_dimension: For internal use by Elastic only. Marks + the field as a time series dimension. Defaults to false. + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. 
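The `relations` argument is what wires up a parent/child hierarchy for the `join` type. A minimal sketch (the question/answer relation, field names and index name are illustrative):

```python
from elasticsearch.dsl import Document, Join, Keyword


class QA(Document):
    title = Keyword()
    # One join field per index; each parent maps to one or more child names.
    qa = Join(relations={"question": "answer"})

    class Index:
        name = "qa"


question = QA(title="How do join fields work?", qa="question")
```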
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + name = "keyword" + _param_defs = { + "normalizer": {"type": "normalizer"}, + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } -class DoubleRange(RangeField): - name = "double_range" - _core_field = Double() + def __init__( + self, + *args: Any, + boost: Union[float, "DefaultType"] = DEFAULT, + eager_global_ordinals: Union[bool, "DefaultType"] = DEFAULT, + index: Union[bool, "DefaultType"] = DEFAULT, + index_options: Union[ + Literal["docs", "freqs", "positions", "offsets"], "DefaultType" + ] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, + normalizer: Union[str, DslBase, "DefaultType"] = DEFAULT, + norms: Union[bool, "DefaultType"] = DEFAULT, + null_value: Union[str, "DefaultType"] = DEFAULT, + similarity: Union[str, None, "DefaultType"] = DEFAULT, + split_queries_on_whitespace: Union[bool, "DefaultType"] = DEFAULT, + time_series_dimension: Union[bool, "DefaultType"] = DEFAULT, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if boost is not DEFAULT: + kwargs["boost"] = boost + if eager_global_ordinals is not DEFAULT: + kwargs["eager_global_ordinals"] = eager_global_ordinals + if index is not DEFAULT: + kwargs["index"] = index + if index_options is not DEFAULT: + kwargs["index_options"] = index_options + if script is not DEFAULT: + kwargs["script"] = script + if on_script_error is not DEFAULT: + kwargs["on_script_error"] = on_script_error + if normalizer is not DEFAULT: + kwargs["normalizer"] = normalizer + if norms is not DEFAULT: + kwargs["norms"] = norms + if null_value is not DEFAULT: + kwargs["null_value"] = null_value + if similarity is not DEFAULT: + kwargs["similarity"] = similarity + if split_queries_on_whitespace is not DEFAULT: + kwargs["split_queries_on_whitespace"] = split_queries_on_whitespace + if time_series_dimension is not DEFAULT: + kwargs["time_series_dimension"] = time_series_dimension + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) -class DateRange(RangeField): - name = "date_range" - _core_field = Date() +class Long(Integer): + """ + :arg null_value: + :arg boost: + :arg coerce: + :arg 
ignore_malformed: + :arg index: + :arg on_script_error: + :arg script: + :arg time_series_metric: For internal use by Elastic only. Marks the + field as a time series dimension. Defaults to false. + :arg time_series_dimension: For internal use by Elastic only. Marks + the field as a time series dimension. Defaults to false. + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. + :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + name = "long" + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } -class IpRange(Field): - # not a RangeField since ip_range supports CIDR ranges - name = "ip_range" + def __init__( + self, + *args: Any, + null_value: Union[int, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + coerce: Union[bool, "DefaultType"] = DEFAULT, + ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, + index: Union[bool, "DefaultType"] = DEFAULT, + on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + time_series_metric: Union[ + Literal["gauge", "counter", "summary", "histogram", "position"], + "DefaultType", + ] = DEFAULT, + time_series_dimension: Union[bool, "DefaultType"] = DEFAULT, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if null_value is not DEFAULT: + kwargs["null_value"] = null_value + if boost is not DEFAULT: + kwargs["boost"] = boost + if coerce is not DEFAULT: + kwargs["coerce"] = coerce + if ignore_malformed is not DEFAULT: + kwargs["ignore_malformed"] = ignore_malformed + if index is not DEFAULT: + kwargs["index"] = index + if on_script_error is not DEFAULT: + kwargs["on_script_error"] = on_script_error + if script is not DEFAULT: + kwargs["script"] = script + if time_series_metric is not DEFAULT: + kwargs["time_series_metric"] = time_series_metric + if time_series_dimension is not DEFAULT: + kwargs["time_series_dimension"] = time_series_dimension + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) -class Join(Field): - name = "join" +class LongRange(RangeField): + """ + :arg boost: + :arg coerce: + :arg index: + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the 
field. + :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + name = "long_range" + _core_field = Long() + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + + def __init__( + self, + *args: Any, + boost: Union[float, "DefaultType"] = DEFAULT, + coerce: Union[bool, "DefaultType"] = DEFAULT, + index: Union[bool, "DefaultType"] = DEFAULT, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if boost is not DEFAULT: + kwargs["boost"] = boost + if coerce is not DEFAULT: + kwargs["coerce"] = coerce + if index is not DEFAULT: + kwargs["index"] = index + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) -class TokenCount(Field): - name = "token_count" + +class MatchOnlyText(Field): + """ + A variant of text that trades scoring and efficiency of positional + queries for space efficiency. This field effectively stores data the + same way as a text field that only indexes documents (index_options: + docs) and disables norms (norms: false). Term queries perform as fast + if not faster as on text fields, however queries that need positions + such as the match_phrase query perform slower as they need to look at + the _source document to verify whether a phrase matches. All queries + return constant scores that are equal to 1.0. + + :arg fields: + :arg meta: Metadata about the field. + :arg copy_to: Allows you to copy the values of multiple fields into a + group field, which can then be queried as a single field. + """ + + name = "match_only_text" + _param_defs = { + "fields": {"type": "field", "hash": True}, + } + + def __init__( + self, + *args: Any, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + **kwargs: Any, + ): + if fields is not DEFAULT: + kwargs["fields"] = fields + if meta is not DEFAULT: + kwargs["meta"] = meta + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + super().__init__(*args, **kwargs) class Murmur3(Field): + """ + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. 
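As the docstring above explains, `match_only_text` trades positional and scoring features for space, which makes it a natural fit for large, rarely-scored text such as log messages. A minimal sketch (document and index names are illustrative):

```python
from elasticsearch.dsl import Document, MatchOnlyText


class LogLine(Document):
    # Behaves like a text field with index_options=docs and norms disabled;
    # match queries work as usual, phrase queries fall back to _source checks.
    message = MatchOnlyText()

    class Index:
        name = "app-logs"
```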
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + name = "murmur3" + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + + def __init__( + self, + *args: Any, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) + + +class Nested(Object): + """ + :arg enabled: + :arg include_in_parent: + :arg include_in_root: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. + :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "nested" + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + + def __init__( + self, + *args: Any, + enabled: Union[bool, "DefaultType"] = DEFAULT, + include_in_parent: Union[bool, "DefaultType"] = DEFAULT, + include_in_root: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if enabled is not DEFAULT: + kwargs["enabled"] = enabled + if include_in_parent is not DEFAULT: + kwargs["include_in_parent"] = include_in_parent + if include_in_root is not DEFAULT: + kwargs["include_in_root"] = include_in_root + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not 
DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + kwargs.setdefault("multi", True) + super().__init__(*args, **kwargs) + + +class Percolator(Field): + """ + :arg meta: Metadata about the field. + :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "percolator" + _coerce = True + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + + def __init__( + self, + *args: Any, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) + + def _deserialize(self, data: Any) -> "Query": + return Q(data) # type: ignore[no-any-return] + + def _serialize(self, data: Any) -> Optional[Dict[str, Any]]: + if data is None: + return None + return data.to_dict() # type: ignore[no-any-return] + + +class Point(Field): + """ + :arg ignore_malformed: + :arg ignore_z_value: + :arg null_value: + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. 
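With `_coerce = True` and the `Q()`-based `_deserialize` shown above, a `percolator` field stores query definitions and hands them back as DSL `Query` objects. A short sketch (the alert document and query are illustrative):

```python
from elasticsearch.dsl import Document, Percolator, Q, Text


class Alert(Document):
    query = Percolator()  # the saved query to percolate against
    content = Text()      # the field incoming documents are matched on

    class Index:
        name = "alerts"


# Query objects are serialized with .to_dict() on the way in and rebuilt
# with Q() on the way out.
alert = Alert(query=Q("match", content="error"))
```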
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "point" + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + + def __init__( + self, + *args: Any, + ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, + ignore_z_value: Union[bool, "DefaultType"] = DEFAULT, + null_value: Union[str, "DefaultType"] = DEFAULT, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if ignore_malformed is not DEFAULT: + kwargs["ignore_malformed"] = ignore_malformed + if ignore_z_value is not DEFAULT: + kwargs["ignore_z_value"] = ignore_z_value + if null_value is not DEFAULT: + kwargs["null_value"] = null_value + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) + + +class RankFeature(Float): + """ + :arg positive_score_impact: + :arg meta: Metadata about the field. 
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "rank_feature" + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + + def __init__( + self, + *args: Any, + positive_score_impact: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if positive_score_impact is not DEFAULT: + kwargs["positive_score_impact"] = positive_score_impact + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) + + +class RankFeatures(Field): + """ + :arg positive_score_impact: + :arg meta: Metadata about the field. + :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "rank_features" + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + + def __init__( + self, + *args: Any, + positive_score_impact: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if positive_score_impact is not DEFAULT: + kwargs["positive_score_impact"] = positive_score_impact + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) + + +class ScaledFloat(Float): + """ + :arg null_value: + :arg scaling_factor: + :arg boost: + :arg coerce: + :arg ignore_malformed: + :arg index: + :arg on_script_error: + :arg script: + :arg time_series_metric: For internal use by Elastic only. Marks the + field as a time series dimension. Defaults to false. + :arg time_series_dimension: For internal use by Elastic only. Marks + the field as a time series dimension. Defaults to false. + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. 
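`positive_score_impact` is the mapping option specific to `rank_feature`/`rank_features`; setting it to `False` tells the `rank_feature` query that lower values should score higher. A sketch of how the two fields might be declared (document, index and field names are illustrative):

```python
from elasticsearch.dsl import Document, RankFeature, RankFeatures


class Page(Document):
    pagerank = RankFeature()                               # bigger is better
    url_length = RankFeature(positive_score_impact=False)  # smaller is better
    topics = RankFeatures()                                 # sparse name -> weight map

    class Index:
        name = "pages"
```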
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "scaled_float" + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + + def __init__( + self, + *args: Any, + null_value: Union[float, "DefaultType"] = DEFAULT, + scaling_factor: Union[float, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + coerce: Union[bool, "DefaultType"] = DEFAULT, + ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, + index: Union[bool, "DefaultType"] = DEFAULT, + on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + time_series_metric: Union[ + Literal["gauge", "counter", "summary", "histogram", "position"], + "DefaultType", + ] = DEFAULT, + time_series_dimension: Union[bool, "DefaultType"] = DEFAULT, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if null_value is not DEFAULT: + kwargs["null_value"] = null_value + if scaling_factor is not DEFAULT: + kwargs["scaling_factor"] = scaling_factor + if boost is not DEFAULT: + kwargs["boost"] = boost + if coerce is not DEFAULT: + kwargs["coerce"] = coerce + if ignore_malformed is not DEFAULT: + kwargs["ignore_malformed"] = ignore_malformed + if index is not DEFAULT: + kwargs["index"] = index + if on_script_error is not DEFAULT: + kwargs["on_script_error"] = on_script_error + if script is not DEFAULT: + kwargs["script"] = script + if time_series_metric is not DEFAULT: + kwargs["time_series_metric"] = time_series_metric + if time_series_dimension is not DEFAULT: + kwargs["time_series_dimension"] = time_series_dimension + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + if "scaling_factor" not in kwargs: + if len(args) > 0: + kwargs["scaling_factor"] = args[0] + args = args[1:] + else: + raise TypeError("missing required argument: 'scaling_factor'") + super().__init__(*args, **kwargs) + + +class SearchAsYouType(Field): + """ + :arg analyzer: + :arg index: + :arg index_options: + :arg max_shingle_size: + :arg norms: + :arg search_analyzer: + :arg search_quote_analyzer: + :arg similarity: + :arg term_vector: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. 
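The extra block at the end of `ScaledFloat.__init__` keeps `scaling_factor` a required argument while still accepting the old positional form. In other words (the error message is quoted from the code above):

```python
from elasticsearch.dsl import ScaledFloat

price = ScaledFloat(100)                 # legacy positional form still works
price = ScaledFloat(scaling_factor=100)  # explicit keyword form

# ScaledFloat() with neither form raises
# TypeError("missing required argument: 'scaling_factor'")
```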
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "search_as_you_type" + _param_defs = { + "analyzer": {"type": "analyzer"}, + "search_analyzer": {"type": "analyzer"}, + "search_quote_analyzer": {"type": "analyzer"}, + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + + def __init__( + self, + *args: Any, + analyzer: Union[str, DslBase, "DefaultType"] = DEFAULT, + index: Union[bool, "DefaultType"] = DEFAULT, + index_options: Union[ + Literal["docs", "freqs", "positions", "offsets"], "DefaultType" + ] = DEFAULT, + max_shingle_size: Union[int, "DefaultType"] = DEFAULT, + norms: Union[bool, "DefaultType"] = DEFAULT, + search_analyzer: Union[str, DslBase, "DefaultType"] = DEFAULT, + search_quote_analyzer: Union[str, DslBase, "DefaultType"] = DEFAULT, + similarity: Union[str, None, "DefaultType"] = DEFAULT, + term_vector: Union[ + Literal[ + "no", + "yes", + "with_offsets", + "with_positions", + "with_positions_offsets", + "with_positions_offsets_payloads", + "with_positions_payloads", + ], + "DefaultType", + ] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if analyzer is not DEFAULT: + kwargs["analyzer"] = analyzer + if index is not DEFAULT: + kwargs["index"] = index + if index_options is not DEFAULT: + kwargs["index_options"] = index_options + if max_shingle_size is not DEFAULT: + kwargs["max_shingle_size"] = max_shingle_size + if norms is not DEFAULT: + kwargs["norms"] = norms + if search_analyzer is not DEFAULT: + kwargs["search_analyzer"] = search_analyzer + if search_quote_analyzer is not DEFAULT: + kwargs["search_quote_analyzer"] = search_quote_analyzer + if similarity is not DEFAULT: + kwargs["similarity"] = similarity + if term_vector is not DEFAULT: + kwargs["term_vector"] = term_vector + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) class SemanticText(Field): + """ + :arg inference_id: (required) + :arg meta: + """ + name = "semantic_text" + + def __init__( + self, + *args: Any, + inference_id: Union[str, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if inference_id is not DEFAULT: + kwargs["inference_id"] = inference_id + if meta is not DEFAULT: + kwargs["meta"] = meta + super().__init__(*args, **kwargs) + + +class Shape(Field): + """ + The `shape` data type facilitates the indexing of and searching with + 
arbitrary `x, y` cartesian shapes such as rectangles and polygons. + + :arg coerce: + :arg ignore_malformed: + :arg ignore_z_value: + :arg orientation: + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. + :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "shape" + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + + def __init__( + self, + *args: Any, + coerce: Union[bool, "DefaultType"] = DEFAULT, + ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, + ignore_z_value: Union[bool, "DefaultType"] = DEFAULT, + orientation: Union[Literal["right", "left"], "DefaultType"] = DEFAULT, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if coerce is not DEFAULT: + kwargs["coerce"] = coerce + if ignore_malformed is not DEFAULT: + kwargs["ignore_malformed"] = ignore_malformed + if ignore_z_value is not DEFAULT: + kwargs["ignore_z_value"] = ignore_z_value + if orientation is not DEFAULT: + kwargs["orientation"] = orientation + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) + + +class Short(Integer): + """ + :arg null_value: + :arg boost: + :arg coerce: + :arg ignore_malformed: + :arg index: + :arg on_script_error: + :arg script: + :arg time_series_metric: For internal use by Elastic only. Marks the + field as a time series dimension. Defaults to false. + :arg time_series_dimension: For internal use by Elastic only. Marks + the field as a time series dimension. Defaults to false. + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. 
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "short" + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + + def __init__( + self, + *args: Any, + null_value: Union[float, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + coerce: Union[bool, "DefaultType"] = DEFAULT, + ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, + index: Union[bool, "DefaultType"] = DEFAULT, + on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + time_series_metric: Union[ + Literal["gauge", "counter", "summary", "histogram", "position"], + "DefaultType", + ] = DEFAULT, + time_series_dimension: Union[bool, "DefaultType"] = DEFAULT, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if null_value is not DEFAULT: + kwargs["null_value"] = null_value + if boost is not DEFAULT: + kwargs["boost"] = boost + if coerce is not DEFAULT: + kwargs["coerce"] = coerce + if ignore_malformed is not DEFAULT: + kwargs["ignore_malformed"] = ignore_malformed + if index is not DEFAULT: + kwargs["index"] = index + if on_script_error is not DEFAULT: + kwargs["on_script_error"] = on_script_error + if script is not DEFAULT: + kwargs["script"] = script + if time_series_metric is not DEFAULT: + kwargs["time_series_metric"] = time_series_metric + if time_series_dimension is not DEFAULT: + kwargs["time_series_dimension"] = time_series_dimension + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) + + +class SparseVector(Field): + """ + :arg meta: Metadata about the field. 
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "sparse_vector" + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + + def __init__( + self, + *args: Any, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) + + +class Text(Field): + """ + :arg analyzer: + :arg boost: + :arg eager_global_ordinals: + :arg fielddata: + :arg fielddata_frequency_filter: + :arg index: + :arg index_options: + :arg index_phrases: + :arg index_prefixes: + :arg norms: + :arg position_increment_gap: + :arg search_analyzer: + :arg search_quote_analyzer: + :arg similarity: + :arg term_vector: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. + :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "text" + _param_defs = { + "analyzer": {"type": "analyzer"}, + "search_analyzer": {"type": "analyzer"}, + "search_quote_analyzer": {"type": "analyzer"}, + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + + def __init__( + self, + *args: Any, + analyzer: Union[str, DslBase, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + eager_global_ordinals: Union[bool, "DefaultType"] = DEFAULT, + fielddata: Union[bool, "DefaultType"] = DEFAULT, + fielddata_frequency_filter: Union[ + "types.FielddataFrequencyFilter", Dict[str, Any], "DefaultType" + ] = DEFAULT, + index: Union[bool, "DefaultType"] = DEFAULT, + index_options: Union[ + Literal["docs", "freqs", "positions", "offsets"], "DefaultType" + ] = DEFAULT, + index_phrases: Union[bool, "DefaultType"] = DEFAULT, + index_prefixes: Union[ + "types.TextIndexPrefixes", None, Dict[str, Any], "DefaultType" + ] = DEFAULT, + norms: Union[bool, "DefaultType"] = DEFAULT, + position_increment_gap: Union[int, "DefaultType"] = DEFAULT, + search_analyzer: Union[str, DslBase, "DefaultType"] = DEFAULT, + search_quote_analyzer: Union[str, DslBase, "DefaultType"] = DEFAULT, + similarity: Union[str, None, "DefaultType"] = DEFAULT, + term_vector: Union[ + Literal[ + "no", + "yes", + "with_offsets", + "with_positions", + "with_positions_offsets", + "with_positions_offsets_payloads", + "with_positions_payloads", + ], + "DefaultType", + ] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] 
= DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if analyzer is not DEFAULT: + kwargs["analyzer"] = analyzer + if boost is not DEFAULT: + kwargs["boost"] = boost + if eager_global_ordinals is not DEFAULT: + kwargs["eager_global_ordinals"] = eager_global_ordinals + if fielddata is not DEFAULT: + kwargs["fielddata"] = fielddata + if fielddata_frequency_filter is not DEFAULT: + kwargs["fielddata_frequency_filter"] = fielddata_frequency_filter + if index is not DEFAULT: + kwargs["index"] = index + if index_options is not DEFAULT: + kwargs["index_options"] = index_options + if index_phrases is not DEFAULT: + kwargs["index_phrases"] = index_phrases + if index_prefixes is not DEFAULT: + kwargs["index_prefixes"] = index_prefixes + if norms is not DEFAULT: + kwargs["norms"] = norms + if position_increment_gap is not DEFAULT: + kwargs["position_increment_gap"] = position_increment_gap + if search_analyzer is not DEFAULT: + kwargs["search_analyzer"] = search_analyzer + if search_quote_analyzer is not DEFAULT: + kwargs["search_quote_analyzer"] = search_quote_analyzer + if similarity is not DEFAULT: + kwargs["similarity"] = similarity + if term_vector is not DEFAULT: + kwargs["term_vector"] = term_vector + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) + + +class TokenCount(Field): + """ + :arg analyzer: + :arg boost: + :arg index: + :arg null_value: + :arg enable_position_increments: + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. 
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "token_count" + _param_defs = { + "analyzer": {"type": "analyzer"}, + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + + def __init__( + self, + *args: Any, + analyzer: Union[str, DslBase, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + index: Union[bool, "DefaultType"] = DEFAULT, + null_value: Union[float, "DefaultType"] = DEFAULT, + enable_position_increments: Union[bool, "DefaultType"] = DEFAULT, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if analyzer is not DEFAULT: + kwargs["analyzer"] = analyzer + if boost is not DEFAULT: + kwargs["boost"] = boost + if index is not DEFAULT: + kwargs["index"] = index + if null_value is not DEFAULT: + kwargs["null_value"] = null_value + if enable_position_increments is not DEFAULT: + kwargs["enable_position_increments"] = enable_position_increments + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) + + +class UnsignedLong(Field): + """ + :arg null_value: + :arg boost: + :arg coerce: + :arg ignore_malformed: + :arg index: + :arg on_script_error: + :arg script: + :arg time_series_metric: For internal use by Elastic only. Marks the + field as a time series dimension. Defaults to false. + :arg time_series_dimension: For internal use by Elastic only. Marks + the field as a time series dimension. Defaults to false. + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. 
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "unsigned_long" + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + + def __init__( + self, + *args: Any, + null_value: Union[int, "DefaultType"] = DEFAULT, + boost: Union[float, "DefaultType"] = DEFAULT, + coerce: Union[bool, "DefaultType"] = DEFAULT, + ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, + index: Union[bool, "DefaultType"] = DEFAULT, + on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + time_series_metric: Union[ + Literal["gauge", "counter", "summary", "histogram", "position"], + "DefaultType", + ] = DEFAULT, + time_series_dimension: Union[bool, "DefaultType"] = DEFAULT, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if null_value is not DEFAULT: + kwargs["null_value"] = null_value + if boost is not DEFAULT: + kwargs["boost"] = boost + if coerce is not DEFAULT: + kwargs["coerce"] = coerce + if ignore_malformed is not DEFAULT: + kwargs["ignore_malformed"] = ignore_malformed + if index is not DEFAULT: + kwargs["index"] = index + if on_script_error is not DEFAULT: + kwargs["on_script_error"] = on_script_error + if script is not DEFAULT: + kwargs["script"] = script + if time_series_metric is not DEFAULT: + kwargs["time_series_metric"] = time_series_metric + if time_series_dimension is not DEFAULT: + kwargs["time_series_dimension"] = time_series_dimension + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) + + +class Version(Field): + """ + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. 
+ :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "version" + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + + def __init__( + self, + *args: Any, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) + + +class Wildcard(Field): + """ + :arg null_value: + :arg doc_values: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. + :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "wildcard" + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + + def __init__( + self, + *args: Any, + null_value: Union[str, "DefaultType"] = DEFAULT, + doc_values: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if null_value is not DEFAULT: + kwargs["null_value"] = null_value + if doc_values is not DEFAULT: + kwargs["doc_values"] = doc_values + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) diff --git a/elasticsearch/dsl/types.py b/elasticsearch/dsl/types.py index 
80332ce32..fe12a00e8 100644 --- a/elasticsearch/dsl/types.py +++ b/elasticsearch/dsl/types.py @@ -362,6 +362,39 @@ def __init__( super().__init__(kwargs) +class DenseVectorIndexOptions(AttrDict[Any]): + """ + :arg type: (required) + :arg m: + :arg ef_construction: + :arg confidence_interval: + """ + + type: Union[str, DefaultType] + m: Union[int, DefaultType] + ef_construction: Union[int, DefaultType] + confidence_interval: Union[float, DefaultType] + + def __init__( + self, + *, + type: Union[str, DefaultType] = DEFAULT, + m: Union[int, DefaultType] = DEFAULT, + ef_construction: Union[int, DefaultType] = DEFAULT, + confidence_interval: Union[float, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if type is not DEFAULT: + kwargs["type"] = type + if m is not DEFAULT: + kwargs["m"] = m + if ef_construction is not DEFAULT: + kwargs["ef_construction"] = ef_construction + if confidence_interval is not DEFAULT: + kwargs["confidence_interval"] = confidence_interval + super().__init__(kwargs) + + class EmptyObject(AttrDict[Any]): """ For empty Class assignments @@ -656,6 +689,34 @@ def __init__( super().__init__(kwargs) +class FielddataFrequencyFilter(AttrDict[Any]): + """ + :arg max: (required) + :arg min: (required) + :arg min_segment_size: (required) + """ + + max: Union[float, DefaultType] + min: Union[float, DefaultType] + min_segment_size: Union[int, DefaultType] + + def __init__( + self, + *, + max: Union[float, DefaultType] = DEFAULT, + min: Union[float, DefaultType] = DEFAULT, + min_segment_size: Union[int, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if max is not DEFAULT: + kwargs["max"] = max + if min is not DEFAULT: + kwargs["min"] = min + if min_segment_size is not DEFAULT: + kwargs["min_segment_size"] = min_segment_size + super().__init__(kwargs) + + class FrequentItemSetsField(AttrDict[Any]): """ :arg field: (required) @@ -2474,6 +2535,24 @@ def __init__( super().__init__(kwargs) +class NumericFielddata(AttrDict[Any]): + """ + :arg format: (required) + """ + + format: Union[Literal["array", "disabled"], DefaultType] + + def __init__( + self, + *, + format: Union[Literal["array", "disabled"], DefaultType] = DEFAULT, + **kwargs: Any, + ): + if format is not DEFAULT: + kwargs["format"] = format + super().__init__(kwargs) + + class PercentageScoreHeuristic(AttrDict[Any]): pass @@ -3404,6 +3483,39 @@ def __init__( super().__init__(kwargs) +class SuggestContext(AttrDict[Any]): + """ + :arg name: (required) + :arg type: (required) + :arg path: + :arg precision: + """ + + name: Union[str, DefaultType] + type: Union[str, DefaultType] + path: Union[str, InstrumentedField, DefaultType] + precision: Union[int, str, DefaultType] + + def __init__( + self, + *, + name: Union[str, DefaultType] = DEFAULT, + type: Union[str, DefaultType] = DEFAULT, + path: Union[str, InstrumentedField, DefaultType] = DEFAULT, + precision: Union[int, str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if name is not DEFAULT: + kwargs["name"] = name + if type is not DEFAULT: + kwargs["type"] = type + if path is not DEFAULT: + kwargs["path"] = str(path) + if precision is not DEFAULT: + kwargs["precision"] = precision + super().__init__(kwargs) + + class TDigest(AttrDict[Any]): """ :arg compression: Limits the maximum number of nodes used by the @@ -3668,6 +3780,29 @@ def __init__( super().__init__(kwargs) +class TextIndexPrefixes(AttrDict[Any]): + """ + :arg max_chars: (required) + :arg min_chars: (required) + """ + + max_chars: Union[int, DefaultType] + min_chars: Union[int, DefaultType] + + def __init__( + self, 
+ *, + max_chars: Union[int, DefaultType] = DEFAULT, + min_chars: Union[int, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if max_chars is not DEFAULT: + kwargs["max_chars"] = max_chars + if min_chars is not DEFAULT: + kwargs["min_chars"] = min_chars + super().__init__(kwargs) + + class TokenPruningConfig(AttrDict[Any]): """ :arg tokens_freq_ratio_threshold: Tokens whose frequency is more than diff --git a/utils/dsl-generator.py b/utils/dsl-generator.py index cc905705a..3841967e7 100644 --- a/utils/dsl-generator.py +++ b/utils/dsl-generator.py @@ -31,6 +31,7 @@ trim_blocks=True, lstrip_blocks=True, ) +field_py = jinja_env.get_template("field.py.tpl") query_py = jinja_env.get_template("query.py.tpl") aggs_py = jinja_env.get_template("aggs.py.tpl") response_init_py = jinja_env.get_template("response.__init__.py.tpl") @@ -303,14 +304,20 @@ def get_python_type(self, schema_type, for_response=False): elif schema_type["kind"] == "enum": # enums are mapped to Literal[member, ...] - return ( + t = ( "Literal[" + ", ".join( [f"\"{member['name']}\"" for member in schema_type["members"]] ) - + "]", - None, + + "]" ) + if {"name": "true"} in schema_type["members"] and { + "name": "false" + } in schema_type["members"]: + # this is a boolean that was later upgraded to an enum, so we + # should also allow bools + t = f"Union[{t}, bool]" + return t, None elif schema_type["kind"] == "interface": if schema_type["name"]["namespace"] == "_types.query_dsl": @@ -738,6 +745,48 @@ def interface_to_python_class( } ) k["buckets_as_dict"] = generic_type + elif namespace == "_types.mapping": + if arg["name"] in ["fields", "properties"]: + # Python DSL provides a high level representation for the + # "fields" and 'properties' properties that many types support + k["args"].append( + { + "name": arg["name"], + "type": 'Union[Mapping[str, Field], "DefaultType"]', + "doc": [f":arg {arg['name']}:"], + "required": False, + } + ) + if "params" not in k: + k["params"] = [] + k["params"].append( + { + "name": arg["name"], + "param": {"type": "field", "hash": True}, + } + ) + + else: + # also the Python DSL provides implementations of analyzers + # and normalizers, so here we make sure these are noted as + # params and have an appropriate type hint. 
+ self.add_attribute( + k, arg, for_types_py=for_types_py, for_response=for_response + ) + if arg["name"].endswith("analyzer"): + if "params" not in k: + k["params"] = [] + k["params"].append( + {"name": arg["name"], "param": {"type": "analyzer"}} + ) + k["args"][-1]["type"] = 'Union[str, DslBase, "DefaultType"]' + elif arg["name"].endswith("normalizer"): + if "params" not in k: + k["params"] = [] + k["params"].append( + {"name": arg["name"], "param": {"type": "normalizer"}} + ) + k["args"][-1]["type"] = 'Union[str, DslBase, "DefaultType"]' else: if interface == "Hit" and arg["name"].startswith("_"): # Python DSL removes the undersore prefix from all the @@ -766,6 +815,108 @@ def interface_to_python_class( return k +def generate_field_py(schema, filename): + """Generate field.py with all the Elasticsearch fields as Python classes.""" + float_fields = ["half_float", "scaled_float", "double", "rank_feature"] + integer_fields = ["byte", "short", "long"] + range_fields = [ + "integer_range", + "float_range", + "long_range", + "double_range", + "date_range", + ] + object_fields = ["nested"] + coerced_fields = [ + "boolean", + "date", + "float", + "object", + "dense_vector", + "integer", + "ip", + "binary", + "percolator", + ] + + classes = [] + property = schema.find_type("Property", "_types.mapping") + for type_ in property["type"]["items"]: + if type_["type"]["name"] == "DynamicProperty": + # no support for dynamic properties + continue + field = schema.find_type(type_["type"]["name"], type_["type"]["namespace"]) + name = class_name = "" + for prop in field["properties"]: + if prop["name"] == "type": + if prop["type"]["kind"] != "literal_value": + raise RuntimeError(f"Unexpected property type {prop}") + name = prop["type"]["value"] + class_name = "".join([n.title() for n in name.split("_")]) + k = schema.interface_to_python_class( + type_["type"]["name"], + type_["type"]["namespace"], + for_types_py=False, + for_response=False, + ) + k["name"] = class_name + k["field"] = name + k["coerced"] = name in coerced_fields + if name in float_fields: + k["parent"] = "Float" + elif name in integer_fields: + k["parent"] = "Integer" + elif name in range_fields: + k["parent"] = "RangeField" + elif name in object_fields: + k["parent"] = "Object" + else: + k["parent"] = "Field" + k["args"] = [prop for prop in k["args"] if prop["name"] != "type"] + if name == "object": + # the DSL's object field has a doc_class argument + k["args"] = [ + { + "name": "doc_class", + "type": 'Union[Type["InnerDoc"], "DefaultType"]', + "doc": [ + ":arg doc_class: base doc class that handles mapping.", + " If no `doc_class` is provided, new instance of `InnerDoc` will be created,", + " populated with `properties` and used. 
Can not be provided together with `properties`", + ], + "positional": True, + "required": False, + } + ] + k["args"] + elif name == "date": + k["args"] = [ + { + "name": "default_timezone", + "type": 'Union[str, "tzinfo", "DefaultType"]', + "doc": [ + ":arg default_timezone: timezone that will be automatically used for tz-naive values", + " May be instance of `datetime.tzinfo` or string containing TZ offset", + ], + "positional": True, + "required": False, + } + ] + k["args"] + classes.append(k) + # make sure parent classes appear first + classes = sorted( + classes, + key=lambda k: ( + f'AA{k["name"]}' + if k["name"] in ["Float", "Integer", "Object"] + else k["name"] + ), + ) + + with open(filename, "wt") as f: + f.write(field_py.render(classes=classes)) + print(f"Generated {filename}.") + + def generate_query_py(schema, filename): """Generate query.py with all the properties of `QueryContainer` as Python classes. @@ -849,6 +1000,7 @@ def generate_types_py(schema, filename): if __name__ == "__main__": schema = ElasticsearchSchema() + generate_field_py(schema, "elasticsearch/dsl/field.py") generate_query_py(schema, "elasticsearch/dsl/query.py") generate_aggs_py(schema, "elasticsearch/dsl/aggs.py") generate_response_init_py(schema, "elasticsearch/dsl/response/__init__.py") diff --git a/utils/templates/field.py.tpl b/utils/templates/field.py.tpl new file mode 100644 index 000000000..95ee2f391 --- /dev/null +++ b/utils/templates/field.py.tpl @@ -0,0 +1,463 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import base64 +import collections.abc +import ipaddress +from copy import deepcopy +from datetime import date, datetime +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Iterable, + Iterator, + Literal, + Mapping, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) + +from dateutil import parser, tz +from elastic_transport.client_utils import DEFAULT, DefaultType + +from .exceptions import ValidationException +from .query import Q +from .utils import AttrDict, AttrList, DslBase +from .wrappers import Range + +if TYPE_CHECKING: + from datetime import tzinfo + from ipaddress import IPv4Address, IPv6Address + + from _operator import _SupportsComparison + + from .document import InnerDoc + from .document_base import InstrumentedField + from .mapping_base import MappingBase + from .query import Query + from . import types + +unicode = str + + +def construct_field( + name_or_field: Union[ + str, + "Field", + Dict[str, Any], + ], + **params: Any, +) -> "Field": + # {"type": "text", "analyzer": "snowball"} + if isinstance(name_or_field, collections.abc.Mapping): + if params: + raise ValueError( + "construct_field() cannot accept parameters when passing in a dict." 
+ ) + params = deepcopy(name_or_field) + if "type" not in params: + # inner object can be implicitly defined + if "properties" in params: + name = "object" + else: + raise ValueError('construct_field() needs to have a "type" key.') + else: + name = params.pop("type") + return Field.get_dsl_class(name)(**params) + + # Text() + if isinstance(name_or_field, Field): + if params: + raise ValueError( + "construct_field() cannot accept parameters " + "when passing in a construct_field object." + ) + return name_or_field + + # "text", analyzer="snowball" + return Field.get_dsl_class(name_or_field)(**params) + + +class Field(DslBase): + _type_name = "field" + _type_shortcut = staticmethod(construct_field) + # all fields can be multifields + _param_defs = {"fields": {"type": "field", "hash": True}} + name = "" + _coerce = False + + def __init__( + self, multi: bool = False, required: bool = False, *args: Any, **kwargs: Any + ): + """ + :arg bool multi: specifies whether field can contain array of values + :arg bool required: specifies whether field is required + """ + self._multi = multi + self._required = required + super().__init__(*args, **kwargs) + + def __getitem__(self, subfield: str) -> "Field": + return cast(Field, self._params.get("fields", {})[subfield]) + + def _serialize(self, data: Any) -> Any: + return data + + def _deserialize(self, data: Any) -> Any: + return data + + def _empty(self) -> Optional[Any]: + return None + + def empty(self) -> Optional[Any]: + if self._multi: + return AttrList([]) + return self._empty() + + def serialize(self, data: Any) -> Any: + if isinstance(data, (list, AttrList, tuple)): + return list(map(self._serialize, cast(Iterable[Any], data))) + return self._serialize(data) + + def deserialize(self, data: Any) -> Any: + if isinstance(data, (list, AttrList, tuple)): + data = [ + None if d is None else self._deserialize(d) + for d in cast(Iterable[Any], data) + ] + return data + if data is None: + return None + return self._deserialize(data) + + def clean(self, data: Any) -> Any: + if data is not None: + data = self.deserialize(data) + if data in (None, [], {}) and self._required: + raise ValidationException("Value required for this field.") + return data + + def to_dict(self) -> Dict[str, Any]: + d = super().to_dict() + name, value = cast(Tuple[str, Dict[str, Any]], d.popitem()) + value["type"] = name + return value + + +class CustomField(Field): + name = "custom" + _coerce = True + + def to_dict(self) -> Dict[str, Any]: + if isinstance(self.builtin_type, Field): + return self.builtin_type.to_dict() + + d = super().to_dict() + d["type"] = self.builtin_type + return d + + +class RangeField(Field): + _coerce = True + _core_field: Optional[Field] = None + + def _deserialize(self, data: Any) -> Range["_SupportsComparison"]: + if isinstance(data, Range): + return data + data = {k: self._core_field.deserialize(v) for k, v in data.items()} # type: ignore[union-attr] + return Range(data) + + def _serialize(self, data: Any) -> Optional[Dict[str, Any]]: + if data is None: + return None + if not isinstance(data, collections.abc.Mapping): + data = data.to_dict() + return {k: self._core_field.serialize(v) for k, v in data.items()} # type: ignore[union-attr] + + +{% for k in classes %} +class {{ k.name }}({{ k.parent }}): + """ + {% for line in k.docstring %} + {{ line }} + {% endfor %} + {% if k.args %} + {% if k.docstring %} + + {% endif %} + {% for kwarg in k.args %} + {% for line in kwarg.doc %} + {{ line }} + {% endfor %} + {% endfor %} + {% endif %} + """ + name = "{{ 
k.field }}" + {% if k.coerced %} + _coerce = True + {% endif %} + {% if k.name.endswith('Range') %} + _core_field = {{ k.name[:-5] }}() + {% endif %} + {% if k.params %} + _param_defs = { + {% for param in k.params %} + "{{ param.name }}": {{ param.param }}, + {% endfor %} + } + {% endif %} + + def __init__( + self, + {% for arg in k.args %} + {% if arg.positional %} + {{ arg.name }}: {{ arg.type }} = DEFAULT, + {% endif %} + {% endfor %} + *args: Any, + {% for arg in k.args %} + {% if not arg.positional %} + {{ arg.name }}: {{ arg.type }} = DEFAULT, + {% endif %} + {% endfor %} + **kwargs: Any + ): + {% for arg in k.args %} + {% if not arg.positional %} + if {{ arg.name }} is not DEFAULT: + {% if "InstrumentedField" in arg.type %} + kwargs["{{ arg.name }}"] = str({{ arg.name }}) + {% else %} + kwargs["{{ arg.name }}"] = {{ arg.name }} + {% endif %} + {% endif %} + {% endfor %} + {% if k.field == 'object' %} + + if doc_class is not DEFAULT and (properties is not DEFAULT or dynamic is not DEFAULT): + raise ValidationException( + "doc_class and properties/dynamic should not be provided together" + ) + if doc_class is not DEFAULT: + self._doc_class: Type["InnerDoc"] = doc_class + else: + # FIXME import + from .document import InnerDoc + + # no InnerDoc subclass, creating one instead... + self._doc_class = type("InnerDoc", (InnerDoc,), {}) + for name, field in (properties if properties is not DEFAULT else {}).items(): + self._doc_class._doc_type.mapping.field(name, field) + if "properties" in kwargs: + del kwargs["properties"] + if dynamic is not DEFAULT: + self._doc_class._doc_type.mapping.meta("dynamic", dynamic) + + self._mapping: "MappingBase" = deepcopy(self._doc_class._doc_type.mapping) + super().__init__(**kwargs) + + def __getitem__(self, name: str) -> Field: + return self._mapping[name] + + def __contains__(self, name: str) -> bool: + return name in self._mapping + + def _empty(self) -> "InnerDoc": + return self._wrap({}) + + def _wrap(self, data: Dict[str, Any]) -> "InnerDoc": + return self._doc_class.from_es(data, data_only=True) + + def empty(self) -> Union["InnerDoc", AttrList[Any]]: + if self._multi: + return AttrList[Any]([], self._wrap) + return self._empty() + + def to_dict(self) -> Dict[str, Any]: + d = self._mapping.to_dict() + d.update(super().to_dict()) + return d + + def _collect_fields(self) -> Iterator[Field]: + return self._mapping.properties._collect_fields() + + def _deserialize(self, data: Any) -> "InnerDoc": + # don't wrap already wrapped data + if isinstance(data, self._doc_class): + return data + + if isinstance(data, AttrDict): + data = data._d_ + + return self._wrap(data) + + def _serialize( + self, data: Optional[Union[Dict[str, Any], "InnerDoc"]] + ) -> Optional[Dict[str, Any]]: + if data is None: + return None + + # somebody assigned raw dict to the field, we should tolerate that + if isinstance(data, collections.abc.Mapping): + return data + + return data.to_dict() + + def clean(self, data: Any) -> Any: + data = super().clean(data) + if data is None: + return None + if isinstance(data, (list, AttrList)): + for d in cast(Iterator["InnerDoc"], data): + d.full_clean() + else: + data.full_clean() + return data + + def update(self, other: Any, update_only: bool = False) -> None: + if not isinstance(other, Object): + # not an inner/nested object, no merge possible + return + + self._mapping.update(other._mapping, update_only) + {% elif k.field == "nested" %} + kwargs.setdefault("multi", True) + super().__init__(*args, **kwargs) + {% elif k.field == "date" %} + + 
if default_timezone is DEFAULT: + self._default_timezone = None + elif isinstance(default_timezone, str): + self._default_timezone = tz.gettz(default_timezone) + else: + self._default_timezone = default_timezone + super().__init__(*args, **kwargs) + + def _deserialize(self, data: Any) -> Union[datetime, date]: + if isinstance(data, str): + try: + data = parser.parse(data) + except Exception as e: + raise ValidationException( + f"Could not parse date from the value ({data!r})", e + ) + # we treat the yyyy-MM-dd format as a special case + if hasattr(self, "format") and self.format == "yyyy-MM-dd": + data = data.date() + + if isinstance(data, datetime): + if self._default_timezone and data.tzinfo is None: + data = data.replace(tzinfo=self._default_timezone) + return data + if isinstance(data, date): + return data + if isinstance(data, int): + # Divide by a float to preserve milliseconds on the datetime. + return datetime.utcfromtimestamp(data / 1000.0) + + raise ValidationException(f"Could not parse date from the value ({data!r})") + {% elif k.field == "boolean" %} + super().__init__(*args, **kwargs) + + def _deserialize(self, data: Any) -> bool: + if data == "false": + return False + return bool(data) + + def clean(self, data: Any) -> Optional[bool]: + if data is not None: + data = self.deserialize(data) + if data is None and self._required: + raise ValidationException("Value required for this field.") + return data # type: ignore[no-any-return] + {% elif k.field == "float" %} + super().__init__(*args, **kwargs) + + def _deserialize(self, data: Any) -> float: + return float(data) + {% elif k.field == "dense_vector" %} + self._element_type = kwargs.get("element_type", "float") + if self._element_type in ["float", "byte"]: + kwargs["multi"] = True + super().__init__(*args, **kwargs) + + def _deserialize(self, data: Any) -> Any: + if self._element_type == "float": + return float(data) + elif self._element_type == "byte": + return int(data) + return data + {% elif k.field == "scaled_float" %} + if 'scaling_factor' not in kwargs: + if len(args) > 0: + kwargs['scaling_factor'] = args[0] + args = args[1:] + else: + raise TypeError("missing required argument: 'scaling_factor'") + super().__init__(*args, **kwargs) + {% elif k.field == "integer" %} + super().__init__(*args, **kwargs) + + def _deserialize(self, data: Any) -> int: + return int(data) + {% elif k.field == "ip" %} + super().__init__(*args, **kwargs) + + def _deserialize(self, data: Any) -> Union["IPv4Address", "IPv6Address"]: + # the ipaddress library for pypy only accepts unicode. + return ipaddress.ip_address(unicode(data)) + + def _serialize(self, data: Any) -> Optional[str]: + if data is None: + return None + return str(data) + {% elif k.field == "binary" %} + super().__init__(*args, **kwargs) + + def clean(self, data: str) -> str: + # Binary fields are opaque, so there's not much cleaning + # that can be done. 
+ return data + + def _deserialize(self, data: Any) -> bytes: + return base64.b64decode(data) + + def _serialize(self, data: Any) -> Optional[str]: + if data is None: + return None + return base64.b64encode(data).decode() + {% elif k.field == "percolator" %} + super().__init__(*args, **kwargs) + + def _deserialize(self, data: Any) -> "Query": + return Q(data) # type: ignore[no-any-return] + + def _serialize(self, data: Any) -> Optional[Dict[str, Any]]: + if data is None: + return None + return data.to_dict() # type: ignore[no-any-return] + {% else %} + super().__init__(*args, **kwargs) + {% endif %} + + +{% endfor %} From 986dccea114087894a772033a7053237b2d6e1fc Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 18 Feb 2025 17:57:31 +0400 Subject: [PATCH 34/65] Explain how to use sub clients in API docs (#2798) (#2801) (cherry picked from commit db22037da287063c1c00f7d2dfc26a8945ff5de5) Co-authored-by: Quentin Pradet --- docs/sphinx/api/elasticsearch.rst | 2 +- docs/sphinx/conf.py | 34 ++++++++++++++++++++++++++++++- elasticsearch/client.py | 2 ++ 3 files changed, 36 insertions(+), 2 deletions(-) diff --git a/docs/sphinx/api/elasticsearch.rst b/docs/sphinx/api/elasticsearch.rst index 64df4540b..b8831a407 100644 --- a/docs/sphinx/api/elasticsearch.rst +++ b/docs/sphinx/api/elasticsearch.rst @@ -3,7 +3,7 @@ Elasticsearch ------------- -.. py:module:: elasticsearch.client +.. py:module:: elasticsearch .. autoclass:: Elasticsearch :members: diff --git a/docs/sphinx/conf.py b/docs/sphinx/conf.py index 7104660b5..d1537932b 100644 --- a/docs/sphinx/conf.py +++ b/docs/sphinx/conf.py @@ -21,10 +21,42 @@ extensions = ["sphinx.ext.autodoc", "sphinx.ext.doctest", "sphinx.ext.intersphinx"] -autoclass_content = "both" +autoclass_content = "class" +autodoc_class_signature = "separated" autodoc_typehints = "description" + +def client_name(full_name): + # Get the class name, e.g. ['elasticsearch', 'client', 'TextStructureClient'] -> 'TextStructure' + class_name = full_name.split(".")[-1].removesuffix("Client") + # Convert to snake case, e.g. 'TextStructure' -> '_text_structure' + snake_case = "".join(["_" + c.lower() if c.isupper() else c for c in class_name]) + # Remove the leading underscore + return snake_case.lstrip("_") + + +def add_client_usage_example(app, what, name, obj, options, lines): + if what == "class" and "Client" in name: + sub_client_name = client_name(name) + lines.append( + f"To use this client, access ``client.{sub_client_name}`` from an " + " :class:`~elasticsearch.Elasticsearch` client. For example::" + ) + lines.append("") + lines.append(" from elasticsearch import Elasticsearch") + lines.append("") + lines.append(" # Create the client instance") + lines.append(" client = Elasticsearch(...)") + lines.append(f" # Use the {sub_client_name} client") + lines.append(f" client.{sub_client_name}.(...)") + lines.append("") + + +def setup(app): + app.connect("autodoc-process-docstring", add_client_usage_example) + + # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] diff --git a/elasticsearch/client.py b/elasticsearch/client.py index af25c5ae1..926ed5fe3 100644 --- a/elasticsearch/client.py +++ b/elasticsearch/client.py @@ -73,6 +73,8 @@ from ._utils import fixup_module_metadata # This file exists for backwards compatibility. 
+# We can't remove it as we use it for the Sphinx docs which show the full page, and we'd +# rather show `elasticsearch.client.FooClient` than `elasticsearch._sync.client.FooClient`. warnings.warn( "Importing from the 'elasticsearch.client' module is deprecated. " "Instead use 'elasticsearch' module for importing the client.", From 944523e4ef07b34a12ffaeef4180162ba2ace511 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 3 Mar 2025 16:16:00 +0400 Subject: [PATCH 35/65] Skip failing test to restore CI (#2808) (#2818) * Skip failing test to restore CI * Fix name of skipped test (cherry picked from commit b41c65019068aae1d2f7fe0ed222f558a006cf0e) Co-authored-by: Quentin Pradet --- test_elasticsearch/test_server/test_rest_api_spec.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test_elasticsearch/test_server/test_rest_api_spec.py b/test_elasticsearch/test_server/test_rest_api_spec.py index 058daa121..0b602684a 100644 --- a/test_elasticsearch/test_server/test_rest_api_spec.py +++ b/test_elasticsearch/test_server/test_rest_api_spec.py @@ -87,6 +87,7 @@ "machine_learning/jobs_crud", "scroll/10_basic", "security/10_api_key_basic", + "security/130_user_profile", "transform/10_basic", } SKIPPED_TESTS = { From 75116899bc352de396b8cc1a00f8c01762ba9128 Mon Sep 17 00:00:00 2001 From: Quentin Pradet Date: Mon, 3 Mar 2025 17:02:54 +0400 Subject: [PATCH 36/65] [8.x] Bump STACK_VERSION to 8.19.0-SNAPSHOT (#2814) * Bump STACK_VERSION to 8.19.0-SNAPSHOT * Ignore agentless templates --- .buildkite/pipeline.yml | 2 +- test_elasticsearch/utils.py | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 9ec0f81b0..5e38b6749 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -5,7 +5,7 @@ steps: env: PYTHON_VERSION: "{{ matrix.python }}" TEST_SUITE: "platinum" - STACK_VERSION: "8.16.0-SNAPSHOT" + STACK_VERSION: "8.19.0-SNAPSHOT" PYTHON_CONNECTION_CLASS: "{{ matrix.connection }}" NOX_SESSION: "{{ matrix.nox_session }}" matrix: diff --git a/test_elasticsearch/utils.py b/test_elasticsearch/utils.py index 4a26aa4c0..8a13ff62f 100644 --- a/test_elasticsearch/utils.py +++ b/test_elasticsearch/utils.py @@ -425,6 +425,9 @@ def is_xpack_template(name): return True return name in { + "agentless", + "agentless@mappings", + "agentless@settings", "apm-10d@lifecycle", "apm-180d@lifecycle", "apm-390d@lifecycle", From 7a548c1ae413a8bc34eb934700413038a878797f Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 3 Mar 2025 13:56:01 +0000 Subject: [PATCH 37/65] Auto-generated API code (#2777) Co-authored-by: Quentin Pradet --- elasticsearch/_async/client/__init__.py | 1014 +++++++++++++---- elasticsearch/_async/client/async_search.py | 18 +- elasticsearch/_async/client/cat.py | 2 +- elasticsearch/_async/client/ccr.py | 113 +- elasticsearch/_async/client/cluster.py | 21 +- elasticsearch/_async/client/eql.py | 13 +- elasticsearch/_async/client/esql.py | 71 ++ elasticsearch/_async/client/features.py | 2 +- elasticsearch/_async/client/fleet.py | 8 +- elasticsearch/_async/client/ilm.py | 4 +- elasticsearch/_async/client/indices.py | 202 ++-- elasticsearch/_async/client/inference.py | 35 +- elasticsearch/_async/client/ingest.py | 46 +- elasticsearch/_async/client/license.py | 33 +- elasticsearch/_async/client/ml.py | 106 +- elasticsearch/_async/client/monitoring.py | 2 +- elasticsearch/_async/client/nodes.py | 6 +- .../_async/client/search_application.py | 8 +- 
elasticsearch/_async/client/transform.py | 31 +- elasticsearch/_sync/client/__init__.py | 1014 +++++++++++++---- elasticsearch/_sync/client/async_search.py | 18 +- elasticsearch/_sync/client/cat.py | 2 +- elasticsearch/_sync/client/ccr.py | 113 +- elasticsearch/_sync/client/cluster.py | 21 +- elasticsearch/_sync/client/eql.py | 13 +- elasticsearch/_sync/client/esql.py | 71 ++ elasticsearch/_sync/client/features.py | 2 +- elasticsearch/_sync/client/fleet.py | 8 +- elasticsearch/_sync/client/ilm.py | 4 +- elasticsearch/_sync/client/indices.py | 202 ++-- elasticsearch/_sync/client/inference.py | 35 +- elasticsearch/_sync/client/ingest.py | 46 +- elasticsearch/_sync/client/license.py | 33 +- elasticsearch/_sync/client/ml.py | 106 +- elasticsearch/_sync/client/monitoring.py | 2 +- elasticsearch/_sync/client/nodes.py | 6 +- .../_sync/client/search_application.py | 8 +- elasticsearch/_sync/client/transform.py | 31 +- elasticsearch/dsl/types.py | 2 - 39 files changed, 2508 insertions(+), 964 deletions(-) diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py index 25f832f5d..d1ff463f1 100644 --- a/elasticsearch/_async/client/__init__.py +++ b/elasticsearch/_async/client/__init__.py @@ -628,6 +628,7 @@ async def bulk( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + include_source_on_error: t.Optional[bool] = None, list_executed_pipelines: t.Optional[bool] = None, pipeline: t.Optional[str] = None, pretty: t.Optional[bool] = None, @@ -735,6 +736,8 @@ async def bulk( :param operations: :param index: The name of the data stream, index, or index alias to perform bulk actions on. + :param include_source_on_error: True or false if to include the document source + in the error message in case of parsing errors. :param list_executed_pipelines: If `true`, the response will include the ingest pipelines that were run for each index or create. :param pipeline: The pipeline identifier to use to preprocess incoming documents. @@ -792,6 +795,8 @@ async def bulk( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if include_source_on_error is not None: + __query["include_source_on_error"] = include_source_on_error if list_executed_pipelines is not None: __query["list_executed_pipelines"] = list_executed_pipelines if pipeline is not None: @@ -984,8 +989,8 @@ async def count(

    Count search results. Get the number of documents matching a query.

-        The query can either be provided using a simple query string as a parameter or using the Query DSL defined within the request body.
-        The latter must be nested in a query key, which is the same as the search API.
+        The query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body.
+        The query is optional. When no query is provided, the API uses match_all to count all the documents.
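
    As an illustration of the wording above, a minimal sketch with the async client might look as follows; it is not part of this patch, and the cluster URL and index name are assumptions:

        # Illustrative sketch only; "http://localhost:9200" and "my-index" are assumptions.
        import asyncio
        from elasticsearch import AsyncElasticsearch

        async def main() -> None:
            client = AsyncElasticsearch("http://localhost:9200")
            # No query body: the server falls back to match_all and counts every document.
            total = await client.count(index="my-index")
            # With a Query DSL body: only matching documents are counted.
            errors = await client.count(
                index="my-index",
                query={"match": {"level": "error"}},
            )
            print(total["count"], errors["count"])
            await client.close()

        asyncio.run(main())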

    The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.
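
    The multi-target syntax mentioned above can be sketched the same way; continuing inside the hypothetical main() coroutine from the previous example, with made-up index patterns:

        # One count request across several data streams and indices (the names are assumptions).
        across = await client.count(index="logs-*,metrics-*")
        print(across["count"])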

    The operation is broadcast across all shards. For each shard ID group, a replica is chosen and the search is run against it. @@ -1027,10 +1032,10 @@ async def count( in the result. :param preference: The node or shard the operation should be performed on. By default, it is random. - :param q: The query in Lucene query string syntax. - :param query: Defines the search definition using the Query DSL. The query is - optional, and when not provided, it will use `match_all` to count all the - docs. + :param q: The query in Lucene query string syntax. This parameter cannot be used + with a request body. + :param query: Defines the search query using Query DSL. A request body query + cannot be used with the `q` query string parameter. :param routing: A custom value used to route operations to a specific shard. :param terminate_after: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. @@ -1116,6 +1121,7 @@ async def create( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + include_source_on_error: t.Optional[bool] = None, pipeline: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ @@ -1198,6 +1204,8 @@ async def create( :param id: A unique identifier for the document. To automatically generate a document ID, use the `POST //_doc/` request format. :param document: + :param include_source_on_error: True or false if to include the document source + in the error message in case of parsing errors. :param pipeline: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final @@ -1246,6 +1254,8 @@ async def create( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if include_source_on_error is not None: + __query["include_source_on_error"] = include_source_on_error if pipeline is not None: __query["pipeline"] = pipeline if pretty is not None: @@ -1764,14 +1774,16 @@ async def delete_script( Deletes a stored script or search template.

    - ``_ + ``_ - :param id: Identifier for the stored script or search template. - :param master_timeout: Period to wait for a connection to the master node. If - no response is received before the timeout expires, the request fails and - returns an error. - :param timeout: Period to wait for a response. If no response is received before - the timeout expires, the request fails and returns an error. + :param id: The identifier for the stored script or search template. + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. It can also be set to `-1` to indicate that the request + should never timeout. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. It can + also be set to `-1` to indicate that the request should never timeout. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -2071,34 +2083,44 @@ async def explain( .. raw:: html

    Explain a document match result. - Returns information about why a specific document matches, or doesn’t match, a query.

    + Get information about why a specific document matches, or doesn't match, a query. + It computes a score explanation for a query and a specific document.
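As a rough sketch of the call this docstring describes (the index name, document ID, and query below are hypothetical, and `client` is assumed to be an `AsyncElasticsearch` instance):

    # Ask why document "1" does or does not match a simple match query
    resp = await client.explain(
        index="my-index-000001",
        id="1",
        query={"match": {"message": "elasticsearch"}},
    )
    print(resp["matched"])  # the response also carries the score explanation tree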

    ``_ - :param index: Index names used to limit the request. Only a single index name - can be provided to this parameter. - :param id: Defines the document ID. + :param index: Index names that are used to limit the request. Only a single index + name can be provided to this parameter. + :param id: The document identifier. :param analyze_wildcard: If `true`, wildcard and prefix queries are analyzed. - :param analyzer: Analyzer to use for the query string. This parameter can only - be used when the `q` query string parameter is specified. + This parameter can be used only when the `q` query string parameter is specified. + :param analyzer: The analyzer to use for the query string. This parameter can + be used only when the `q` query string parameter is specified. :param default_operator: The default operator for query string query: `AND` or - `OR`. - :param df: Field to use as default where no field prefix is given in the query - string. + `OR`. This parameter can be used only when the `q` query string parameter + is specified. + :param df: The field to use as default where no field prefix is given in the + query string. This parameter can be used only when the `q` query string parameter + is specified. :param lenient: If `true`, format-based query failures (such as providing text - to a numeric field) in the query string will be ignored. - :param preference: Specifies the node or shard the operation should be performed - on. Random by default. - :param q: Query in the Lucene query string syntax. + to a numeric field) in the query string will be ignored. This parameter can + be used only when the `q` query string parameter is specified. + :param preference: The node or shard the operation should be performed on. It + is random by default. + :param q: The query in the Lucene query string syntax. :param query: Defines the search definition using the Query DSL. - :param routing: Custom value used to route operations to a specific shard. - :param source: True or false to return the `_source` field or not, or a list + :param routing: A custom value used to route operations to a specific shard. + :param source: `True` or `false` to return the `_source` field or not or a list of fields to return. :param source_excludes: A comma-separated list of source fields to exclude from - the response. + the response. You can also use this parameter to exclude fields from the + subset specified in `_source_includes` query parameter. If the `_source` + parameter is `false`, this parameter is ignored. :param source_includes: A comma-separated list of source fields to include in - the response. + the response. If this parameter is specified, only these source fields are + returned. You can exclude fields from this subset using the `_source_excludes` + query parameter. If the `_source` parameter is `false`, this parameter is + ignored. :param stored_fields: A comma-separated list of stored fields to return in the response. """ @@ -2202,7 +2224,7 @@ async def field_caps( ``_ - :param index: Comma-separated list of data streams, indices, and aliases used + :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. :param allow_no_indices: If false, the request returns an error if any wildcard @@ -2210,25 +2232,32 @@ async def field_caps( This behavior applies even if the request targets other open indices. 
For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar. - :param expand_wildcards: Type of index that wildcard patterns can match. If the - request can target data streams, this argument determines whether wildcard - expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. - :param fields: List of fields to retrieve capabilities for. Wildcard (`*`) expressions - are supported. - :param filters: An optional set of filters: can include +metadata,-metadata,-nested,-multifield,-parent + :param expand_wildcards: The type of index that wildcard patterns can match. + If the request can target data streams, this argument determines whether + wildcard expressions match hidden data streams. Supports comma-separated + values, such as `open,hidden`. + :param fields: A list of fields to retrieve capabilities for. Wildcard (`*`) + expressions are supported. + :param filters: A comma-separated list of filters to apply to the response. :param ignore_unavailable: If `true`, missing or closed indices are not included in the response. :param include_empty_fields: If false, empty fields are not included in the response. :param include_unmapped: If true, unmapped fields are included in the response. - :param index_filter: Allows to filter indices if the provided query rewrites - to match_none on every shard. - :param runtime_mappings: Defines ad-hoc runtime fields in the request similar + :param index_filter: Filter indices if the provided query rewrites to `match_none` + on every shard. IMPORTANT: The filtering is done on a best-effort basis, + it uses index statistics and mappings to rewrite queries to `match_none` + instead of fully running the request. For instance a range query over a date + field can rewrite to `match_none` if all documents within a shard (including + deleted documents) are outside of the provided range. However, not all queries + can rewrite to `match_none` so this API may return an index even if the provided + filter matches no document. + :param runtime_mappings: Define ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. - :param types: Only return results for fields that have one of the types in the - list + :param types: A comma-separated list of field types to include. Any fields that + do not match one of these types will be excluded from the results. It defaults + to empty, meaning that all field types are returned. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: @@ -2461,10 +2490,13 @@ async def get_script( Retrieves a stored script or search template.

    - ``_ + ``_ - :param id: Identifier for the stored script or search template. - :param master_timeout: Specify timeout for connection to master + :param id: The identifier for the stored script or search template. + :param master_timeout: The period to wait for the master node. If the master + node is not available before the timeout expires, the request fails and returns + an error. It can also be set to `-1` to indicate that the request should + never timeout. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -2507,7 +2539,7 @@ async def get_script_context(

    Get a list of supported script contexts and their methods.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_script_context" @@ -2546,7 +2578,7 @@ async def get_script_languages(

    Get a list of available script types, languages, and contexts.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_script_language" @@ -2766,6 +2798,7 @@ async def index( human: t.Optional[bool] = None, if_primary_term: t.Optional[int] = None, if_seq_no: t.Optional[int] = None, + include_source_on_error: t.Optional[bool] = None, op_type: t.Optional[t.Union[str, t.Literal["create", "index"]]] = None, pipeline: t.Optional[str] = None, pretty: t.Optional[bool] = None, @@ -2891,6 +2924,8 @@ async def index( term. :param if_seq_no: Only perform the operation if the document has this sequence number. + :param include_source_on_error: True or false if to include the document source + in the error message in case of parsing errors. :param op_type: Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `/_create` @@ -2955,6 +2990,8 @@ async def index( __query["if_primary_term"] = if_primary_term if if_seq_no is not None: __query["if_seq_no"] = if_seq_no + if include_source_on_error is not None: + __query["include_source_on_error"] = include_source_on_error if op_type is not None: __query["op_type"] = op_type if pipeline is not None: @@ -3069,30 +3106,37 @@ async def knn_search( This means the results returned are not always the true k closest neighbors.

    The kNN search API supports restricting the search using a filter. The search will return the top k documents that also match the filter query.

    +

    A kNN search response has the exact same structure as a search API response. + However, certain sections have a meaning specific to kNN search:

    +
* The document _score is determined by the similarity between the query and document vector.
* The hits.total object contains the total number of nearest neighbor candidates considered, which is num_candidates * num_shards. The hits.total.relation will always be eq, indicating an exact value.
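Purely for illustration (the index, field name, and vectors are invented, not part of this patch), a call to the dedicated kNN endpoint through the async Python client might look like:

    resp = await client.knn_search(
        index="my-vector-index",
        knn={
            "field": "vector",                # a dense_vector field
            "query_vector": [0.1, 0.2, 0.3],
            "k": 10,
            "num_candidates": 100,
        },
        filter={"term": {"category": "blog"}},  # optional filter query
    )
    # hits are scored by vector similarity, as described above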
    - ``_ + ``_ :param index: A comma-separated list of index names to search; use `_all` or - to perform the operation on all indices - :param knn: kNN query to execute + to perform the operation on all indices. + :param knn: The kNN query to run. :param docvalue_fields: The request returns doc values for field names matching - these patterns in the hits.fields property of the response. Accepts wildcard - (*) patterns. + these patterns in the `hits.fields` property of the response. It accepts + wildcard (`*`) patterns. :param fields: The request returns values for field names matching these patterns - in the hits.fields property of the response. Accepts wildcard (*) patterns. - :param filter: Query to filter the documents that can match. The kNN search will - return the top `k` documents that also match this filter. The value can be - a single query or a list of queries. If `filter` isn't provided, all documents - are allowed to match. - :param routing: A comma-separated list of specific routing values + in the `hits.fields` property of the response. It accepts wildcard (`*`) + patterns. + :param filter: A query to filter the documents that can match. The kNN search + will return the top `k` documents that also match this filter. The value + can be a single query or a list of queries. If `filter` isn't provided, all + documents are allowed to match. + :param routing: A comma-separated list of specific routing values. :param source: Indicates which source fields are returned for matching documents. - These fields are returned in the hits._source property of the search response. - :param stored_fields: List of stored fields to return as part of a hit. If no - fields are specified, no stored fields are included in the response. If this - field is specified, the _source parameter defaults to false. You can pass - _source: true to return both source fields and stored fields in the search - response. + These fields are returned in the `hits._source` property of the search response. + :param stored_fields: A list of stored fields to return as part of a hit. If + no fields are specified, no stored fields are included in the response. If + this field is specified, the `_source` parameter defaults to `false`. You + can pass `_source: true` to return both source fields and stored fields in + the search response. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -3176,6 +3220,14 @@ async def mget(

    Get multiple JSON documents by ID from one or more indices. If you specify an index in the request URI, you only need to specify the document IDs in the request body. To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.

    +

    Filter source fields

    +

    By default, the _source field is returned for every document (if stored). + Use the _source and _source_include or source_exclude attributes to filter what fields are returned for a particular document. + You can include the _source, _source_includes, and _source_excludes query parameters in the request URI to specify the defaults to use when there are no per-document instructions.

    +

    Get stored fields

    +

    Use the stored_fields attribute to specify the set of stored fields you want to retrieve. + Any requested fields that are not stored are ignored. + You can include the stored_fields query parameter in the request URI to specify the defaults to use when there are no per-document instructions.
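A minimal illustrative sketch of both styles described above (index names and IDs are hypothetical, `client` is an `AsyncElasticsearch` instance):

    # Same index for every document: list only the IDs
    resp = await client.mget(index="my-index", ids=["1", "2"])

    # Per-document instructions, including source filtering and stored fields
    resp = await client.mget(
        docs=[
            {"_index": "my-index", "_id": "1", "_source": ["title"]},
            {"_index": "my-other-index", "_id": "2", "stored_fields": ["tags"]},
        ]
    )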

    ``_ @@ -3446,22 +3498,32 @@ async def msearch_template( .. raw:: html

    Run multiple templated searches.

    +

    Run multiple templated searches with a single request. + If you are providing a text file or text input to curl, use the --data-binary flag instead of -d to preserve newlines. + For example:

    +
    $ cat requests
    +          { "index": "my-index" }
    +          { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }}
    +          { "index": "my-other-index" }
    +          { "id": "my-other-search-template", "params": { "query_type": "match_all" }}
    +
    +          $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo
    +          
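The equivalent of the curl invocation above through the async Python client passes the same interleaved header/body pairs as search_templates (the template IDs and indices simply mirror the example and are hypothetical):

    resp = await client.msearch_template(
        search_templates=[
            {"index": "my-index"},
            {"id": "my-search-template",
             "params": {"query_string": "hello world", "from": 0, "size": 10}},
            {"index": "my-other-index"},
            {"id": "my-other-search-template", "params": {"query_type": "match_all"}},
        ]
    )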
    - ``_ + ``_ :param search_templates: - :param index: Comma-separated list of data streams, indices, and aliases to search. - Supports wildcards (`*`). To search all data streams and indices, omit this - parameter or use `*`. + :param index: A comma-separated list of data streams, indices, and aliases to + search. It supports wildcards (`*`). To search all data streams and indices, + omit this parameter or use `*`. :param ccs_minimize_roundtrips: If `true`, network round-trips are minimized for cross-cluster search requests. - :param max_concurrent_searches: Maximum number of concurrent searches the API - can run. + :param max_concurrent_searches: The maximum number of concurrent searches the + API can run. :param rest_total_hits_as_int: If `true`, the response returns `hits.total` as an integer. If `false`, it returns `hits.total` as an object. - :param search_type: The type of the search operation. Available options: `query_then_fetch`, - `dfs_query_then_fetch`. + :param search_type: The type of the search operation. :param typed_keys: If `true`, the response prefixes aggregation and suggester names with their respective types. """ @@ -3544,34 +3606,38 @@ async def mtermvectors( .. raw:: html

    Get multiple term vectors.

    -

    You can specify existing documents by index and ID or provide artificial documents in the body of the request. +

    Get multiple term vectors with a single request. + You can specify existing documents by index and ID or provide artificial documents in the body of the request. You can specify the index in the request body or request URI. The response contains a docs array with all the fetched termvectors. Each element has the structure provided by the termvectors API.

    +

    Artificial documents

    +

    You can also use mtermvectors to generate term vectors for artificial documents provided in the body of the request. + The mapping used is determined by the specified _index.
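As an illustrative sketch only (index, IDs, and field are hypothetical), fetching term vectors for two existing documents could look like:

    resp = await client.mtermvectors(
        index="my-index",
        ids=["1", "2"],          # existing documents; use `docs` for artificial ones
        fields=["message"],
        term_statistics=True,
    )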

    ``_ - :param index: Name of the index that contains the documents. - :param docs: Array of existing or artificial documents. + :param index: The name of the index that contains the documents. + :param docs: An array of existing or artificial documents. :param field_statistics: If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. - :param fields: Comma-separated list or wildcard expressions of fields to include - in the statistics. Used as the default list unless a specific field list - is provided in the `completion_fields` or `fielddata_fields` parameters. - :param ids: Simplified syntax to specify documents by their ID if they're in + :param fields: A comma-separated list or wildcard expressions of fields to include + in the statistics. It is used as the default list unless a specific field + list is provided in the `completion_fields` or `fielddata_fields` parameters. + :param ids: A simplified syntax to specify documents by their ID if they're in the same index. :param offsets: If `true`, the response includes term offsets. :param payloads: If `true`, the response includes term payloads. :param positions: If `true`, the response includes term positions. - :param preference: Specifies the node or shard the operation should be performed - on. Random by default. + :param preference: The node or shard the operation should be performed on. It + is random by default. :param realtime: If true, the request is real-time as opposed to near-real-time. - :param routing: Custom value used to route operations to a specific shard. + :param routing: A custom value used to route operations to a specific shard. :param term_statistics: If true, the response includes term frequency and document frequency. :param version: If `true`, returns the document version as part of a hit. - :param version_type: Specific version type. + :param version_type: The version type. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: @@ -3784,20 +3850,21 @@ async def put_script( Creates or updates a stored script or search template.

    - ``_ - - :param id: Identifier for the stored script or search template. Must be unique - within the cluster. - :param script: Contains the script or search template, its parameters, and its - language. - :param context: Context in which the script or search template should run. To - prevent errors, the API immediately compiles the script or template in this - context. - :param master_timeout: Period to wait for a connection to the master node. If - no response is received before the timeout expires, the request fails and - returns an error. - :param timeout: Period to wait for a response. If no response is received before - the timeout expires, the request fails and returns an error. + ``_ + + :param id: The identifier for the stored script or search template. It must be + unique within the cluster. + :param script: The script or search template, its parameters, and its language. + :param context: The context in which the script or search template should run. + To prevent errors, the API immediately compiles the script or template in + this context. + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. It can also be set to `-1` to indicate that the request + should never timeout. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. It can + also be set to `-1` to indicate that the request should never timeout. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -3877,7 +3944,7 @@ async def rank_eval( :param requests: A set of typical search requests, together with their provided ratings. - :param index: Comma-separated list of data streams, indices, and index aliases + :param index: A comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. @@ -4287,13 +4354,13 @@ async def render_search_template( ``_ - :param id: ID of the search template to render. If no `source` is specified, + :param id: The ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required. :param file: :param params: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. - :param source: An inline search template. Supports the same parameters as the - search API's request body. These parameters also support Mustache variables. + :param source: An inline search template. It supports the same parameters as + the search API's request body. These parameters also support Mustache variables. If no `id` or `` is specified, this parameter is required. 
""" __path_parts: t.Dict[str, str] @@ -4342,7 +4409,24 @@ async def render_search_template( async def scripts_painless_execute( self, *, - context: t.Optional[str] = None, + context: t.Optional[ + t.Union[ + str, + t.Literal[ + "boolean_field", + "composite_field", + "date_field", + "double_field", + "filter", + "geo_point_field", + "ip_field", + "keyword_field", + "long_field", + "painless_test", + "score", + ], + ] + ] = None, context_setup: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, @@ -4354,15 +4438,22 @@ async def scripts_painless_execute( """ .. raw:: html -

    Run a script. - Runs a script and returns a result.

    +

    Run a script.

    +

    Runs a script and returns a result. + Use this API to build and test scripts, such as when defining a script for a runtime field. + This API requires very few dependencies and is especially useful if you don't have permissions to write documents on a cluster.

    +

    The API uses several contexts, which control how scripts are run, what variables are available at runtime, and what the return type is.

    +

    Each context requires a script, but additional parameters depend on the context you're using for that script.
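A minimal sketch using the default painless_test context (the script and its parameters are invented for illustration; `client` is an `AsyncElasticsearch` instance):

    resp = await client.scripts_painless_execute(
        script={
            "source": "params.count / params.total",
            "params": {"count": 100.0, "total": 1000.0},
        },
        # context defaults to `painless_test`; other contexts also need `context_setup`
    )
    print(resp["result"])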

    ``_ - :param context: The context that the script should run in. - :param context_setup: Additional parameters for the `context`. - :param script: The Painless script to execute. + :param context: The context that the script should run in. NOTE: Result ordering + in the field contexts is not guaranteed. + :param context_setup: Additional parameters for the `context`. NOTE: This parameter + is required for all contexts except `painless_test`, which is the default + if no value is provided for `context`. + :param script: The Painless script to run. """ __path_parts: t.Dict[str, str] = {} __path = "/_scripts/painless/_execute" @@ -4428,13 +4519,13 @@ async def scroll(

    IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.
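For illustration, a typical scroll loop opens a scroll context with a search and then feeds the returned ID back to the scroll API (index name and page size are hypothetical):

    resp = await client.search(index="my-index", scroll="1m", size=1000)
    while resp["hits"]["hits"]:
        # ... process resp["hits"]["hits"] ...
        resp = await client.scroll(scroll_id=resp["_scroll_id"], scroll="1m")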

    - ``_ + ``_ - :param scroll_id: Scroll ID of the search. + :param scroll_id: The scroll ID of the search. :param rest_total_hits_as_int: If true, the API response’s hit.total property is returned as an integer. If false, the API response’s hit.total property is returned as an object. - :param scroll: Period to retain the search context for scrolling. + :param scroll: The period to retain the search context for scrolling. """ if scroll_id is None and body is None: raise ValueError("Empty value passed for parameter 'scroll_id'") @@ -5073,7 +5164,315 @@ async def search_mvt( .. raw:: html

    Search a vector tile.

    -

    Search a vector tile for geospatial values.

    +

    Search a vector tile for geospatial values. + Before using this API, you should be familiar with the Mapbox vector tile specification. + The API returns results as a binary mapbox vector tile.

    +

    Internally, Elasticsearch translates a vector tile search API request into a search containing:

    +
* A geo_bounding_box query on the <field>. The query uses the <zoom>/<x>/<y> tile as a bounding box.
* A geotile_grid or geohex_grid aggregation on the <field>. The grid_agg parameter determines the aggregation type. The aggregation uses the <zoom>/<x>/<y> tile as a bounding box.
* Optionally, a geo_bounds aggregation on the <field>. The search only includes this aggregation if the exact_bounds parameter is true.
* If the optional parameter with_labels is true, the internal search will include a dynamic runtime field that calls the getLabelPosition function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label.

    For example, Elasticsearch may translate a vector tile search API request with a grid_agg argument of geotile and an exact_bounds argument of true into the following search

    +
    GET my-index/_search
    +          {
    +            "size": 10000,
    +            "query": {
    +              "geo_bounding_box": {
    +                "my-geo-field": {
    +                  "top_left": {
    +                    "lat": -40.979898069620134,
    +                    "lon": -45
    +                  },
    +                  "bottom_right": {
    +                    "lat": -66.51326044311186,
    +                    "lon": 0
    +                  }
    +                }
    +              }
    +            },
    +            "aggregations": {
    +              "grid": {
    +                "geotile_grid": {
    +                  "field": "my-geo-field",
    +                  "precision": 11,
    +                  "size": 65536,
    +                  "bounds": {
    +                    "top_left": {
    +                      "lat": -40.979898069620134,
    +                      "lon": -45
    +                    },
    +                    "bottom_right": {
    +                      "lat": -66.51326044311186,
    +                      "lon": 0
    +                    }
    +                  }
    +                }
    +              },
    +              "bounds": {
    +                "geo_bounds": {
    +                  "field": "my-geo-field",
    +                  "wrap_longitude": false
    +                }
    +              }
    +            }
    +          }
    +          
    +

    The API returns results as a binary Mapbox vector tile. + Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers:

    +
* A hits layer containing a feature for each <field> value matching the geo_bounding_box query.
* An aggs layer containing a feature for each cell of the geotile_grid or geohex_grid. The layer only contains features for cells with matching data.
* A meta layer containing:
  * A feature containing a bounding box. By default, this is the bounding box of the tile.
  * Value ranges for any sub-aggregations on the geotile_grid or geohex_grid.
  * Metadata for the search.

    The API only returns features that can display at its zoom level. + For example, if a polygon feature has no area at its zoom level, the API omits it. + The API returns errors as UTF-8 encoded JSON.

    +

    IMPORTANT: You can specify several options for this API as either a query parameter or request body parameter. + If you specify both parameters, the query parameter takes precedence.

    +

    Grid precision for geotile

    +

    For a grid_agg of geotile, you can use cells in the aggs layer as tiles for lower zoom levels. + grid_precision represents the additional zoom levels available through these cells. The final precision is computed by as follows: <zoom> + grid_precision. + For example, if <zoom> is 7 and grid_precision is 8, then the geotile_grid aggregation will use a precision of 15. + The maximum final precision is 29. + The grid_precision also determines the number of cells for the grid as follows: (2^grid_precision) x (2^grid_precision). + For example, a value of 8 divides the tile into a grid of 256 x 256 cells. + The aggs layer only contains features for cells with matching data.

    +

    Grid precision for geohex

    +

    For a grid_agg of geohex, Elasticsearch uses <zoom> and grid_precision to calculate a final precision as follows: <zoom> + grid_precision.

    +

    This precision determines the H3 resolution of the hexagonal cells produced by the geohex aggregation. + The following table maps the H3 resolution for each precision. + For example, if <zoom> is 3 and grid_precision is 3, the precision is 6. + At a precision of 6, hexagonal cells have an H3 resolution of 2. + If <zoom> is 3 and grid_precision is 4, the precision is 7. + At a precision of 7, hexagonal cells have an H3 resolution of 3.

Precision | Unique tile bins    | H3 resolution | Unique hex bins   | Ratio
1         | 4                   | 0             | 122               | 30.5
2         | 16                  | 0             | 122               | 7.625
3         | 64                  | 1             | 842               | 13.15625
4         | 256                 | 1             | 842               | 3.2890625
5         | 1024                | 2             | 5882              | 5.744140625
6         | 4096                | 2             | 5882              | 1.436035156
7         | 16384               | 3             | 41162             | 2.512329102
8         | 65536               | 3             | 41162             | 0.6280822754
9         | 262144              | 4             | 288122            | 1.099098206
10        | 1048576             | 4             | 288122            | 0.2747745514
11        | 4194304             | 5             | 2016842           | 0.4808526039
12        | 16777216            | 6             | 14117882          | 0.8414913416
13        | 67108864            | 6             | 14117882          | 0.2103728354
14        | 268435456           | 7             | 98825162          | 0.3681524172
15        | 1073741824          | 8             | 691776122         | 0.644266719
16        | 4294967296          | 8             | 691776122         | 0.1610666797
17        | 17179869184         | 9             | 4842432842        | 0.2818666889
18        | 68719476736         | 10            | 33897029882       | 0.4932667053
19        | 274877906944        | 11            | 237279209162      | 0.8632167343
20        | 1099511627776       | 11            | 237279209162      | 0.2158041836
21        | 4398046511104       | 12            | 1660954464122     | 0.3776573213
22        | 17592186044416      | 13            | 11626681248842    | 0.6609003122
23        | 70368744177664      | 13            | 11626681248842    | 0.165225078
24        | 281474976710656     | 14            | 81386768741882    | 0.2891438866
25        | 1125899906842620    | 15            | 569707381193162   | 0.5060018015
26        | 4503599627370500    | 15            | 569707381193162   | 0.1265004504
27        | 18014398509482000   | 15            | 569707381193162   | 0.03162511259
28        | 72057594037927900   | 15            | 569707381193162   | 0.007906278149
29        | 288230376151712000  | 15            | 569707381193162   | 0.001976569537

    Hexagonal cells don't align perfectly on a vector tile. + Some cells may intersect more than one vector tile. + To compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level. + Elasticsearch uses the H3 resolution that is closest to the corresponding geotile density.
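Sketch of a vector tile request through the async client (the index, field, and tile coordinates are hypothetical; the return value is the binary PBF tile, not JSON):

    tile = await client.search_mvt(
        index="museums",
        field="location",           # a geo_point or geo_shape field
        zoom=13, x=4207, y=2692,
        grid_agg="geotile",
        grid_precision=2,
        exact_bounds=False,
    )
    # `tile` holds Mapbox vector tile bytes; errors come back as UTF-8 JSON instead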

    ``_ @@ -5083,43 +5482,55 @@ async def search_mvt( :param zoom: Zoom level for the vector tile to search :param x: X coordinate for the vector tile to search :param y: Y coordinate for the vector tile to search - :param aggs: Sub-aggregations for the geotile_grid. Supports the following aggregation - types: - avg - cardinality - max - min - sum - :param buffer: Size, in pixels, of a clipping buffer outside the tile. This allows - renderers to avoid outline artifacts from geometries that extend past the - extent of the tile. - :param exact_bounds: If false, the meta layer’s feature is the bounding box of - the tile. If true, the meta layer’s feature is a bounding box resulting from - a geo_bounds aggregation. The aggregation runs on values that intersect - the // tile with wrap_longitude set to false. The resulting bounding - box may be larger than the vector tile. - :param extent: Size, in pixels, of a side of the tile. Vector tiles are square + :param aggs: Sub-aggregations for the geotile_grid. It supports the following + aggregation types: - `avg` - `boxplot` - `cardinality` - `extended stats` + - `max` - `median absolute deviation` - `min` - `percentile` - `percentile-rank` + - `stats` - `sum` - `value count` The aggregation names can't start with + `_mvt_`. The `_mvt_` prefix is reserved for internal aggregations. + :param buffer: The size, in pixels, of a clipping buffer outside the tile. This + allows renderers to avoid outline artifacts from geometries that extend past + the extent of the tile. + :param exact_bounds: If `false`, the meta layer's feature is the bounding box + of the tile. If `true`, the meta layer's feature is a bounding box resulting + from a `geo_bounds` aggregation. The aggregation runs on values that + intersect the `//` tile with `wrap_longitude` set to `false`. + The resulting bounding box may be larger than the vector tile. + :param extent: The size, in pixels, of a side of the tile. Vector tiles are square with equal sides. - :param fields: Fields to return in the `hits` layer. Supports wildcards (`*`). - This parameter does not support fields with array values. Fields with array - values may return inconsistent results. - :param grid_agg: Aggregation used to create a grid for the `field`. + :param fields: The fields to return in the `hits` layer. It supports wildcards + (`*`). This parameter does not support fields with array values. Fields with + array values may return inconsistent results. + :param grid_agg: The aggregation used to create a grid for the `field`. :param grid_precision: Additional zoom levels available through the aggs layer. - For example, if is 7 and grid_precision is 8, you can zoom in up to - level 15. Accepts 0-8. If 0, results don’t include the aggs layer. + For example, if `` is `7` and `grid_precision` is `8`, you can zoom + in up to level 15. Accepts 0-8. If 0, results don't include the aggs layer. :param grid_type: Determines the geometry type for features in the aggs layer. - In the aggs layer, each feature represents a geotile_grid cell. If 'grid' - each feature is a Polygon of the cells bounding box. If 'point' each feature + In the aggs layer, each feature represents a `geotile_grid` cell. If `grid, + each feature is a polygon of the cells bounding box. If `point`, each feature is a Point that is the centroid of the cell. - :param query: Query DSL used to filter documents for the search. + :param query: The query DSL used to filter documents for the search. 
:param runtime_mappings: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. - :param size: Maximum number of features to return in the hits layer. Accepts - 0-10000. If 0, results don’t include the hits layer. - :param sort: Sorts features in the hits layer. By default, the API calculates - a bounding box for each feature. It sorts features based on this box’s diagonal + :param size: The maximum number of features to return in the hits layer. Accepts + 0-10000. If 0, results don't include the hits layer. + :param sort: Sort the features in the hits layer. By default, the API calculates + a bounding box for each feature. It sorts features based on this box's diagonal length, from longest to shortest. - :param track_total_hits: Number of hits matching the query to count accurately. + :param track_total_hits: The number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. :param with_labels: If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features. + * `Point` and `MultiPoint` features will have one of the points selected. + * `Polygon` and `MultiPolygon` features will have a single point generated, + either the centroid, if it is within the polygon, or another point within + the polygon selected from the sorted triangle-tree. * `LineString` features + will likewise provide a roughly central point selected from the triangle-tree. + * The aggregation results will provide one central point for each aggregation + bucket. All attributes from the original features will also be copied to + the new label features. In addition, the new features will be distinguishable + using the tag `_mvt_label_position`. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -5234,13 +5645,15 @@ async def search_shards(

    Get the search shards.

    Get the indices and shards that a search request would be run against. This information can be useful for working out issues or planning optimizations with routing and shard preferences. - When filtered aliases are used, the filter is returned as part of the indices section.

    + When filtered aliases are used, the filter is returned as part of the indices section.

    +

    If the Elasticsearch security features are enabled, you must have the view_index_metadata or manage index privilege for the target data stream, index, or alias.
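A small illustrative call (the index name and routing value are hypothetical, `client` is an `AsyncElasticsearch` instance):

    resp = await client.search_shards(index="my-index", routing="user-1")
    for group in resp["shards"]:
        print([shard["node"] for shard in group])
    # with filtered aliases, the alias filter also appears under resp["indices"]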

    ``_ - :param index: Returns the indices and shards that a search request would be executed - against. + :param index: A comma-separated list of data streams, indices, and aliases to + search. It supports wildcards (`*`). To search all data streams and indices, + omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For @@ -5254,10 +5667,13 @@ async def search_shards( a missing or closed index. :param local: If `true`, the request retrieves information from the local node only. - :param master_timeout: Period to wait for a connection to the master node. - :param preference: Specifies the node or shard the operation should be performed - on. Random by default. - :param routing: Custom value used to route operations to a specific shard. + :param master_timeout: The period to wait for a connection to the master node. + If the master node is not available before the timeout expires, the request + fails and returns an error. IT can also be set to `-1` to indicate that the + request should never timeout. + :param preference: The node or shard the operation should be performed on. It + is random by default. + :param routing: A custom value used to route operations to a specific shard. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: @@ -5344,10 +5760,10 @@ async def search_template(

    Run a search with a search template.
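For example (the stored template ID, index, and params are hypothetical; an inline `source` template works the same way):

    resp = await client.search_template(
        index="my-index",
        id="my-search-template",
        params={"query_string": "hello world", "from": 0, "size": 10},
    )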

    - ``_ + ``_ - :param index: Comma-separated list of data streams, indices, and aliases to search. - Supports wildcards (*). + :param index: A comma-separated list of data streams, indices, and aliases to + search. It supports wildcards (`*`). :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For @@ -5355,32 +5771,34 @@ async def search_template( with `foo` but no index starts with `bar`. :param ccs_minimize_roundtrips: If `true`, network round-trips are minimized for cross-cluster search requests. - :param expand_wildcards: Type of index that wildcard patterns can match. If the - request can target data streams, this argument determines whether wildcard - expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + :param expand_wildcards: The type of index that wildcard patterns can match. + If the request can target data streams, this argument determines whether + wildcard expressions match hidden data streams. Supports comma-separated + values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, + `hidden`, `none`. :param explain: If `true`, returns detailed information about score calculation - as part of each hit. - :param id: ID of the search template to use. If no source is specified, this - parameter is required. + as part of each hit. If you specify both this and the `explain` query parameter, + the API uses only the query parameter. + :param id: The ID of the search template to use. If no `source` is specified, + this parameter is required. :param ignore_throttled: If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param params: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. - :param preference: Specifies the node or shard the operation should be performed - on. Random by default. + :param preference: The node or shard the operation should be performed on. It + is random by default. :param profile: If `true`, the query execution is profiled. - :param rest_total_hits_as_int: If true, hits.total are rendered as an integer - in the response. - :param routing: Custom value used to route operations to a specific shard. + :param rest_total_hits_as_int: If `true`, `hits.total` is rendered as an integer + in the response. If `false`, it is rendered as an object. + :param routing: A custom value used to route operations to a specific shard. :param scroll: Specifies how long a consistent view of the index should be maintained for scrolled search. :param search_type: The type of the search operation. :param source: An inline search template. Supports the same parameters as the - search API's request body. Also supports Mustache variables. If no id is - specified, this parameter is required. + search API's request body. It also supports Mustache variables. If no `id` + is specified, this parameter is required. :param typed_keys: If `true`, the response prefixes aggregation and suggester names with their respective types. """ @@ -5478,30 +5896,35 @@ async def terms_enum(

    Get terms in an index.

    Discover terms that match a partial string in an index. - This "terms enum" API is designed for low-latency look-ups used in auto-complete scenarios.

    -

    If the complete property in the response is false, the returned terms set may be incomplete and should be treated as approximate. - This can occur due to a few reasons, such as a request timeout or a node error.

    -

    NOTE: The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents.

    + This API is designed for low-latency look-ups used in auto-complete scenarios.

    +
    +

info: The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents.

    +
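An illustrative auto-complete style lookup (the index and field are hypothetical); note the `complete` flag described above:

    resp = await client.terms_enum(index="stackoverflow", field="tags", string="kib")
    if not resp["complete"]:
        pass  # treat resp["terms"] as approximate / partial
    print(resp["terms"])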
    ``_ - :param index: Comma-separated list of data streams, indices, and index aliases - to search. Wildcard (*) expressions are supported. + :param index: A comma-separated list of data streams, indices, and index aliases + to search. Wildcard (`*`) expressions are supported. To search all data streams + or indices, omit this parameter or use `*` or `_all`. :param field: The string to match at the start of indexed terms. If not provided, all terms in the field are considered. - :param case_insensitive: When true the provided search string is matched against + :param case_insensitive: When `true`, the provided search string is matched against index terms without case sensitivity. - :param index_filter: Allows to filter an index shard if the provided query rewrites - to match_none. - :param search_after: - :param size: How many matching terms to return. - :param string: The string after which terms in the index should be returned. - Allows for a form of pagination if the last result from one request is passed - as the search_after parameter for a subsequent request. - :param timeout: The maximum length of time to spend collecting results. Defaults - to "1s" (one second). If the timeout is exceeded the complete flag set to - false in the response and the results may be partial or empty. + :param index_filter: Filter an index shard if the provided query rewrites to + `match_none`. + :param search_after: The string after which terms in the index should be returned. + It allows for a form of pagination if the last result from one request is + passed as the `search_after` parameter for a subsequent request. + :param size: The number of matching terms to return. + :param string: The string to match at the start of indexed terms. If it is not + provided, all terms in the field are considered. > info > The prefix string + cannot be larger than the largest possible keyword value, which is Lucene's + term byte-length limit of 32766. + :param timeout: The maximum length of time to spend collecting results. If the + timeout is exceeded the `complete` flag set to `false` in the response and + the results may be partial or empty. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -5584,32 +6007,73 @@ async def termvectors(

    Get term vector information.

    Get information and statistics about terms in the fields of a particular document.

    +

    You can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request. + You can specify the fields you are interested in through the fields parameter or by adding the fields to the request body. + For example:

    +
    GET /my-index-000001/_termvectors/1?fields=message
    +          
    +

    Fields can be specified using wildcards, similar to the multi match query.

    +

    Term vectors are real-time by default, not near real-time. + This can be changed by setting realtime parameter to false.

    +

    You can request three types of values: term information, term statistics, and field statistics. + By default, all term information and field statistics are returned for all fields but term statistics are excluded.

    +

    Term information

    +
* term frequency in the field (always returned)
* term positions (positions: true)
* start and end offsets (offsets: true)
* term payloads (payloads: true), as base64 encoded bytes

    If the requested information wasn't stored in the index, it will be computed on the fly if possible. + Additionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user.

    +
    +

warn: Start and end offsets assume UTF-16 encoding is being used. If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16.

    +
    +

    Behaviour

    +

    The term and field statistics are not accurate. + Deleted documents are not taken into account. + The information is only retrieved for the shard the requested document resides in. + The term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context. + By default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected. + Use routing only to hit a particular shard.
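A sketch of the stored-document form (index, ID, and field are hypothetical; pass `doc` instead of `id` for an artificial document):

    resp = await client.termvectors(
        index="my-index-000001",
        id="1",
        fields=["message"],
        term_statistics=True,    # off by default because it is comparatively expensive
    )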

    ``_ - :param index: Name of the index that contains the document. - :param id: Unique identifier of the document. + :param index: The name of the index that contains the document. + :param id: A unique identifier for the document. :param doc: An artificial document (a document not present in the index) for which you want to retrieve term vectors. - :param field_statistics: If `true`, the response includes the document count, - sum of document frequencies, and sum of total term frequencies. - :param fields: Comma-separated list or wildcard expressions of fields to include - in the statistics. Used as the default list unless a specific field list - is provided in the `completion_fields` or `fielddata_fields` parameters. - :param filter: Filter terms based on their tf-idf scores. + :param field_statistics: If `true`, the response includes: * The document count + (how many documents contain this field). * The sum of document frequencies + (the sum of document frequencies for all terms in this field). * The sum + of total term frequencies (the sum of total term frequencies of each term + in this field). + :param fields: A comma-separated list or wildcard expressions of fields to include + in the statistics. It is used as the default list unless a specific field + list is provided in the `completion_fields` or `fielddata_fields` parameters. + :param filter: Filter terms based on their tf-idf scores. This could be useful + in order find out a good characteristic vector of a document. This feature + works in a similar manner to the second phase of the More Like This Query. :param offsets: If `true`, the response includes term offsets. :param payloads: If `true`, the response includes term payloads. - :param per_field_analyzer: Overrides the default per-field analyzer. + :param per_field_analyzer: Override the default per-field analyzer. This is useful + in order to generate term vectors in any fashion, especially when using artificial + documents. When providing an analyzer for a field that already stores term + vectors, the term vectors will be regenerated. :param positions: If `true`, the response includes term positions. - :param preference: Specifies the node or shard the operation should be performed - on. Random by default. + :param preference: The node or shard the operation should be performed on. It + is random by default. :param realtime: If true, the request is real-time as opposed to near-real-time. - :param routing: Custom value used to route operations to a specific shard. - :param term_statistics: If `true`, the response includes term frequency and document - frequency. + :param routing: A custom value that is used to route operations to a specific + shard. + :param term_statistics: If `true`, the response includes: * The total term frequency + (how often a term occurs in all documents). * The document frequency (the + number of documents containing the current term). By default these values + are not returned since term statistics can have a serious performance impact. :param version: If `true`, returns the document version as part of a hit. - :param version_type: Specific version type. + :param version_type: The version type. 
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -5705,6 +6169,7 @@ async def update( human: t.Optional[bool] = None, if_primary_term: t.Optional[int] = None, if_seq_no: t.Optional[int] = None, + include_source_on_error: t.Optional[bool] = None, lang: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ @@ -5760,6 +6225,8 @@ async def update( term. :param if_seq_no: Only perform the operation if the document has this sequence number. + :param include_source_on_error: True or false if to include the document source + in the error message in case of parsing errors. :param lang: The script language. :param refresh: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to @@ -5804,6 +6271,8 @@ async def update( __query["if_primary_term"] = if_primary_term if if_seq_no is not None: __query["if_seq_no"] = if_seq_no + if include_source_on_error is not None: + __query["include_source_on_error"] = include_source_on_error if lang is not None: __query["lang"] = lang if pretty is not None: @@ -5915,80 +6384,161 @@ async def update_by_query(

    Update documents. Updates documents that match the specified query. If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.

    +

    If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:

    +
* read
* index or write

    You can specify the query criteria in the request URI or the request body using the same syntax as the search API.

    +

    When you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning. + When the versions match, the document is updated and the version number is incremented. + If a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails. + You can opt to count version conflicts instead of halting and returning by setting conflicts to proceed. + Note that if you opt to count version conflicts, the operation could attempt to update more documents from the source than max_docs until it has successfully updated max_docs documents or it has gone through every document in the source query.

    +

    NOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number.

    +

    While processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents. + A bulk update request is performed for each batch of matching documents. + Any query or update failures cause the update by query request to fail and the failures are shown in the response. + Any update requests that completed successfully still stick, they are not rolled back.

    +

    Throttling update requests

    +

    To control the rate at which update by query issues batches of update operations, you can set requests_per_second to any positive decimal number. + This pads each batch with a wait time to throttle the rate. + Set requests_per_second to -1 to turn off throttling.

    +

    Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. + The padding time is the difference between the batch size divided by the requests_per_second and the time spent writing. + By default the batch size is 1000, so if requests_per_second is set to 500:

    +
    target_time = 1000 / 500 per second = 2 seconds
    +          wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
    +          
    +

    Since the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. + This is "bursty" instead of "smooth".

    +

    Slicing

    +

    Update by query supports sliced scroll to parallelize the update process. + This can improve efficiency and provide a convenient way to break the request down into smaller parts.

    +

    Setting slices to auto chooses a reasonable number for most data streams and indices. + This setting will use one slice per shard, up to a certain limit. + If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.

    +

    Adding slices to _update_by_query just automates the manual process of creating sub-requests, which means it has some quirks:

    • You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices.
    • Fetching the status of the task for the request with slices only contains the status of completed slices.
    • These sub-requests are individually addressable for things like cancellation and rethrottling.
    • Rethrottling the request with slices will rethrottle the unfinished sub-request proportionally.
    • Canceling the request with slices will cancel each sub-request.
    • Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.
    • Parameters like requests_per_second and max_docs on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using max_docs with slices might not result in exactly max_docs documents being updated.
    • Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.

    If you're slicing manually or otherwise tuning automatic slicing, keep in mind that:

    • Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.
    • Update performance scales linearly across available resources with the number of slices.

    Whether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources.
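
    A sketch of automatic slicing from the Python client, under the same assumptions as the earlier examples; slices="auto" lets Elasticsearch pick one slice per shard up to its internal limit:

        client.update_by_query(
            index="my-index",
            query={"match_all": {}},
            slices="auto",        # parallelize across shards
            conflicts="proceed",
        )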


    Update the document source


    Update by query supports scripts to update the document source. As with the update API, you can set ctx.op to change the operation that is performed.


    Set ctx.op = "noop" if your script decides that it doesn't have to make any changes. + The update by query operation skips updating the document and increments the noop counter.


    Set ctx.op = "delete" if your script decides that the document should be deleted. + The update by query operation deletes the document and increments the deleted counter.


    Update by query supports only index, noop, and delete. Setting ctx.op to anything else is an error. Setting any other field in ctx is an error. This API enables you to only modify the source of matching documents; you cannot move them.
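
    A hedged sketch of a scripted update by query, assuming a Painless script and a hypothetical numeric count field; the script picks index, noop, or delete per document:

        client.update_by_query(
            index="my-index",
            query={"term": {"user.id": "kimchy"}},
            script={
                "lang": "painless",
                "source": """
                    if (ctx._source.count == null) {
                        ctx.op = 'noop';         // skipped, increments the noop counter
                    } else if (ctx._source.count < 0) {
                        ctx.op = 'delete';       // removed, increments the deleted counter
                    } else {
                        ctx._source.count += 1;  // regular update (index)
                    }
                """,
            },
            conflicts="proceed",
        )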

    ``_ - :param index: Comma-separated list of data streams, indices, and aliases to search. - Supports wildcards (`*`). To search all data streams or indices, omit this - parameter or use `*` or `_all`. + :param index: A comma-separated list of data streams, indices, and aliases to + search. It supports wildcards (`*`). To search all data streams or indices, + omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. :param analyze_wildcard: If `true`, wildcard and prefix queries are analyzed. - :param analyzer: Analyzer to use for the query string. - :param conflicts: What to do if update by query hits version conflicts: `abort` - or `proceed`. + This parameter can be used only when the `q` query string parameter is specified. + :param analyzer: The analyzer to use for the query string. This parameter can + be used only when the `q` query string parameter is specified. + :param conflicts: The preferred behavior when update by query hits version conflicts: + `abort` or `proceed`. :param default_operator: The default operator for query string query: `AND` or - `OR`. - :param df: Field to use as default where no field prefix is given in the query - string. - :param expand_wildcards: Type of index that wildcard patterns can match. If the - request can target data streams, this argument determines whether wildcard - expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + `OR`. This parameter can be used only when the `q` query string parameter + is specified. + :param df: The field to use as default where no field prefix is given in the + query string. This parameter can be used only when the `q` query string parameter + is specified. + :param expand_wildcards: The type of index that wildcard patterns can match. + If the request can target data streams, this argument determines whether + wildcard expressions match hidden data streams. It supports comma-separated + values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, + `hidden`, `none`. :param from_: Starting offset (default: 0) :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param lenient: If `true`, format-based query failures (such as providing text - to a numeric field) in the query string will be ignored. + to a numeric field) in the query string will be ignored. This parameter can + be used only when the `q` query string parameter is specified. :param max_docs: The maximum number of documents to update. - :param pipeline: ID of the pipeline to use to preprocess incoming documents. + :param pipeline: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. - :param preference: Specifies the node or shard the operation should be performed - on. Random by default. - :param q: Query in the Lucene query string syntax. 
- :param query: Specifies the documents to update using the Query DSL. + :param preference: The node or shard the operation should be performed on. It + is random by default. + :param q: A query in the Lucene query string syntax. + :param query: The documents to update using the Query DSL. :param refresh: If `true`, Elasticsearch refreshes affected shards to make the - operation visible to search. + operation visible to search after the request completes. This is different + than the update API's `refresh` parameter, which causes just the shard that + received the request to be refreshed. :param request_cache: If `true`, the request cache is used for this request. + It defaults to the index-level setting. :param requests_per_second: The throttle for this request in sub-requests per second. - :param routing: Custom value used to route operations to a specific shard. + :param routing: A custom value used to route operations to a specific shard. :param script: The script to run to update the document source or metadata when updating. - :param scroll: Period to retain the search context for scrolling. - :param scroll_size: Size of the scroll request that powers the operation. - :param search_timeout: Explicit timeout for each search request. - :param search_type: The type of the search operation. Available options: `query_then_fetch`, - `dfs_query_then_fetch`. + :param scroll: The period to retain the search context for scrolling. + :param scroll_size: The size of the scroll request that powers the operation. + :param search_timeout: An explicit timeout for each search request. By default, + there is no timeout. + :param search_type: The type of the search operation. Available options include + `query_then_fetch` and `dfs_query_then_fetch`. :param slice: Slice the request manually using the provided slice ID and total number of slices. :param slices: The number of slices this task should be divided into. :param sort: A comma-separated list of : pairs. - :param stats: Specific `tag` of the request for logging and statistical purposes. - :param terminate_after: Maximum number of documents to collect for each shard. + :param stats: The specific `tag` of the request for logging and statistical purposes. + :param terminate_after: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. - Elasticsearch collects documents before sorting. Use with caution. Elasticsearch - applies this parameter to each shard handling the request. When possible, - let Elasticsearch perform early termination automatically. Avoid specifying - this parameter for requests that target data streams with backing indices - across multiple data tiers. - :param timeout: Period each update request waits for the following operations: - dynamic mapping updates, waiting for active shards. + Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. + Elasticsearch applies this parameter to each shard handling the request. + When possible, let Elasticsearch perform early termination automatically. + Avoid specifying this parameter for requests that target data streams with + backing indices across multiple data tiers. + :param timeout: The period each update request waits for the following operations: + dynamic mapping updates, waiting for active shards. By default, it is one + minute. This guarantees Elasticsearch waits for at least the timeout before + failing. The actual wait time could be longer, particularly when multiple + waits occur. 
:param version: If `true`, returns the document version as part of a hit. :param version_type: Should the document increment the version number (internal) on hit or not (reindex) :param wait_for_active_shards: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer - up to the total number of shards in the index (`number_of_replicas+1`). + up to the total number of shards in the index (`number_of_replicas+1`). The + `timeout` parameter controls how long each write request waits for unavailable + shards to become available. Both work exactly the way they work in the bulk + API. :param wait_for_completion: If `true`, the request blocks until the operation - is complete. + is complete. If `false`, Elasticsearch performs some preflight checks, launches + the request, and returns a task ID that you can use to cancel or get the + status of the task. Elasticsearch creates a record of this task as a document + at `.tasks/task/${taskId}`. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -6118,11 +6668,11 @@ async def update_by_query_rethrottle( Rethrottling that speeds up the query takes effect immediately but rethrotting that slows down the query takes effect after completing the current batch to prevent scroll timeouts.
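
    To tie these parameters together, a sketch of running the operation as a background task and rethrottling it; the task ID handling assumes the usual task response shape:

        # Launch without blocking; Elasticsearch returns a task ID.
        resp = client.update_by_query(
            index="my-index",
            query={"match_all": {}},
            wait_for_completion=False,
            requests_per_second=100,
        )
        task_id = resp["task"]

        # Inspect progress through the tasks API.
        print(client.tasks.get(task_id=task_id))

        # Speeding up takes effect immediately; -1 removes the throttle.
        client.update_by_query_rethrottle(task_id=task_id, requests_per_second=-1)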

    - ``_ + ``_ :param task_id: The ID for the task. :param requests_per_second: The throttle for this request in sub-requests per - second. + second. To turn off throttling, set it to `-1`. """ if task_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_id'") diff --git a/elasticsearch/_async/client/async_search.py b/elasticsearch/_async/client/async_search.py index ab8421898..1ca196f13 100644 --- a/elasticsearch/_async/client/async_search.py +++ b/elasticsearch/_async/client/async_search.py @@ -97,8 +97,8 @@ async def get( ``_ :param id: A unique identifier for the async search. - :param keep_alive: Specifies how long the async search should be available in - the cluster. When not specified, the `keep_alive` set with the corresponding + :param keep_alive: The length of time that the async search should be available + in the cluster. When not specified, the `keep_alive` set with the corresponding submit async request will be used. Otherwise, it is possible to override the value and extend the validity of the request. When this period expires, the search, if still running, is cancelled. If the search is completed, its @@ -157,13 +157,17 @@ async def status(

    Get the async search status.

    Get the status of a previously submitted async search request given its identifier, without retrieving search results. - If the Elasticsearch security features are enabled, use of this API is restricted to the monitoring_user role.

    + If the Elasticsearch security features are enabled, the access to the status of a specific async search is restricted to:

    • The user or API key that submitted the original async search request.
    • Users that have the monitor cluster privilege or greater privileges.
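
    As a sketch of the surrounding workflow, assuming a synchronous client and a hypothetical index; keep_alive on submit is the parameter added in this change:

        # Submit an async search and keep its results for five days.
        submitted = client.async_search.submit(
            index="my-index",
            query={"match_all": {}},
            keep_alive="5d",
            keep_on_completion=True,
        )

        # Check progress without fetching hits; extend availability if needed.
        status = client.async_search.status(id=submitted["id"], keep_alive="5d")
        if not status["is_running"]:
            results = client.async_search.get(id=submitted["id"])
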
    ``_ :param id: A unique identifier for the async search. - :param keep_alive: Specifies how long the async search needs to be available. + :param keep_alive: The length of time that the async search needs to be available. Ongoing async searches and any saved search results are deleted after this period. """ @@ -270,6 +274,7 @@ async def submit( ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, indices_boost: t.Optional[t.Sequence[t.Mapping[str, float]]] = None, + keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, keep_on_completion: t.Optional[bool] = None, knn: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] @@ -384,6 +389,9 @@ async def submit( :param ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :param indices_boost: Boosts the _score of documents from specified indices. + :param keep_alive: Specifies how long the async search needs to be available. + Ongoing async searches and any saved search results are deleted after this + period. :param keep_on_completion: If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`. :param knn: Defines the approximate kNN search to run. @@ -510,6 +518,8 @@ async def submit( __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable + if keep_alive is not None: + __query["keep_alive"] = keep_alive if keep_on_completion is not None: __query["keep_on_completion"] = keep_on_completion if lenient is not None: diff --git a/elasticsearch/_async/client/cat.py b/elasticsearch/_async/client/cat.py index 6ffa36b68..2bd625661 100644 --- a/elasticsearch/_async/client/cat.py +++ b/elasticsearch/_async/client/cat.py @@ -2494,7 +2494,7 @@ async def tasks( IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API.

    - ``_ + ``_ :param actions: The task action names, which are used to limit the response. :param detailed: If `true`, the response includes detailed information about diff --git a/elasticsearch/_async/client/ccr.py b/elasticsearch/_async/client/ccr.py index aa7b4d085..cfa80673b 100644 --- a/elasticsearch/_async/client/ccr.py +++ b/elasticsearch/_async/client/ccr.py @@ -39,14 +39,17 @@ async def delete_auto_follow_pattern( """ .. raw:: html -

    Delete auto-follow patterns. - Delete a collection of cross-cluster replication auto-follow patterns.

    +

    Delete auto-follow patterns.

    +

    Delete a collection of cross-cluster replication auto-follow patterns.

    ``_ - :param name: The name of the auto follow pattern. - :param master_timeout: Period to wait for a connection to the master node. + :param name: The auto-follow pattern collection to delete. + :param master_timeout: The period to wait for a connection to the master node. + If the master node is not available before the timeout expires, the request + fails and returns an error. It can also be set to `-1` to indicate that the + request should never timeout. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -251,16 +254,18 @@ async def follow_info( """ .. raw:: html -

    Get follower information. - Get information about all cross-cluster replication follower indices. +

    Get follower information.

    +

    Get information about all cross-cluster replication follower indices. For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused.

    ``_ - :param index: A comma-separated list of index patterns; use `_all` to perform - the operation on all indices - :param master_timeout: Period to wait for a connection to the master node. + :param index: A comma-delimited list of follower index patterns. + :param master_timeout: The period to wait for a connection to the master node. + If the master node is not available before the timeout expires, the request + fails and returns an error. It can also be set to `-1` to indicate that the + request should never timeout. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -301,17 +306,16 @@ async def follow_stats( """ .. raw:: html -

    Get follower stats. - Get cross-cluster replication follower stats. +

    Get follower stats.

    +

    Get cross-cluster replication follower stats. The API returns shard-level stats about the "following tasks" associated with each shard for the specified indices.

    ``_ - :param index: A comma-separated list of index patterns; use `_all` to perform - the operation on all indices - :param timeout: Period to wait for a response. If no response is received before - the timeout expires, the request fails and returns an error. + :param index: A comma-delimited list of index patterns. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -437,15 +441,18 @@ async def get_auto_follow_pattern( """ .. raw:: html -

    Get auto-follow patterns. - Get cross-cluster replication auto-follow patterns.

    +

    Get auto-follow patterns.

    +

    Get cross-cluster replication auto-follow patterns.

    ``_ - :param name: Specifies the auto-follow pattern collection that you want to retrieve. - If you do not specify a name, the API returns information for all collections. - :param master_timeout: Period to wait for a connection to the master node. + :param name: The auto-follow pattern collection that you want to retrieve. If + you do not specify a name, the API returns information for all collections. + :param master_timeout: The period to wait for a connection to the master node. + If the master node is not available before the timeout expires, the request + fails and returns an error. It can also be set to `-1` to indicate that the + request should never timeout. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: @@ -489,8 +496,8 @@ async def pause_auto_follow_pattern( """ .. raw:: html -

    Pause an auto-follow pattern. - Pause a cross-cluster replication auto-follow pattern. +

    Pause an auto-follow pattern.

    +

    Pause a cross-cluster replication auto-follow pattern. When the API returns, the auto-follow pattern is inactive. New indices that are created on the remote cluster and match the auto-follow patterns are ignored.
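
    A minimal sketch from the Python client, assuming a hypothetical pattern name:

        client.ccr.pause_auto_follow_pattern(name="my-auto-follow-pattern")
        # ...later: new matching remote indices created in the meantime are
        # picked up on resume unless they were deleted or closed.
        client.ccr.resume_auto_follow_pattern(name="my-auto-follow-pattern")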

    You can resume auto-following with the resume auto-follow pattern API. @@ -500,9 +507,11 @@ async def pause_auto_follow_pattern( ``_ - :param name: The name of the auto follow pattern that should pause discovering - new indices to follow. - :param master_timeout: Period to wait for a connection to the master node. + :param name: The name of the auto-follow pattern to pause. + :param master_timeout: The period to wait for a connection to the master node. + If the master node is not available before the timeout expires, the request + fails and returns an error. It can also be set to `-1` to indicate that the + request should never timeout. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -543,8 +552,8 @@ async def pause_follow( """ .. raw:: html -

    Pause a follower. - Pause a cross-cluster replication follower index. +

    Pause a follower.

    +

    Pause a cross-cluster replication follower index. The follower index will not fetch any additional operations from the leader index. You can resume following with the resume follower API. You can pause and resume a follower index to change the configuration of the following task.

    @@ -552,9 +561,11 @@ async def pause_follow( ``_ - :param index: The name of the follower index that should pause following its - leader index. - :param master_timeout: Period to wait for a connection to the master node. + :param index: The name of the follower index. + :param master_timeout: The period to wait for a connection to the master node. + If the master node is not available before the timeout expires, the request + fails and returns an error. It can also be set to `-1` to indicate that the + request should never timeout. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -765,17 +776,19 @@ async def resume_auto_follow_pattern( """ .. raw:: html -

    Resume an auto-follow pattern. - Resume a cross-cluster replication auto-follow pattern that was paused. +

    Resume an auto-follow pattern.

    +

    Resume a cross-cluster replication auto-follow pattern that was paused. The auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster. Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim.

    ``_ - :param name: The name of the auto follow pattern to resume discovering new indices - to follow. - :param master_timeout: Period to wait for a connection to the master node. + :param name: The name of the auto-follow pattern to resume. + :param master_timeout: The period to wait for a connection to the master node. + If the master node is not available before the timeout expires, the request + fails and returns an error. It can also be set to `-1` to indicate that the + request should never timeout. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -934,15 +947,18 @@ async def stats( """ .. raw:: html -

    Get cross-cluster replication stats. - This API returns stats about auto-following and the same shard-level stats as the get follower stats API.

    +

    Get cross-cluster replication stats.

    +

    This API returns stats about auto-following and the same shard-level stats as the get follower stats API.

    ``_ - :param master_timeout: Period to wait for a connection to the master node. - :param timeout: Period to wait for a response. If no response is received before - the timeout expires, the request fails and returns an error. + :param master_timeout: The period to wait for a connection to the master node. + If the master node is not available before the timeout expires, the request + fails and returns an error. It can also be set to `-1` to indicate that the + request should never timeout. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_ccr/stats" @@ -983,18 +999,23 @@ async def unfollow( """ .. raw:: html -

    Unfollow an index. - Convert a cross-cluster replication follower index to a regular index. +

    Unfollow an index.

    +

    Convert a cross-cluster replication follower index to a regular index. The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. The follower index must be paused and closed before you call the unfollow API.

    -

    NOTE: Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation.

    +
    +

    info: Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation.

    +
    ``_ - :param index: The name of the follower index that should be turned into a regular - index. - :param master_timeout: Period to wait for a connection to the master node. + :param index: The name of the follower index. + :param master_timeout: The period to wait for a connection to the master node. + If the master node is not available before the timeout expires, the request + fails and returns an error. It can also be set to `-1` to indicate that the + request should never timeout. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") diff --git a/elasticsearch/_async/client/cluster.py b/elasticsearch/_async/client/cluster.py index e440392a4..9acb82f65 100644 --- a/elasticsearch/_async/client/cluster.py +++ b/elasticsearch/_async/client/cluster.py @@ -447,8 +447,8 @@ async def health( """ .. raw:: html -

    Get the cluster health status. - You can also use the API to get the health status of only specified data streams and indices. +

    Get the cluster health status.

    +

    You can also use the API to get the health status of only specified data streams and indices. For data streams, the API retrieves the health status of the stream’s backing indices.
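
    For example, a hedged sketch of asking for the health of a single target and waiting for at least the yellow status described below (names and timeout are assumptions):

        health = client.cluster.health(
            index="my-data-stream",   # hypothetical data stream or index
            wait_for_status="yellow",
            timeout="30s",
        )
        print(health["status"], health["active_shards"])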

    The cluster health status is: green, yellow or red. On the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated. @@ -850,8 +850,8 @@ async def put_settings( """ .. raw:: html -

    Update the cluster settings. - Configure and update dynamic settings on a running cluster. +

    Update the cluster settings.

    +

    Configure and update dynamic settings on a running cluster. You can also configure dynamic settings locally on an unstarted or shut down node in elasticsearch.yml.

    Updates made with this API can be persistent, which apply across cluster restarts, or transient, which reset after a cluster restart. You can also reset transient or persistent settings by assigning them a null value.
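
    A sketch of a persistent update and a reset, assuming a dynamic setting you actually want to change:

        # Persist across full cluster restarts.
        client.cluster.put_settings(
            persistent={"indices.recovery.max_bytes_per_sec": "50mb"}
        )

        # Reset the same setting by assigning it a null value.
        client.cluster.put_settings(
            persistent={"indices.recovery.max_bytes_per_sec": None}
        )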

    @@ -920,9 +920,16 @@ async def remote_info( """ .. raw:: html -

    Get remote cluster information. - Get all of the configured remote cluster information. - This API returns connection and endpoint information keyed by the configured remote cluster alias.

    +

    Get remote cluster information.

    +

    Get information about configured remote clusters. The API returns connection and endpoint information keyed by the configured remote cluster alias.

    +
    +

    info: This API returns information that reflects current state on the local cluster. The connected field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it. Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster. To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the resolve cluster endpoint.

    +
    ``_ diff --git a/elasticsearch/_async/client/eql.py b/elasticsearch/_async/client/eql.py index 871c37bcc..8985e91ec 100644 --- a/elasticsearch/_async/client/eql.py +++ b/elasticsearch/_async/client/eql.py @@ -43,7 +43,7 @@ async def delete( The API also deletes results for the search.

    - ``_ + ``_ :param id: Identifier for the search to delete. A search ID is provided in the EQL search API's response for an async search. A search ID is also provided @@ -251,8 +251,15 @@ async def search( :param index: The name of the index to scope the operation :param query: EQL query you wish to run. :param allow_no_indices: - :param allow_partial_search_results: - :param allow_partial_sequence_results: + :param allow_partial_search_results: Allow query execution also in case of shard + failures. If true, the query will keep running and will return results based + on the available shards. For sequences, the behavior can be further refined + using allow_partial_sequence_results + :param allow_partial_sequence_results: This flag applies only to sequences and + has effect only if allow_partial_search_results=true. If true, the sequence + query will return results based on the available shards, ignoring the others. + If false, the sequence query will return successfully, but will always have + empty results. :param case_sensitive: :param event_category_field: Field containing the event classification, such as process, file, or network. diff --git a/elasticsearch/_async/client/esql.py b/elasticsearch/_async/client/esql.py index bfdda5bbf..d36ac49ed 100644 --- a/elasticsearch/_async/client/esql.py +++ b/elasticsearch/_async/client/esql.py @@ -30,6 +30,7 @@ class EsqlClient(NamespacedClient): "query", "columnar", "filter", + "include_ccs_metadata", "locale", "params", "profile", @@ -56,6 +57,7 @@ async def async_query( ] ] = None, human: t.Optional[bool] = None, + include_ccs_metadata: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, keep_on_completion: t.Optional[bool] = None, locale: t.Optional[str] = None, @@ -97,6 +99,10 @@ async def async_query( :param filter: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. :param format: A short version of the Accept header, for example `json` or `yaml`. + :param include_ccs_metadata: When set to `true` and performing a cross-cluster + query, the response will include an extra `_clusters` object with information + about the clusters that participated in the search along with info such as + shards count. :param keep_alive: The period for which the query and its results are stored in the cluster. The default period is five days. When this period expires, the query and its results are deleted, even if the query is still ongoing. @@ -155,6 +161,8 @@ async def async_query( __body["columnar"] = columnar if filter is not None: __body["filter"] = filter + if include_ccs_metadata is not None: + __body["include_ccs_metadata"] = include_ccs_metadata if locale is not None: __body["locale"] = locale if params is not None: @@ -298,11 +306,67 @@ async def async_query_get( path_parts=__path_parts, ) + @_rewrite_parameters() + async def async_query_stop( + self, + *, + id: str, + drop_null_columns: t.Optional[bool] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

    Stop async ES|QL query.

    +

    This API interrupts the query execution and returns the results so far. If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can stop it.
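
    A hedged sketch of the round trip with the methods touched by this change (query text and field names are assumptions):

        # Start an async ES|QL query; include cross-cluster metadata if it
        # fans out to remote clusters.
        started = client.esql.async_query(
            query="FROM my-index | STATS count = COUNT(*) BY host.name | LIMIT 10",
            include_ccs_metadata=True,
            keep_on_completion=True,
        )

        # If it is still running, stop it and keep whatever results exist so far.
        if started.get("is_running"):
            partial = client.esql.async_query_stop(id=started["id"])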

    + + + ``_ + + :param id: The unique identifier of the query. A query ID is provided in the + ES|QL async query API response for a query that does not complete in the + designated time. A query ID is also provided when the request was submitted + with the `keep_on_completion` parameter set to `true`. + :param drop_null_columns: Indicates whether columns that are entirely `null` + will be removed from the `columns` and `values` portion of the results. If + `true`, the response will include an extra section under the name `all_columns` + which has the name of all the columns. + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_query/async/{__path_parts["id"]}/stop' + __query: t.Dict[str, t.Any] = {} + if drop_null_columns is not None: + __query["drop_null_columns"] = drop_null_columns + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + endpoint_id="esql.async_query_stop", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=( "query", "columnar", "filter", + "include_ccs_metadata", "locale", "params", "profile", @@ -329,6 +393,7 @@ async def query( ] ] = None, human: t.Optional[bool] = None, + include_ccs_metadata: t.Optional[bool] = None, locale: t.Optional[str] = None, params: t.Optional[ t.Sequence[t.Union[None, bool, float, int, str, t.Any]] @@ -364,6 +429,10 @@ async def query( :param filter: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. :param format: A short version of the Accept header, e.g. json, yaml. + :param include_ccs_metadata: When set to `true` and performing a cross-cluster + query, the response will include an extra `_clusters` object with information + about the clusters that participated in the search along with info such as + shards count. :param locale: :param params: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) @@ -402,6 +471,8 @@ async def query( __body["columnar"] = columnar if filter is not None: __body["filter"] = filter + if include_ccs_metadata is not None: + __body["include_ccs_metadata"] = include_ccs_metadata if locale is not None: __body["locale"] = locale if params is not None: diff --git a/elasticsearch/_async/client/features.py b/elasticsearch/_async/client/features.py index 7615ddddf..8cd9d7b9e 100644 --- a/elasticsearch/_async/client/features.py +++ b/elasticsearch/_async/client/features.py @@ -102,7 +102,7 @@ async def reset_features(

    IMPORTANT: The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes.

    - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. """ diff --git a/elasticsearch/_async/client/fleet.py b/elasticsearch/_async/client/fleet.py index acdb046de..2cc830efe 100644 --- a/elasticsearch/_async/client/fleet.py +++ b/elasticsearch/_async/client/fleet.py @@ -48,7 +48,9 @@ async def global_checkpoints( """ .. raw:: html -

    Returns the current global checkpoints for an index. This API is design for internal use by the fleet server project.

    +

    Get global checkpoints.

    +

    Get the current global checkpoints for an index. This API is designed for internal use by the Fleet server project.

    ``_ @@ -141,6 +143,8 @@ async def msearch( supports the wait_for_checkpoints parameter.

    + ``_ + :param searches: :param index: A single target to search. If the target is an index alias, it must resolve to a single index. @@ -388,6 +392,8 @@ async def search( after provided checkpoint has been processed and is visible for searches inside of Elasticsearch.

    + ``_ + :param index: A single target to search. If the target is an index alias, it must resolve to a single index. :param aggregations: diff --git a/elasticsearch/_async/client/ilm.py b/elasticsearch/_async/client/ilm.py index 482bb3fdd..d483797d0 100644 --- a/elasticsearch/_async/client/ilm.py +++ b/elasticsearch/_async/client/ilm.py @@ -214,8 +214,8 @@ async def get_status( """ .. raw:: html -

    Get the ILM status. - Get the current index lifecycle management status.

    +

    Get the ILM status.

    +

    Get the current index lifecycle management status.

    ``_ diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py index 02b315efb..890f6903e 100644 --- a/elasticsearch/_async/client/indices.py +++ b/elasticsearch/_async/client/indices.py @@ -57,23 +57,40 @@ async def add_block( """ .. raw:: html -

    Add an index block. - Limits the operations allowed on an index by blocking specific operation types.

    +

    Add an index block.

    +

    Add an index block to an index. Index blocks limit the operations allowed on an index by blocking specific operation types.
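
    A minimal sketch, assuming you want to stop writes to a hypothetical index before maintenance:

        # Valid blocks include "metadata", "read", "read_only", and "write".
        client.indices.add_block(index="my-index", block="write")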

    - ``_ + ``_ - :param index: A comma separated list of indices to add a block to - :param block: The block to add (one of read, write, read_only or metadata) - :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves - into no concrete indices. (This includes `_all` string or when no indices - have been specified) - :param expand_wildcards: Whether to expand wildcard expression to concrete indices - that are open, closed or both. - :param ignore_unavailable: Whether specified concrete indices should be ignored - when unavailable (missing or closed) - :param master_timeout: Specify timeout for connection to master - :param timeout: Explicit operation timeout + :param index: A comma-separated list or wildcard expression of index names used + to limit the request. By default, you must explicitly name the indices you + are adding blocks to. To allow the adding of blocks to indices with `_all`, + `*`, or other wildcard expressions, change the `action.destructive_requires_name` + setting to `false`. You can update this setting in the `elasticsearch.yml` + file or by using the cluster update settings API. + :param block: The block type to add to the index. + :param allow_no_indices: If `false`, the request returns an error if any wildcard + expression, index alias, or `_all` value targets only missing or closed indices. + This behavior applies even if the request targets other open indices. For + example, a request targeting `foo*,bar*` returns an error if an index starts + with `foo` but no index starts with `bar`. + :param expand_wildcards: The type of index that wildcard patterns can match. + If the request can target data streams, this argument determines whether + wildcard expressions match hidden data streams. It supports comma-separated + values, such as `open,hidden`. + :param ignore_unavailable: If `false`, the request returns an error if it targets + a missing or closed index. + :param master_timeout: The period to wait for the master node. If the master + node is not available before the timeout expires, the request fails and returns + an error. It can also be set to `-1` to indicate that the request should + never timeout. + :param timeout: The period to wait for a response from all relevant nodes in + the cluster after updating the cluster metadata. If no response is received + before the timeout expires, the cluster metadata update still applies but + the response will indicate that it was not completely acknowledged. It can + also be set to `-1` to indicate that the request should never timeout. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -710,12 +727,11 @@ async def create_data_stream( """ .. raw:: html -

    Create a data stream. - Creates a data stream. - You must have a matching index template with data stream enabled.

    +

    Create a data stream.

    +

    You must have a matching index template with data stream enabled.

    - ``_ + ``_ :param name: Name of the data stream, which must meet the following criteria: Lowercase only; Cannot include `\\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, @@ -841,11 +857,11 @@ async def data_streams_stats( """ .. raw:: html -

    Get data stream stats. - Retrieves statistics for one or more data streams.

    +

    Get data stream stats.

    +

    Get statistics for one or more data streams.

    - ``_ + ``_ :param name: Comma-separated list of data streams used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams in a @@ -1120,7 +1136,7 @@ async def delete_data_stream( Deletes one or more data streams and their backing indices.

    - ``_ + ``_ :param name: Comma-separated list of data streams to delete. Wildcard (`*`) expressions are supported. @@ -1538,11 +1554,11 @@ async def exists_alias( """ .. raw:: html -

    Check aliases. - Checks if one or more data stream or index aliases exist.

    +

    Check aliases.

    +

    Check if one or more data stream or index aliases exist.

    - ``_ + ``_ :param name: Comma-separated list of aliases to check. Supports wildcards (`*`). :param index: Comma-separated list of data streams or indices used to limit the @@ -1612,11 +1628,11 @@ async def exists_index_template( """ .. raw:: html -

    Check index templates. - Check whether index templates exist.

    +

    Check index templates.

    +

    Check whether index templates exist.

    - ``_ + ``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. @@ -2287,8 +2303,8 @@ async def get_data_lifecycle( """ .. raw:: html -

    Get data stream lifecycles. - Retrieves the data stream lifecycle configuration of one or more data streams.

    +

    Get data stream lifecycles.

    +

    Get the data stream lifecycle configuration of one or more data streams.
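
    A sketch of reading a lifecycle and then updating it with the explicit data_retention field introduced in this change (data stream name and retention period are assumptions):

        current = client.indices.get_data_lifecycle(name="my-data-stream")

        # Keep documents for at least seven days; downsampling and enabled are optional.
        client.indices.put_data_lifecycle(name="my-data-stream", data_retention="7d")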

    ``_ @@ -2396,11 +2412,11 @@ async def get_data_stream( """ .. raw:: html -

    Get data streams. - Retrieves information about one or more data streams.

    +

    Get data streams.

    +

    Get information about one or more data streams.

    - ``_ + ``_ :param name: Comma-separated list of data stream names used to limit the request. Wildcard (`*`) expressions are supported. If omitted, all data streams are @@ -3355,14 +3371,15 @@ async def put_alias( ) @_rewrite_parameters( - body_name="lifecycle", + body_fields=("data_retention", "downsampling", "enabled"), ) async def put_data_lifecycle( self, *, name: t.Union[str, t.Sequence[str]], - lifecycle: t.Optional[t.Mapping[str, t.Any]] = None, - body: t.Optional[t.Mapping[str, t.Any]] = None, + data_retention: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + downsampling: t.Optional[t.Mapping[str, t.Any]] = None, + enabled: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ @@ -3377,6 +3394,7 @@ async def put_data_lifecycle( master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html @@ -3389,7 +3407,15 @@ async def put_data_lifecycle( :param name: Comma-separated list of data streams used to limit the request. Supports wildcards (`*`). To target all data streams use `*` or `_all`. - :param lifecycle: + :param data_retention: If defined, every document added to this data stream will + be stored at least for this time frame. Any time after this duration the + document could be deleted. When empty, every document in this data stream + will be stored indefinitely. + :param downsampling: The downsampling configuration to execute for the managed + backing index after rollover. + :param enabled: If defined, it turns data stream lifecycle on/off (`true`/`false`) + for this data stream. A data stream lifecycle that's disabled (enabled: `false`) + will have no effect on the data stream. :param expand_wildcards: Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `hidden`, `open`, `closed`, `none`. @@ -3401,15 +3427,10 @@ async def put_data_lifecycle( """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") - if lifecycle is None and body is None: - raise ValueError( - "Empty value passed for parameters 'lifecycle' and 'body', one of them should be set." 
- ) - elif lifecycle is not None and body is not None: - raise ValueError("Cannot set both 'lifecycle' and 'body'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}/_lifecycle' __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: @@ -3424,8 +3445,18 @@ async def put_data_lifecycle( __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout - __body = lifecycle if lifecycle is not None else body - __headers = {"accept": "application/json", "content-type": "application/json"} + if not __body: + if data_retention is not None: + __body["data_retention"] = data_retention + if downsampling is not None: + __body["downsampling"] = downsampling + if enabled is not None: + __body["enabled"] = enabled + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, @@ -3631,10 +3662,7 @@ async def put_mapping( ] = None, dynamic_date_formats: t.Optional[t.Sequence[str]] = None, dynamic_templates: t.Optional[ - t.Union[ - t.Mapping[str, t.Mapping[str, t.Any]], - t.Sequence[t.Mapping[str, t.Mapping[str, t.Any]]], - ] + t.Sequence[t.Mapping[str, t.Mapping[str, t.Any]]] ] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ @@ -4255,7 +4283,7 @@ async def reload_search_analyzers( async def resolve_cluster( self, *, - name: t.Union[str, t.Sequence[str]], + name: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ @@ -4271,19 +4299,20 @@ async def resolve_cluster( ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html -

    Resolve the cluster. - Resolve the specified index expressions to return information about each cluster, including the local cluster, if included. - Multiple patterns and remote clusters are supported.

    +

    Resolve the cluster.

    +

    Resolve the specified index expressions to return information about each cluster, including the local "querying" cluster, if included. If no index expression is provided, the API will return information about all the remote clusters that are configured on the querying cluster.

    This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search.

    You use the same index expression with this endpoint as you would for cross-cluster search. Index and cluster exclusions are also supported with this endpoint.

    For each cluster in the index expression, information is returned about:

    - • Whether the querying ("local") cluster is currently connected to each remote cluster in the index expression scope.
    + • Whether the querying ("local") cluster is currently connected to each remote cluster specified in the index expression. Note that this endpoint actively attempts to contact the remote clusters, unlike the remote/info endpoint.
    • Whether each remote cluster is configured with skip_unavailable as true or false.
    • Whether there are any indices, aliases, or data streams on that cluster that match the index expression.
    • Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).

    @@ -4291,7 +4320,13 @@ async def resolve_cluster(

    For example, GET /_resolve/cluster/my-index-*,cluster*:my-index-* returns information about the local cluster and all remotely configured clusters that start with the alias cluster*. Each cluster returns information about whether it has any indices, aliases or data streams that match my-index-*.

    -

    Advantages of using this endpoint before a cross-cluster search

    +

    Note on backwards compatibility

    +

    The ability to query without an index expression was added in version 8.18, so when querying remote clusters older than that, the local cluster will send the index expression dummy* to those remote clusters. Thus, if errors occur, you may see a reference to that index expression even though you didn't request it. If that causes a problem, you can instead include an index expression like *:* to bypass the issue.

    +

    Advantages of using this endpoint before a cross-cluster search

    You may want to exclude a cluster or index from a search when:

    • A remote cluster is not currently connected and is configured with skip_unavailable=false. Running a cross-cluster search under those conditions will cause the entire search to fail.
    • The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the _resolve/cluster response will be present. (This is also where security/permission errors will be shown.)
    • A remote cluster is an older version that does not support the feature you want to use in your search.

    @@ -4299,31 +4334,60 @@ async def resolve_cluster(

    Test availability of remote clusters

    +

    The remote/info endpoint is commonly used to test whether the "local" cluster (the cluster being queried) is connected to its remote clusters, but it does not necessarily reflect whether the remote cluster is available or not. The remote cluster may be available, while the local cluster is not currently connected to it.

    You can use the _resolve/cluster API to attempt to reconnect to remote clusters. For example, use GET _resolve/cluster or GET _resolve/cluster/*:*. The connected field in the response will indicate whether it was successful. If a connection was (re-)established, this will also cause the remote/info endpoint to now indicate a connected status.
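
    A hedged sketch of both forms from the Python client, using the optional index expression and the timeout parameter added in this change:

        # No index expression: report on every configured remote cluster.
        info = client.indices.resolve_cluster(timeout="5s")

        # With an index expression, including a remote pattern.
        info = client.indices.resolve_cluster(name="my-index-*,cluster*:my-index-*")
        for alias, details in info.items():
            print(alias, details.get("connected"))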

    ``_ - :param name: Comma-separated name(s) or index pattern(s) of the indices, aliases, - and data streams to resolve. Resources on remote clusters can be specified - using the ``:`` syntax. + :param name: A comma-separated list of names or index patterns for the indices, + aliases, and data streams to resolve. Resources on remote clusters can be + specified using the ``:`` syntax. Index and cluster exclusions + (e.g., `-cluster1:*`) are also supported. If no index expression is specified, + information about all remote clusters configured on the local cluster is + returned without doing any index matching :param allow_no_indices: If false, the request returns an error if any wildcard - expression, index alias, or _all value targets only missing or closed indices. + expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For - example, a request targeting foo*,bar* returns an error if an index starts - with foo but no index starts with bar. + example, a request targeting `foo*,bar*` returns an error if an index starts + with `foo` but no index starts with `bar`. NOTE: This option is only supported + when specifying an index expression. You will get an error if you specify + index options to the `_resolve/cluster` API endpoint that takes no index + expression. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - :param ignore_throttled: If true, concrete, expanded or aliased indices are ignored - when frozen. Defaults to false. + NOTE: This option is only supported when specifying an index expression. + You will get an error if you specify index options to the `_resolve/cluster` + API endpoint that takes no index expression. + :param ignore_throttled: If true, concrete, expanded, or aliased indices are + ignored when frozen. NOTE: This option is only supported when specifying + an index expression. You will get an error if you specify index options to + the `_resolve/cluster` API endpoint that takes no index expression. :param ignore_unavailable: If false, the request returns an error if it targets - a missing or closed index. Defaults to false. + a missing or closed index. NOTE: This option is only supported when specifying + an index expression. You will get an error if you specify index options to + the `_resolve/cluster` API endpoint that takes no index expression. + :param timeout: The maximum time to wait for remote clusters to respond. If a + remote cluster does not respond within this timeout period, the API response + will show the cluster as not connected and include an error message that + the request timed out. The default timeout is unset and the query can take + as long as the networking layer is configured to wait for remote clusters + that are not responding (typically 30 seconds). 
""" - if name in SKIP_IN_PATH: - raise ValueError("Empty value passed for parameter 'name'") - __path_parts: t.Dict[str, str] = {"name": _quote(name)} - __path = f'/_resolve/cluster/{__path_parts["name"]}' + __path_parts: t.Dict[str, str] + if name not in SKIP_IN_PATH: + __path_parts = {"name": _quote(name)} + __path = f'/_resolve/cluster/{__path_parts["name"]}' + else: + __path_parts = {} + __path = "/_resolve/cluster" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices @@ -4341,6 +4405,8 @@ async def resolve_cluster( __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py index f85857b8b..804e920b6 100644 --- a/elasticsearch/_async/client/inference.py +++ b/elasticsearch/_async/client/inference.py @@ -51,12 +51,12 @@ async def delete( ``_ - :param inference_id: The inference Id + :param inference_id: The inference identifier. :param task_type: The task type - :param dry_run: When true, the endpoint is not deleted, and a list of ingest - processors which reference this endpoint is returned + :param dry_run: When true, the endpoint is not deleted and a list of ingest processors + which reference this endpoint is returned. :param force: When true, the inference endpoint is forcefully deleted even if - it is still being used by ingest processors or semantic text fields + it is still being used by ingest processors or semantic text fields. """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") @@ -180,18 +180,29 @@ async def inference( """ .. raw:: html -

    Perform inference on the service

    +

    Perform inference on the service.

    +

    This API enables you to use machine learning models to perform specific tasks on data that you provide as an input. It returns a response with the results of the tasks. The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.

    +
    +

    info: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
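
    A minimal sketch, assuming an inference endpoint named my-elser-endpoint was created beforehand with the create inference API:

        resp = client.inference.inference(
            inference_id="my-elser-endpoint",  # hypothetical endpoint ID
            input=["The quick brown fox jumped over the lazy dog"],
        )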

    +
    ``_ - :param inference_id: The inference Id - :param input: Inference input. Either a string or an array of strings. - :param task_type: The task type - :param query: Query input, required for rerank task. Not required for other tasks. - :param task_settings: Optional task settings - :param timeout: Specifies the amount of time to wait for the inference request - to complete. + :param inference_id: The unique identifier for the inference endpoint. + :param input: The text on which you want to perform the inference task. It can + be a single string or an array. > info > Inference endpoints for the `completion` + task type currently only support a single string as input. + :param task_type: The type of inference task that the model performs. + :param query: The query input, which is required only for the `rerank` task. + It is not required for other tasks. + :param task_settings: Task settings for the individual inference request. These + settings are specific to the task type you specified and override the task + settings specified when initializing the service. + :param timeout: The amount of time to wait for the inference request to complete. """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") diff --git a/elasticsearch/_async/client/ingest.py b/elasticsearch/_async/client/ingest.py index 469be28e6..69792566c 100644 --- a/elasticsearch/_async/client/ingest.py +++ b/elasticsearch/_async/client/ingest.py @@ -40,18 +40,18 @@ async def delete_geoip_database( """ .. raw:: html -

-          <p>Delete GeoIP database configurations.
-          Delete one or more IP geolocation database configurations.</p>
+          <p>Delete GeoIP database configurations.</p>
+          <p>Delete one or more IP geolocation database configurations.</p>

    - ``_ + ``_ :param id: A comma-separated list of geoip database configurations to delete - :param master_timeout: Period to wait for a connection to the master node. If - no response is received before the timeout expires, the request fails and - returns an error. - :param timeout: Period to wait for a response. If no response is received before - the timeout expires, the request fails and returns an error. + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -244,15 +244,15 @@ async def get_geoip_database( """ .. raw:: html -

-          <p>Get GeoIP database configurations.
-          Get information about one or more IP geolocation database configurations.</p>
+          <p>Get GeoIP database configurations.</p>
+          <p>Get information about one or more IP geolocation database configurations.</p>

    - ``_ + ``_ - :param id: Comma-separated list of database configuration IDs to retrieve. Wildcard - (`*`) expressions are supported. To get all database configurations, omit - this parameter or use `*`. + :param id: A comma-separated list of database configuration IDs to retrieve. + Wildcard (`*`) expressions are supported. To get all database configurations, + omit this parameter or use `*`. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: @@ -350,8 +350,8 @@ async def get_pipeline( """ .. raw:: html -

    Get pipelines. - Get information about one or more ingest pipelines. +

    Get pipelines.

    +

    Get information about one or more ingest pipelines. This API returns a local reference of the pipeline.

    @@ -455,11 +455,11 @@ async def put_geoip_database( """ .. raw:: html -

-          <p>Create or update a GeoIP database configuration.
-          Refer to the create or update IP geolocation database configuration API.</p>
+          <p>Create or update a GeoIP database configuration.</p>
+          <p>Refer to the create or update IP geolocation database configuration API.</p>

    - ``_ + ``_ :param id: ID of the database configuration to create or update. :param maxmind: The configuration necessary to identify which IP geolocation @@ -712,17 +712,17 @@ async def simulate( """ .. raw:: html -

    Simulate a pipeline. - Run an ingest pipeline against a set of provided documents. +

    Simulate a pipeline.

    +

    Run an ingest pipeline against a set of provided documents. You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.
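
    A minimal sketch of the call described here, using the async client (the pipeline id and the
    sample document are assumptions):

        from elasticsearch import AsyncElasticsearch

        client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster

        async def simulate_pipeline():
            return await client.ingest.simulate(
                id="my-pipeline",  # or pass a `pipeline` definition in the body instead
                docs=[{"_index": "my-index", "_id": "1", "_source": {"message": "hello world"}}],
                verbose=True,
            )
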

    ``_ :param docs: Sample documents to test in the pipeline. - :param id: Pipeline to test. If you don’t specify a `pipeline` in the request + :param id: The pipeline to test. If you don't specify a `pipeline` in the request body, this parameter is required. - :param pipeline: Pipeline to test. If you don’t specify the `pipeline` request + :param pipeline: The pipeline to test. If you don't specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter. :param verbose: If `true`, the response includes output data for each processor diff --git a/elasticsearch/_async/client/license.py b/elasticsearch/_async/client/license.py index cc3827cce..aac236243 100644 --- a/elasticsearch/_async/client/license.py +++ b/elasticsearch/_async/client/license.py @@ -39,16 +39,16 @@ async def delete( """ .. raw:: html -

    Delete the license. - When the license expires, your subscription level reverts to Basic.

    +

    Delete the license.

    +

    When the license expires, your subscription level reverts to Basic.

    If the operator privileges feature is enabled, only operator users can use this API.

    ``_ - :param master_timeout: Period to wait for a connection to the master node. - :param timeout: Period to wait for a response. If no response is received before - the timeout expires, the request fails and returns an error. + :param master_timeout: The period to wait for a connection to the master node. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_license" @@ -89,10 +89,13 @@ async def get( """ .. raw:: html -

    Get license information. - Get information about your Elastic license including its type, its status, when it was issued, and when it expires.

    -

    NOTE: If the master node is generating a new cluster state, the get license API may return a 404 Not Found response. +

    Get license information.

    +

    Get information about your Elastic license including its type, its status, when it was issued, and when it expires.

    +
    +

    info + If the master node is generating a new cluster state, the get license API may return a 404 Not Found response. If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request.

    +
    ``_ @@ -225,8 +228,8 @@ async def post( """ .. raw:: html -

    Update the license. - You can update your license at runtime without shutting down your nodes. +

    Update the license.

    +

    You can update your license at runtime without shutting down your nodes. License updates take effect immediately. If the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true.
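
    For illustration only (the license file path is an assumption, and `acknowledge` is the
    acknowledge parameter mentioned above), a runtime license update might look like:

        import json
        from pathlib import Path

        from elasticsearch import AsyncElasticsearch

        client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster

        async def update_license():
            # "license.json" is the license document issued by Elastic (assumed path)
            new_license = json.loads(Path("license.json").read_text())
            return await client.license.post(licenses=[new_license], acknowledge=True)
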

    @@ -240,9 +243,9 @@ async def post( :param license: :param licenses: A sequence of one or more JSON documents containing the license information. - :param master_timeout: Period to wait for a connection to the master node. - :param timeout: Period to wait for a response. If no response is received before - the timeout expires, the request fails and returns an error. + :param master_timeout: The period to wait for a connection to the master node. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_license" @@ -297,8 +300,8 @@ async def post_start_basic( """ .. raw:: html -

    Start a basic license. - Start an indefinite basic license, which gives access to all the basic features.

    +

    Start a basic license.

    +

    Start an indefinite basic license, which gives access to all the basic features.

    NOTE: In order to start a basic license, you must not currently have a basic license.

    If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true.

    diff --git a/elasticsearch/_async/client/ml.py b/elasticsearch/_async/client/ml.py index dfc4ddc32..d39c84f73 100644 --- a/elasticsearch/_async/client/ml.py +++ b/elasticsearch/_async/client/ml.py @@ -38,8 +38,8 @@ async def clear_trained_model_deployment_cache( """ .. raw:: html -

    Clear trained model deployment cache. - Cache will be cleared on all nodes where the trained model is assigned. +

    Clear trained model deployment cache.

    +

    Cache will be cleared on all nodes where the trained model is assigned. A trained model deployment may have an inference cache enabled. As requests are handled by each allocated node, their responses may be cached on that individual node. Calling this API clears the caches without restarting the deployment.

    @@ -93,8 +93,8 @@ async def close_job( """ .. raw:: html -

    Close anomaly detection jobs. - A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. +

    Close anomaly detection jobs.

    +

    A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its meta data. Therefore it is a best practice to close jobs that are no longer required to process data. If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request. When a datafeed that has a specified end date stops, it automatically closes its associated job.
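
    A short sketch of closing a job as described above (the job id is an assumption; the generous
    timeout leaves room for the housekeeping tasks mentioned here):

        from elasticsearch import AsyncElasticsearch

        client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster

        async def close_anomaly_job():
            return await client.ml.close_job(
                job_id="my-anomaly-job",  # also accepts a group name, a list of jobs, or "_all"
                timeout="30m",
            )
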

    @@ -161,8 +161,8 @@ async def delete_calendar( """ .. raw:: html -

    Delete a calendar. - Removes all scheduled events from a calendar, then deletes it.

    +

    Delete a calendar.

    +

    Remove all scheduled events from a calendar, then delete it.

    ``_ @@ -415,15 +415,15 @@ async def delete_expired_data( """ .. raw:: html -

    Delete expired ML data. - Deletes all job results, model snapshots and forecast data that have exceeded +

    Delete expired ML data.

    +

    Delete all job results, model snapshots and forecast data that have exceeded their retention days period. Machine learning state documents that are not associated with any job are also deleted. You can limit the request to a single or set of anomaly detection jobs by using a job identifier, a group name, a comma-separated list of jobs, or a wildcard expression. You can delete expired data for all anomaly detection - jobs by using _all, by specifying * as the <job_id>, or by omitting the - <job_id>.

    + jobs by using _all, by specifying * as the <job_id>, or by omitting the + <job_id>.

    ``_ @@ -485,8 +485,8 @@ async def delete_filter( """ .. raw:: html -

    Delete a filter. - If an anomaly detection job references the filter, you cannot delete the +

    Delete a filter.

    +

    If an anomaly detection job references the filter, you cannot delete the filter. You must update or delete the job before you can delete the filter.

    @@ -533,8 +533,8 @@ async def delete_forecast( """ .. raw:: html -

    Delete forecasts from a job. - By default, forecasts are retained for 14 days. You can specify a +

    Delete forecasts from a job.

    +

    By default, forecasts are retained for 14 days. You can specify a different retention period with the expires_in parameter in the forecast jobs API. The delete forecast API enables you to delete one or more forecasts before they expire.

    @@ -607,8 +607,8 @@ async def delete_job( """ .. raw:: html -

    Delete an anomaly detection job. - All job configuration, model state and results are deleted. +

    Delete an anomaly detection job.

    +

    All job configuration, model state and results are deleted. It is not currently possible to delete multiple jobs using wildcards or a comma separated list. If you delete a job that has a datafeed, the request first tries to delete the datafeed. This behavior is equivalent to calling @@ -670,8 +670,8 @@ async def delete_model_snapshot( """ .. raw:: html -

    Delete a model snapshot. - You cannot delete the active model snapshot. To delete that snapshot, first +

    Delete a model snapshot.

    +

    You cannot delete the active model snapshot. To delete that snapshot, first revert to a different one. To identify the active model snapshot, refer to the model_snapshot_id in the results from the get jobs API.

    @@ -724,8 +724,8 @@ async def delete_trained_model( """ .. raw:: html -

    Delete an unreferenced trained model. - The request deletes a trained inference model that is not referenced by an ingest pipeline.

    +

    Delete an unreferenced trained model.

    +

    The request deletes a trained inference model that is not referenced by an ingest pipeline.

    ``_ @@ -777,8 +777,8 @@ async def delete_trained_model_alias( """ .. raw:: html -

    Delete a trained model alias. - This API deletes an existing model alias that refers to a trained model. If +

    Delete a trained model alias.

    +

    This API deletes an existing model alias that refers to a trained model. If the model alias is missing or refers to a model other than the one identified by the model_id, this API returns an error.

    @@ -838,13 +838,13 @@ async def estimate_model_memory( """ .. raw:: html -

    Estimate job model memory usage. - Makes an estimation of the memory usage for an anomaly detection job model. - It is based on analysis configuration details for the job and cardinality +

    Estimate job model memory usage.

    +

    Make an estimation of the memory usage for an anomaly detection job model. + The estimate is based on analysis configuration details for the job and cardinality estimates for the fields it references.

    - ``_ + ``_ :param analysis_config: For a list of the properties that you can specify in the `analysis_config` component of the body of this API. @@ -909,8 +909,8 @@ async def evaluate_data_frame( """ .. raw:: html -

    Evaluate data frame analytics. - The API packages together commonly used evaluation metrics for various types +

    Evaluate data frame analytics.

    +

    The API packages together commonly used evaluation metrics for various types of machine learning features. This has been designed for use on indexes created by data frame analytics. Evaluation requires both a ground truth field and an analytics result field to be present.

    @@ -990,8 +990,8 @@ async def explain_data_frame_analytics( """ .. raw:: html -

    Explain data frame analytics config. - This API provides explanations for a data frame analytics config that either +

    Explain data frame analytics config.

    +

    This API provides explanations for a data frame analytics config that either exists already or one that has not been created yet. The following explanations are provided:

      @@ -2891,8 +2891,8 @@ async def open_job( """ .. raw:: html -

      Open anomaly detection jobs. - An anomaly detection job must be opened to be ready to receive and analyze +

      Open anomaly detection jobs.

      +

      An anomaly detection job must be opened to be ready to receive and analyze data. It can be opened and closed multiple times throughout its lifecycle. When you open a new job, it starts with an empty model. When you open an existing job, the most recent model state is automatically @@ -3082,7 +3082,7 @@ async def preview_data_frame_analytics( .. raw:: html

      Preview features used by data frame analytics. - Previews the extracted features used by a data frame analytics config.

      + Preview the extracted features used by a data frame analytics config.

      ``_ @@ -3821,8 +3821,8 @@ async def put_job( """ .. raw:: html -

      Create an anomaly detection job. - If you include a datafeed_config, you must have read index privileges on the source index. +

      Create an anomaly detection job.

      +

      If you include a datafeed_config, you must have read index privileges on the source index. If you include a datafeed_config but do not provide a query, the datafeed uses {"match_all": {"boost": 1}}.

      @@ -4669,11 +4669,14 @@ async def start_datafeed( path_parts=__path_parts, ) - @_rewrite_parameters() + @_rewrite_parameters( + body_fields=("adaptive_allocations",), + ) async def start_trained_model_deployment( self, *, model_id: str, + adaptive_allocations: t.Optional[t.Mapping[str, t.Any]] = None, cache_size: t.Optional[t.Union[int, str]] = None, deployment_id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, @@ -4688,6 +4691,7 @@ async def start_trained_model_deployment( wait_for: t.Optional[ t.Union[str, t.Literal["fully_allocated", "started", "starting"]] ] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html @@ -4700,6 +4704,9 @@ async def start_trained_model_deployment( :param model_id: The unique identifier of the trained model. Currently, only PyTorch models are supported. + :param adaptive_allocations: Adaptive allocations configuration. When enabled, + the number of allocations is set based on the current load. If adaptive_allocations + is enabled, do not set the number of allocations manually. :param cache_size: The inference cache size (in memory outside the JVM heap) per node for the model. The default value is the same size as the `model_size_bytes`. To disable the cache, `0b` can be provided. @@ -4709,7 +4716,8 @@ async def start_trained_model_deployment( model in memory but use a separate set of threads to evaluate the model. Increasing this value generally increases the throughput. If this setting is greater than the number of hardware threads it will automatically be changed - to a value less than the number of hardware threads. + to a value less than the number of hardware threads. If adaptive_allocations + is enabled, do not set this value, because it’s automatically set. :param priority: The deployment priority. :param queue_capacity: Specifies the number of inference requests that are allowed in the queue. 
After the number of requests exceeds this value, new requests @@ -4729,6 +4737,7 @@ async def start_trained_model_deployment( __path_parts: t.Dict[str, str] = {"model_id": _quote(model_id)} __path = f'/_ml/trained_models/{__path_parts["model_id"]}/deployment/_start' __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} if cache_size is not None: __query["cache_size"] = cache_size if deployment_id is not None: @@ -4753,12 +4762,20 @@ async def start_trained_model_deployment( __query["timeout"] = timeout if wait_for is not None: __query["wait_for"] = wait_for + if not __body: + if adaptive_allocations is not None: + __body["adaptive_allocations"] = adaptive_allocations + if not __body: + __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, + body=__body, endpoint_id="ml.start_trained_model_deployment", path_parts=__path_parts, ) @@ -5540,12 +5557,13 @@ async def update_model_snapshot( ) @_rewrite_parameters( - body_fields=("number_of_allocations",), + body_fields=("adaptive_allocations", "number_of_allocations"), ) async def update_trained_model_deployment( self, *, model_id: str, + adaptive_allocations: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, @@ -5563,12 +5581,16 @@ async def update_trained_model_deployment( :param model_id: The unique identifier of the trained model. Currently, only PyTorch models are supported. + :param adaptive_allocations: Adaptive allocations configuration. When enabled, + the number of allocations is set based on the current load. If adaptive_allocations + is enabled, do not set the number of allocations manually. :param number_of_allocations: The number of model allocations on each node where the model is deployed. All allocations on a node share the same copy of the model in memory but use a separate set of threads to evaluate the model. Increasing this value generally increases the throughput. If this setting is greater than the number of hardware threads it will automatically be changed - to a value less than the number of hardware threads. + to a value less than the number of hardware threads. If adaptive_allocations + is enabled, do not set this value, because it’s automatically set. """ if model_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_id'") @@ -5585,6 +5607,8 @@ async def update_trained_model_deployment( if pretty is not None: __query["pretty"] = pretty if not __body: + if adaptive_allocations is not None: + __body["adaptive_allocations"] = adaptive_allocations if number_of_allocations is not None: __body["number_of_allocations"] = number_of_allocations if not __body: @@ -5619,7 +5643,7 @@ async def upgrade_job_snapshot( .. raw:: html
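
    A sketch of the new `adaptive_allocations` body parameter introduced in this hunk (the model id
    and the allocation bounds are assumptions; the object shape follows the Elasticsearch docs):

        from elasticsearch import AsyncElasticsearch

        client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster

        async def start_deployment():
            return await client.ml.start_trained_model_deployment(
                model_id="my-trained-model",  # assumed PyTorch model id
                wait_for="started",
                adaptive_allocations={
                    "enabled": True,
                    "min_number_of_allocations": 1,
                    "max_number_of_allocations": 4,
                },
                # number_of_allocations is left unset on purpose: with adaptive
                # allocations enabled it is managed automatically.
            )
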

      Upgrade a snapshot. - Upgrades an anomaly detection model snapshot to the latest major version. + Upgrade an anomaly detection model snapshot to the latest major version. Over time, older snapshot formats are deprecated and removed. Anomaly detection jobs support only snapshots that are from the current or previous major version. @@ -5782,7 +5806,7 @@ async def validate_detector(

      Validate an anomaly detection job.

      - ``_ + ``_ :param detector: """ diff --git a/elasticsearch/_async/client/monitoring.py b/elasticsearch/_async/client/monitoring.py index d5e5b5252..761e772f4 100644 --- a/elasticsearch/_async/client/monitoring.py +++ b/elasticsearch/_async/client/monitoring.py @@ -48,7 +48,7 @@ async def bulk( This API is used by the monitoring features to send monitoring data.

      - ``_ + ``_ :param interval: Collection interval (e.g., '10s' or '10000ms') of the payload :param operations: diff --git a/elasticsearch/_async/client/nodes.py b/elasticsearch/_async/client/nodes.py index 99d8fb209..ff8c7b321 100644 --- a/elasticsearch/_async/client/nodes.py +++ b/elasticsearch/_async/client/nodes.py @@ -231,8 +231,8 @@ async def info( """ .. raw:: html -

      Get node information. - By default, the API returns all attributes and core settings for cluster nodes.

      +

      Get node information.

      +

      By default, the API returns all attributes and core settings for cluster nodes.

      ``_ @@ -308,7 +308,7 @@ async def reload_secure_settings( Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password.

      - ``_ + ``_ :param node_id: The names of particular nodes in the cluster to target. :param secure_settings_password: The password for the Elasticsearch keystore. diff --git a/elasticsearch/_async/client/search_application.py b/elasticsearch/_async/client/search_application.py index 724ccdcff..a517db976 100644 --- a/elasticsearch/_async/client/search_application.py +++ b/elasticsearch/_async/client/search_application.py @@ -45,13 +45,13 @@ async def delete( """ .. raw:: html -

      Delete a search application. - Remove a search application and its associated alias. Indices attached to the search application are not removed.

      +

      Delete a search application.

      +

      Remove a search application and its associated alias. Indices attached to the search application are not removed.

      ``_ - :param name: The name of the search application to delete + :param name: The name of the search application to delete. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -234,7 +234,7 @@ async def list( Get information about search applications.

      - ``_ + ``_ :param from_: Starting offset. :param q: Query in the Lucene query string syntax. diff --git a/elasticsearch/_async/client/transform.py b/elasticsearch/_async/client/transform.py index ae7e846d5..7b134118b 100644 --- a/elasticsearch/_async/client/transform.py +++ b/elasticsearch/_async/client/transform.py @@ -41,8 +41,7 @@ async def delete_transform( """ .. raw:: html -

      Delete a transform. - Deletes a transform.

      +

      Delete a transform.

      ``_ @@ -106,7 +105,7 @@ async def get_transform( .. raw:: html

      Get transforms. - Retrieves configuration information for transforms.

      + Get configuration information for transforms.

      ``_ @@ -178,8 +177,8 @@ async def get_transform_stats( """ .. raw:: html -

      Get transform stats. - Retrieves usage information for transforms.

      +

      Get transform stats.

      +

      Get usage information for transforms.

      ``_ @@ -508,9 +507,8 @@ async def reset_transform( """ .. raw:: html -

      Reset a transform. - Resets a transform. - Before you can reset it, you must stop it; alternatively, use the force query parameter. +

      Reset a transform.

      +

      Before you can reset it, you must stop it; alternatively, use the force query parameter. If the destination index was created by the transform, it is deleted.

      @@ -566,11 +564,11 @@ async def schedule_now_transform( """ .. raw:: html -

      Schedule a transform to start now. - Instantly runs a transform to process data.

      -

      If you _schedule_now a transform, it will process the new data instantly, - without waiting for the configured frequency interval. After _schedule_now API is called, - the transform will be processed again at now + frequency unless _schedule_now API +

      Schedule a transform to start now.

      +

      Instantly run a transform to process data. + If you run this API, the transform will process the new data instantly, + without waiting for the configured frequency interval. After the API is called, + the transform will be processed again at now + frequency unless the API is called again in the meantime.

      @@ -621,8 +619,7 @@ async def start_transform( """ .. raw:: html -

      Start a transform. - Starts a transform.

      +

      Start a transform.

      When you start a transform, it creates the destination index if it does not already exist. The number_of_shards is set to 1 and the auto_expand_replicas is set to 0-1. If it is a pivot transform, it deduces the mapping definitions for the destination index from the source indices and the transform aggregations. If fields in the @@ -879,8 +876,8 @@ async def upgrade_transforms( """ .. raw:: html -

      Upgrade all transforms. - Transforms are compatible across minor versions and between supported major versions. +

      Upgrade all transforms.

      +

      Transforms are compatible across minor versions and between supported major versions. However, over time, the format of transform configuration information may change. This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It also cleans up the internal data structures that store the transform state and checkpoints. diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py index 67187220a..705a3914d 100644 --- a/elasticsearch/_sync/client/__init__.py +++ b/elasticsearch/_sync/client/__init__.py @@ -626,6 +626,7 @@ def bulk( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + include_source_on_error: t.Optional[bool] = None, list_executed_pipelines: t.Optional[bool] = None, pipeline: t.Optional[str] = None, pretty: t.Optional[bool] = None, @@ -733,6 +734,8 @@ def bulk( :param operations: :param index: The name of the data stream, index, or index alias to perform bulk actions on. + :param include_source_on_error: True or false if to include the document source + in the error message in case of parsing errors. :param list_executed_pipelines: If `true`, the response will include the ingest pipelines that were run for each index or create. :param pipeline: The pipeline identifier to use to preprocess incoming documents. @@ -790,6 +793,8 @@ def bulk( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if include_source_on_error is not None: + __query["include_source_on_error"] = include_source_on_error if list_executed_pipelines is not None: __query["list_executed_pipelines"] = list_executed_pipelines if pipeline is not None: @@ -982,8 +987,8 @@ def count(

      Count search results. Get the number of documents matching a query.

      -

      The query can either be provided using a simple query string as a parameter or using the Query DSL defined within the request body. - The latter must be nested in a query key, which is the same as the search API.

      +

      The query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body. + The query is optional. When no query is provided, the API uses match_all to count all the documents.

      The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.

      The operation is broadcast across all shards. For each shard ID group, a replica is chosen and the search is run against it. @@ -1025,10 +1030,10 @@ def count( in the result. :param preference: The node or shard the operation should be performed on. By default, it is random. - :param q: The query in Lucene query string syntax. - :param query: Defines the search definition using the Query DSL. The query is - optional, and when not provided, it will use `match_all` to count all the - docs. + :param q: The query in Lucene query string syntax. This parameter cannot be used + with a request body. + :param query: Defines the search query using Query DSL. A request body query + cannot be used with the `q` query string parameter. :param routing: A custom value used to route operations to a specific shard. :param terminate_after: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. @@ -1114,6 +1119,7 @@ def create( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + include_source_on_error: t.Optional[bool] = None, pipeline: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ @@ -1196,6 +1202,8 @@ def create( :param id: A unique identifier for the document. To automatically generate a document ID, use the `POST //_doc/` request format. :param document: + :param include_source_on_error: True or false if to include the document source + in the error message in case of parsing errors. :param pipeline: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final @@ -1244,6 +1252,8 @@ def create( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if include_source_on_error is not None: + __query["include_source_on_error"] = include_source_on_error if pipeline is not None: __query["pipeline"] = pipeline if pretty is not None: @@ -1762,14 +1772,16 @@ def delete_script( Deletes a stored script or search template.

      - ``_ + ``_ - :param id: Identifier for the stored script or search template. - :param master_timeout: Period to wait for a connection to the master node. If - no response is received before the timeout expires, the request fails and - returns an error. - :param timeout: Period to wait for a response. If no response is received before - the timeout expires, the request fails and returns an error. + :param id: The identifier for the stored script or search template. + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. It can also be set to `-1` to indicate that the request + should never timeout. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. It can + also be set to `-1` to indicate that the request should never timeout. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -2069,34 +2081,44 @@ def explain( .. raw:: html

      Explain a document match result. - Returns information about why a specific document matches, or doesn’t match, a query.

      + Get information about why a specific document matches, or doesn't match, a query. + It computes a score explanation for a query and a specific document.

      ``_ - :param index: Index names used to limit the request. Only a single index name - can be provided to this parameter. - :param id: Defines the document ID. + :param index: Index names that are used to limit the request. Only a single index + name can be provided to this parameter. + :param id: The document identifier. :param analyze_wildcard: If `true`, wildcard and prefix queries are analyzed. - :param analyzer: Analyzer to use for the query string. This parameter can only - be used when the `q` query string parameter is specified. + This parameter can be used only when the `q` query string parameter is specified. + :param analyzer: The analyzer to use for the query string. This parameter can + be used only when the `q` query string parameter is specified. :param default_operator: The default operator for query string query: `AND` or - `OR`. - :param df: Field to use as default where no field prefix is given in the query - string. + `OR`. This parameter can be used only when the `q` query string parameter + is specified. + :param df: The field to use as default where no field prefix is given in the + query string. This parameter can be used only when the `q` query string parameter + is specified. :param lenient: If `true`, format-based query failures (such as providing text - to a numeric field) in the query string will be ignored. - :param preference: Specifies the node or shard the operation should be performed - on. Random by default. - :param q: Query in the Lucene query string syntax. + to a numeric field) in the query string will be ignored. This parameter can + be used only when the `q` query string parameter is specified. + :param preference: The node or shard the operation should be performed on. It + is random by default. + :param q: The query in the Lucene query string syntax. :param query: Defines the search definition using the Query DSL. - :param routing: Custom value used to route operations to a specific shard. - :param source: True or false to return the `_source` field or not, or a list + :param routing: A custom value used to route operations to a specific shard. + :param source: `True` or `false` to return the `_source` field or not or a list of fields to return. :param source_excludes: A comma-separated list of source fields to exclude from - the response. + the response. You can also use this parameter to exclude fields from the + subset specified in `_source_includes` query parameter. If the `_source` + parameter is `false`, this parameter is ignored. :param source_includes: A comma-separated list of source fields to include in - the response. + the response. If this parameter is specified, only these source fields are + returned. You can exclude fields from this subset using the `_source_excludes` + query parameter. If the `_source` parameter is `false`, this parameter is + ignored. :param stored_fields: A comma-separated list of stored fields to return in the response. """ @@ -2200,7 +2222,7 @@ def field_caps( ``_ - :param index: Comma-separated list of data streams, indices, and aliases used + :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. :param allow_no_indices: If false, the request returns an error if any wildcard @@ -2208,25 +2230,32 @@ def field_caps( This behavior applies even if the request targets other open indices. 
For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar. - :param expand_wildcards: Type of index that wildcard patterns can match. If the - request can target data streams, this argument determines whether wildcard - expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. - :param fields: List of fields to retrieve capabilities for. Wildcard (`*`) expressions - are supported. - :param filters: An optional set of filters: can include +metadata,-metadata,-nested,-multifield,-parent + :param expand_wildcards: The type of index that wildcard patterns can match. + If the request can target data streams, this argument determines whether + wildcard expressions match hidden data streams. Supports comma-separated + values, such as `open,hidden`. + :param fields: A list of fields to retrieve capabilities for. Wildcard (`*`) + expressions are supported. + :param filters: A comma-separated list of filters to apply to the response. :param ignore_unavailable: If `true`, missing or closed indices are not included in the response. :param include_empty_fields: If false, empty fields are not included in the response. :param include_unmapped: If true, unmapped fields are included in the response. - :param index_filter: Allows to filter indices if the provided query rewrites - to match_none on every shard. - :param runtime_mappings: Defines ad-hoc runtime fields in the request similar + :param index_filter: Filter indices if the provided query rewrites to `match_none` + on every shard. IMPORTANT: The filtering is done on a best-effort basis, + it uses index statistics and mappings to rewrite queries to `match_none` + instead of fully running the request. For instance a range query over a date + field can rewrite to `match_none` if all documents within a shard (including + deleted documents) are outside of the provided range. However, not all queries + can rewrite to `match_none` so this API may return an index even if the provided + filter matches no document. + :param runtime_mappings: Define ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. - :param types: Only return results for fields that have one of the types in the - list + :param types: A comma-separated list of field types to include. Any fields that + do not match one of these types will be excluded from the results. It defaults + to empty, meaning that all field types are returned. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: @@ -2459,10 +2488,13 @@ def get_script( Retrieves a stored script or search template.

      - ``_ + ``_ - :param id: Identifier for the stored script or search template. - :param master_timeout: Specify timeout for connection to master + :param id: The identifier for the stored script or search template. + :param master_timeout: The period to wait for the master node. If the master + node is not available before the timeout expires, the request fails and returns + an error. It can also be set to `-1` to indicate that the request should + never timeout. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -2505,7 +2537,7 @@ def get_script_context(

      Get a list of supported script contexts and their methods.

      - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_script_context" @@ -2544,7 +2576,7 @@ def get_script_languages(

      Get a list of available script types, languages, and contexts.

      - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_script_language" @@ -2764,6 +2796,7 @@ def index( human: t.Optional[bool] = None, if_primary_term: t.Optional[int] = None, if_seq_no: t.Optional[int] = None, + include_source_on_error: t.Optional[bool] = None, op_type: t.Optional[t.Union[str, t.Literal["create", "index"]]] = None, pipeline: t.Optional[str] = None, pretty: t.Optional[bool] = None, @@ -2889,6 +2922,8 @@ def index( term. :param if_seq_no: Only perform the operation if the document has this sequence number. + :param include_source_on_error: True or false if to include the document source + in the error message in case of parsing errors. :param op_type: Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `/_create` @@ -2953,6 +2988,8 @@ def index( __query["if_primary_term"] = if_primary_term if if_seq_no is not None: __query["if_seq_no"] = if_seq_no + if include_source_on_error is not None: + __query["include_source_on_error"] = include_source_on_error if op_type is not None: __query["op_type"] = op_type if pipeline is not None: @@ -3067,30 +3104,37 @@ def knn_search( This means the results returned are not always the true k closest neighbors.

      The kNN search API supports restricting the search using a filter. The search will return the top k documents that also match the filter query.

      +

      A kNN search response has the exact same structure as a search API response. + However, certain sections have a meaning specific to kNN search:

      +
        +
      • The document _score is determined by the similarity between the query and document vector.
      • +
      • The hits.total object contains the total number of nearest neighbor candidates considered, which is num_candidates * num_shards. The hits.total.relation will always be eq, indicating an exact value.
      • +
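
    A minimal sketch of the call (index, field, and vector values are made up):

        from elasticsearch import Elasticsearch

        client = Elasticsearch("http://localhost:9200")  # assumed local cluster

        resp = client.knn_search(
            index="my-image-index",  # assumed index with a dense_vector field
            knn={
                "field": "image_vector",
                "query_vector": [0.12, -0.34, 0.91],
                "k": 10,
                "num_candidates": 100,
            },
        )
        # hits.total reflects num_candidates * num_shards, with relation "eq"
        print(resp["hits"]["total"])
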
      - ``_ + ``_ :param index: A comma-separated list of index names to search; use `_all` or - to perform the operation on all indices - :param knn: kNN query to execute + to perform the operation on all indices. + :param knn: The kNN query to run. :param docvalue_fields: The request returns doc values for field names matching - these patterns in the hits.fields property of the response. Accepts wildcard - (*) patterns. + these patterns in the `hits.fields` property of the response. It accepts + wildcard (`*`) patterns. :param fields: The request returns values for field names matching these patterns - in the hits.fields property of the response. Accepts wildcard (*) patterns. - :param filter: Query to filter the documents that can match. The kNN search will - return the top `k` documents that also match this filter. The value can be - a single query or a list of queries. If `filter` isn't provided, all documents - are allowed to match. - :param routing: A comma-separated list of specific routing values + in the `hits.fields` property of the response. It accepts wildcard (`*`) + patterns. + :param filter: A query to filter the documents that can match. The kNN search + will return the top `k` documents that also match this filter. The value + can be a single query or a list of queries. If `filter` isn't provided, all + documents are allowed to match. + :param routing: A comma-separated list of specific routing values. :param source: Indicates which source fields are returned for matching documents. - These fields are returned in the hits._source property of the search response. - :param stored_fields: List of stored fields to return as part of a hit. If no - fields are specified, no stored fields are included in the response. If this - field is specified, the _source parameter defaults to false. You can pass - _source: true to return both source fields and stored fields in the search - response. + These fields are returned in the `hits._source` property of the search response. + :param stored_fields: A list of stored fields to return as part of a hit. If + no fields are specified, no stored fields are included in the response. If + this field is specified, the `_source` parameter defaults to `false`. You + can pass `_source: true` to return both source fields and stored fields in + the search response. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -3174,6 +3218,14 @@ def mget(

      Get multiple JSON documents by ID from one or more indices. If you specify an index in the request URI, you only need to specify the document IDs in the request body. To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.

      +

      Filter source fields

      +

      By default, the _source field is returned for every document (if stored). + Use the _source and _source_include or source_exclude attributes to filter what fields are returned for a particular document. + You can include the _source, _source_includes, and _source_excludes query parameters in the request URI to specify the defaults to use when there are no per-document instructions.

      +

      Get stored fields

      +

      Use the stored_fields attribute to specify the set of stored fields you want to retrieve. + Any requested fields that are not stored are ignored. + You can include the stored_fields query parameter in the request URI to specify the defaults to use when there are no per-document instructions.
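
    A short sketch combining the request-level defaults described above (index, ids, and field
    names are assumptions):

        from elasticsearch import Elasticsearch

        client = Elasticsearch("http://localhost:9200")  # assumed local cluster

        resp = client.mget(
            index="my-index",
            ids=["1", "2"],
            source_includes=["title", "author"],  # default _source filter for the listed documents
        )
        for doc in resp["docs"]:
            print(doc.get("_source"))
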

      ``_ @@ -3444,22 +3496,32 @@ def msearch_template( .. raw:: html

      Run multiple templated searches.

      +

      Run multiple templated searches with a single request. + If you are providing a text file or text input to curl, use the --data-binary flag instead of -d to preserve newlines. + For example:

      +
      $ cat requests
      +          { "index": "my-index" }
      +          { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }}
      +          { "index": "my-other-index" }
      +          { "id": "my-other-search-template", "params": { "query_type": "match_all" }}
      +
      +          $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo
      +          
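
    The same requests expressed through the Python client might look like this (index and template
    ids reuse the illustrative names above):

        from elasticsearch import Elasticsearch

        client = Elasticsearch("http://localhost:9200")  # assumed local cluster

        resp = client.msearch_template(
            search_templates=[
                {"index": "my-index"},
                {"id": "my-search-template",
                 "params": {"query_string": "hello world", "from": 0, "size": 10}},
                {"index": "my-other-index"},
                {"id": "my-other-search-template", "params": {"query_type": "match_all"}},
            ],
        )
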
      - ``_ + ``_ :param search_templates: - :param index: Comma-separated list of data streams, indices, and aliases to search. - Supports wildcards (`*`). To search all data streams and indices, omit this - parameter or use `*`. + :param index: A comma-separated list of data streams, indices, and aliases to + search. It supports wildcards (`*`). To search all data streams and indices, + omit this parameter or use `*`. :param ccs_minimize_roundtrips: If `true`, network round-trips are minimized for cross-cluster search requests. - :param max_concurrent_searches: Maximum number of concurrent searches the API - can run. + :param max_concurrent_searches: The maximum number of concurrent searches the + API can run. :param rest_total_hits_as_int: If `true`, the response returns `hits.total` as an integer. If `false`, it returns `hits.total` as an object. - :param search_type: The type of the search operation. Available options: `query_then_fetch`, - `dfs_query_then_fetch`. + :param search_type: The type of the search operation. :param typed_keys: If `true`, the response prefixes aggregation and suggester names with their respective types. """ @@ -3542,34 +3604,38 @@ def mtermvectors( .. raw:: html

      Get multiple term vectors.

      -

      You can specify existing documents by index and ID or provide artificial documents in the body of the request. +

      Get multiple term vectors with a single request. + You can specify existing documents by index and ID or provide artificial documents in the body of the request. You can specify the index in the request body or request URI. The response contains a docs array with all the fetched termvectors. Each element has the structure provided by the termvectors API.

      +

      Artificial documents

      +

      You can also use mtermvectors to generate term vectors for artificial documents provided in the body of the request. + The mapping used is determined by the specified _index.
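
    A sketch mixing an existing document with an artificial one, as described above (all names are
    assumptions):

        from elasticsearch import Elasticsearch

        client = Elasticsearch("http://localhost:9200")  # assumed local cluster

        resp = client.mtermvectors(
            index="my-index",
            docs=[
                {"_id": "1"},                                      # existing document
                {"doc": {"message": "some artificial document"}},  # mapped via my-index
            ],
            term_statistics=True,
        )
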

      ``_ - :param index: Name of the index that contains the documents. - :param docs: Array of existing or artificial documents. + :param index: The name of the index that contains the documents. + :param docs: An array of existing or artificial documents. :param field_statistics: If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. - :param fields: Comma-separated list or wildcard expressions of fields to include - in the statistics. Used as the default list unless a specific field list - is provided in the `completion_fields` or `fielddata_fields` parameters. - :param ids: Simplified syntax to specify documents by their ID if they're in + :param fields: A comma-separated list or wildcard expressions of fields to include + in the statistics. It is used as the default list unless a specific field + list is provided in the `completion_fields` or `fielddata_fields` parameters. + :param ids: A simplified syntax to specify documents by their ID if they're in the same index. :param offsets: If `true`, the response includes term offsets. :param payloads: If `true`, the response includes term payloads. :param positions: If `true`, the response includes term positions. - :param preference: Specifies the node or shard the operation should be performed - on. Random by default. + :param preference: The node or shard the operation should be performed on. It + is random by default. :param realtime: If true, the request is real-time as opposed to near-real-time. - :param routing: Custom value used to route operations to a specific shard. + :param routing: A custom value used to route operations to a specific shard. :param term_statistics: If true, the response includes term frequency and document frequency. :param version: If `true`, returns the document version as part of a hit. - :param version_type: Specific version type. + :param version_type: The version type. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: @@ -3782,20 +3848,21 @@ def put_script( Creates or updates a stored script or search template.

      - ``_ - - :param id: Identifier for the stored script or search template. Must be unique - within the cluster. - :param script: Contains the script or search template, its parameters, and its - language. - :param context: Context in which the script or search template should run. To - prevent errors, the API immediately compiles the script or template in this - context. - :param master_timeout: Period to wait for a connection to the master node. If - no response is received before the timeout expires, the request fails and - returns an error. - :param timeout: Period to wait for a response. If no response is received before - the timeout expires, the request fails and returns an error. + ``_ + + :param id: The identifier for the stored script or search template. It must be + unique within the cluster. + :param script: The script or search template, its parameters, and its language. + :param context: The context in which the script or search template should run. + To prevent errors, the API immediately compiles the script or template in + this context. + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. It can also be set to `-1` to indicate that the request + should never timeout. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. It can + also be set to `-1` to indicate that the request should never timeout. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -3875,7 +3942,7 @@ def rank_eval( :param requests: A set of typical search requests, together with their provided ratings. - :param index: Comma-separated list of data streams, indices, and index aliases + :param index: A comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. @@ -4285,13 +4352,13 @@ def render_search_template( ``_ - :param id: ID of the search template to render. If no `source` is specified, + :param id: The ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required. :param file: :param params: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. - :param source: An inline search template. Supports the same parameters as the - search API's request body. These parameters also support Mustache variables. + :param source: An inline search template. It supports the same parameters as + the search API's request body. These parameters also support Mustache variables. If no `id` or `` is specified, this parameter is required. """ __path_parts: t.Dict[str, str] @@ -4340,7 +4407,24 @@ def render_search_template( def scripts_painless_execute( self, *, - context: t.Optional[str] = None, + context: t.Optional[ + t.Union[ + str, + t.Literal[ + "boolean_field", + "composite_field", + "date_field", + "double_field", + "filter", + "geo_point_field", + "ip_field", + "keyword_field", + "long_field", + "painless_test", + "score", + ], + ] + ] = None, context_setup: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, @@ -4352,15 +4436,22 @@ def scripts_painless_execute( """ .. 
raw:: html -

      Run a script. - Runs a script and returns a result.

      +

      Run a script.

      +

      Runs a script and returns a result. + Use this API to build and test scripts, such as when defining a script for a runtime field. + This API requires very few dependencies and is especially useful if you don't have permissions to write documents on a cluster.

      +

      The API uses several contexts, which control how scripts are run, what variables are available at runtime, and what the return type is.

      +

      Each context requires a script, but additional parameters depend on the context you're using for that script.
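
    A minimal sketch using the default `painless_test` context (the script and parameters mirror
    the reference example; no index setup is required):

        from elasticsearch import Elasticsearch

        client = Elasticsearch("http://localhost:9200")  # assumed local cluster

        resp = client.scripts_painless_execute(
            script={
                "source": "params.count / params.total",
                "params": {"count": 100.0, "total": 1000.0},
            },
        )
        print(resp["result"])  # "0.1" in the painless_test context
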

      ``_ - :param context: The context that the script should run in. - :param context_setup: Additional parameters for the `context`. - :param script: The Painless script to execute. + :param context: The context that the script should run in. NOTE: Result ordering + in the field contexts is not guaranteed. + :param context_setup: Additional parameters for the `context`. NOTE: This parameter + is required for all contexts except `painless_test`, which is the default + if no value is provided for `context`. + :param script: The Painless script to run. """ __path_parts: t.Dict[str, str] = {} __path = "/_scripts/painless/_execute" @@ -4426,13 +4517,13 @@ def scroll(

      IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.

      - ``_ + ``_ - :param scroll_id: Scroll ID of the search. + :param scroll_id: The scroll ID of the search. :param rest_total_hits_as_int: If true, the API response’s hit.total property is returned as an integer. If false, the API response’s hit.total property is returned as an object. - :param scroll: Period to retain the search context for scrolling. + :param scroll: The period to retain the search context for scrolling. """ if scroll_id is None and body is None: raise ValueError("Empty value passed for parameter 'scroll_id'") @@ -5071,7 +5162,315 @@ def search_mvt( .. raw:: html

      Search a vector tile.

      -

      Search a vector tile for geospatial values.

      +

      Search a vector tile for geospatial values. + Before using this API, you should be familiar with the Mapbox vector tile specification. + The API returns results as a binary mapbox vector tile.

      +

      Internally, Elasticsearch translates a vector tile search API request into a search containing:

      +
        +
      • A geo_bounding_box query on the <field>. The query uses the <zoom>/<x>/<y> tile as a bounding box.
      • +
      • A geotile_grid or geohex_grid aggregation on the <field>. The grid_agg parameter determines the aggregation type. The aggregation uses the <zoom>/<x>/<y> tile as a bounding box.
      • +
      • Optionally, a geo_bounds aggregation on the <field>. The search only includes this aggregation if the exact_bounds parameter is true.
      • +
      • If the optional parameter with_labels is true, the internal search will include a dynamic runtime field that calls the getLabelPosition function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label.
      • +
      +

      For example, Elasticsearch may translate a vector tile search API request with a grid_agg argument of geotile and an exact_bounds argument of true into the following search

      +
      GET my-index/_search
      +          {
      +            "size": 10000,
      +            "query": {
      +              "geo_bounding_box": {
      +                "my-geo-field": {
      +                  "top_left": {
      +                    "lat": -40.979898069620134,
      +                    "lon": -45
      +                  },
      +                  "bottom_right": {
      +                    "lat": -66.51326044311186,
      +                    "lon": 0
      +                  }
      +                }
      +              }
      +            },
      +            "aggregations": {
      +              "grid": {
      +                "geotile_grid": {
      +                  "field": "my-geo-field",
      +                  "precision": 11,
      +                  "size": 65536,
      +                  "bounds": {
      +                    "top_left": {
      +                      "lat": -40.979898069620134,
      +                      "lon": -45
      +                    },
      +                    "bottom_right": {
      +                      "lat": -66.51326044311186,
      +                      "lon": 0
      +                    }
      +                  }
      +                }
      +              },
      +              "bounds": {
      +                "geo_bounds": {
      +                  "field": "my-geo-field",
      +                  "wrap_longitude": false
      +                }
      +              }
      +            }
      +          }
      +          
      +

      The API returns results as a binary Mapbox vector tile. + Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers:

      +
        +
      • A hits layer containing a feature for each <field> value matching the geo_bounding_box query.
      • +
      • An aggs layer containing a feature for each cell of the geotile_grid or geohex_grid. The layer only contains features for cells with matching data.
      • +
      • A meta layer containing: +
          +
        • A feature containing a bounding box. By default, this is the bounding box of the tile.
        • +
        • Value ranges for any sub-aggregations on the geotile_grid or geohex_grid.
        • +
        • Metadata for the search.
        • +
        +
      • +
      +

      The API only returns features that can display at its zoom level. + For example, if a polygon feature has no area at its zoom level, the API omits it. + The API returns errors as UTF-8 encoded JSON.

      +

      IMPORTANT: You can specify several options for this API as either a query parameter or request body parameter. + If you specify both parameters, the query parameter takes precedence.
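
As a hedged usage sketch with the Python client (the index, geo field, and tile coordinates are assumptions), a request for a single tile might look like this:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # connection details are an assumption

    tile = client.search_mvt(
        index="my-index-000001",
        field="my-geo-field",
        zoom=7,
        x=37,
        y=48,
        grid_agg="geotile",
        grid_precision=8,
        exact_bounds=False,
        extent=4096,
    )

    # The response body is the binary Mapbox vector tile (PBF) itself.
    with open("tile.pbf", "wb") as f:
        f.write(tile.body)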

      +

      Grid precision for geotile

      +

For a grid_agg of geotile, you can use cells in the aggs layer as tiles for lower zoom levels. + grid_precision represents the additional zoom levels available through these cells. The final precision is computed as follows: <zoom> + grid_precision. + For example, if <zoom> is 7 and grid_precision is 8, then the geotile_grid aggregation will use a precision of 15. + The maximum final precision is 29. + The grid_precision also determines the number of cells for the grid as follows: (2^grid_precision) x (2^grid_precision). + For example, a value of 8 divides the tile into a grid of 256 x 256 cells. + The aggs layer only contains features for cells with matching data.
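
A small worked example of the arithmetic above (plain Python, no client involved):

    zoom = 7
    grid_precision = 8

    final_precision = zoom + grid_precision          # 15, capped at 29 by Elasticsearch
    cells_per_side = 2 ** grid_precision             # 256
    cells_in_tile = cells_per_side * cells_per_side  # up to 65536 geotile_grid cells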

      +

      Grid precision for geohex

      +

      For a grid_agg of geohex, Elasticsearch uses <zoom> and grid_precision to calculate a final precision as follows: <zoom> + grid_precision.

      +

      This precision determines the H3 resolution of the hexagonal cells produced by the geohex aggregation. + The following table maps the H3 resolution for each precision. + For example, if <zoom> is 3 and grid_precision is 3, the precision is 6. + At a precision of 6, hexagonal cells have an H3 resolution of 2. + If <zoom> is 3 and grid_precision is 4, the precision is 7. + At a precision of 7, hexagonal cells have an H3 resolution of 3.

Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio
1         | 4                | 0             | 122             | 30.5
2         | 16               | 0             | 122             | 7.625
3         | 64               | 1             | 842             | 13.15625
4         | 256              | 1             | 842             | 3.2890625
5         | 1024             | 2             | 5882            | 5.744140625
6         | 4096             | 2             | 5882            | 1.436035156
7         | 16384            | 3             | 41162           | 2.512329102
8         | 65536            | 3             | 41162           | 0.6280822754
9         | 262144           | 4             | 288122          | 1.099098206
10        | 1048576          | 4             | 288122          | 0.2747745514
11        | 4194304          | 5             | 2016842         | 0.4808526039
12        | 16777216         | 6             | 14117882        | 0.8414913416
13        | 67108864         | 6             | 14117882        | 0.2103728354
14        | 268435456        | 7             | 98825162        | 0.3681524172
15        | 1073741824       | 8             | 691776122       | 0.644266719
16        | 4294967296       | 8             | 691776122       | 0.1610666797
17        | 17179869184      | 9             | 4842432842      | 0.2818666889
18        | 68719476736      | 10            | 33897029882     | 0.4932667053
19        | 274877906944     | 11            | 237279209162    | 0.8632167343
20        | 1099511627776    | 11            | 237279209162    | 0.2158041836
21        | 4398046511104    | 12            | 1660954464122   | 0.3776573213
22        | 17592186044416   | 13            | 11626681248842  | 0.6609003122
23        | 70368744177664   | 13            | 11626681248842  | 0.165225078
24        | 281474976710656  | 14            | 81386768741882  | 0.2891438866
25        | 1125899906842620 | 15            | 569707381193162 | 0.5060018015
26        | 4503599627370500 | 15            | 569707381193162 | 0.1265004504
27        | 18014398509482000 | 15           | 569707381193162 | 0.03162511259
28        | 72057594037927900 | 15           | 569707381193162 | 0.007906278149
29        | 288230376151712000 | 15          | 569707381193162 | 0.001976569537

      Hexagonal cells don't align perfectly on a vector tile. + Some cells may intersect more than one vector tile. + To compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level. + Elasticsearch uses the H3 resolution that is closest to the corresponding geotile density.

      ``_ @@ -5081,43 +5480,55 @@ def search_mvt( :param zoom: Zoom level for the vector tile to search :param x: X coordinate for the vector tile to search :param y: Y coordinate for the vector tile to search - :param aggs: Sub-aggregations for the geotile_grid. Supports the following aggregation - types: - avg - cardinality - max - min - sum - :param buffer: Size, in pixels, of a clipping buffer outside the tile. This allows - renderers to avoid outline artifacts from geometries that extend past the - extent of the tile. - :param exact_bounds: If false, the meta layer’s feature is the bounding box of - the tile. If true, the meta layer’s feature is a bounding box resulting from - a geo_bounds aggregation. The aggregation runs on values that intersect - the // tile with wrap_longitude set to false. The resulting bounding - box may be larger than the vector tile. - :param extent: Size, in pixels, of a side of the tile. Vector tiles are square + :param aggs: Sub-aggregations for the geotile_grid. It supports the following + aggregation types: - `avg` - `boxplot` - `cardinality` - `extended stats` + - `max` - `median absolute deviation` - `min` - `percentile` - `percentile-rank` + - `stats` - `sum` - `value count` The aggregation names can't start with + `_mvt_`. The `_mvt_` prefix is reserved for internal aggregations. + :param buffer: The size, in pixels, of a clipping buffer outside the tile. This + allows renderers to avoid outline artifacts from geometries that extend past + the extent of the tile. + :param exact_bounds: If `false`, the meta layer's feature is the bounding box + of the tile. If `true`, the meta layer's feature is a bounding box resulting + from a `geo_bounds` aggregation. The aggregation runs on values that + intersect the `//` tile with `wrap_longitude` set to `false`. + The resulting bounding box may be larger than the vector tile. + :param extent: The size, in pixels, of a side of the tile. Vector tiles are square with equal sides. - :param fields: Fields to return in the `hits` layer. Supports wildcards (`*`). - This parameter does not support fields with array values. Fields with array - values may return inconsistent results. - :param grid_agg: Aggregation used to create a grid for the `field`. + :param fields: The fields to return in the `hits` layer. It supports wildcards + (`*`). This parameter does not support fields with array values. Fields with + array values may return inconsistent results. + :param grid_agg: The aggregation used to create a grid for the `field`. :param grid_precision: Additional zoom levels available through the aggs layer. - For example, if is 7 and grid_precision is 8, you can zoom in up to - level 15. Accepts 0-8. If 0, results don’t include the aggs layer. + For example, if `` is `7` and `grid_precision` is `8`, you can zoom + in up to level 15. Accepts 0-8. If 0, results don't include the aggs layer. :param grid_type: Determines the geometry type for features in the aggs layer. - In the aggs layer, each feature represents a geotile_grid cell. If 'grid' - each feature is a Polygon of the cells bounding box. If 'point' each feature + In the aggs layer, each feature represents a `geotile_grid` cell. If `grid, + each feature is a polygon of the cells bounding box. If `point`, each feature is a Point that is the centroid of the cell. - :param query: Query DSL used to filter documents for the search. + :param query: The query DSL used to filter documents for the search. 
:param runtime_mappings: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. - :param size: Maximum number of features to return in the hits layer. Accepts - 0-10000. If 0, results don’t include the hits layer. - :param sort: Sorts features in the hits layer. By default, the API calculates - a bounding box for each feature. It sorts features based on this box’s diagonal + :param size: The maximum number of features to return in the hits layer. Accepts + 0-10000. If 0, results don't include the hits layer. + :param sort: Sort the features in the hits layer. By default, the API calculates + a bounding box for each feature. It sorts features based on this box's diagonal length, from longest to shortest. - :param track_total_hits: Number of hits matching the query to count accurately. + :param track_total_hits: The number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. :param with_labels: If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features. + * `Point` and `MultiPoint` features will have one of the points selected. + * `Polygon` and `MultiPolygon` features will have a single point generated, + either the centroid, if it is within the polygon, or another point within + the polygon selected from the sorted triangle-tree. * `LineString` features + will likewise provide a roughly central point selected from the triangle-tree. + * The aggregation results will provide one central point for each aggregation + bucket. All attributes from the original features will also be copied to + the new label features. In addition, the new features will be distinguishable + using the tag `_mvt_label_position`. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -5232,13 +5643,15 @@ def search_shards(

      Get the search shards.

      Get the indices and shards that a search request would be run against. This information can be useful for working out issues or planning optimizations with routing and shard preferences. - When filtered aliases are used, the filter is returned as part of the indices section.

      + When filtered aliases are used, the filter is returned as part of the indices section.

      +

      If the Elasticsearch security features are enabled, you must have the view_index_metadata or manage index privilege for the target data stream, index, or alias.

      ``_ - :param index: Returns the indices and shards that a search request would be executed - against. + :param index: A comma-separated list of data streams, indices, and aliases to + search. It supports wildcards (`*`). To search all data streams and indices, + omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For @@ -5252,10 +5665,13 @@ def search_shards( a missing or closed index. :param local: If `true`, the request retrieves information from the local node only. - :param master_timeout: Period to wait for a connection to the master node. - :param preference: Specifies the node or shard the operation should be performed - on. Random by default. - :param routing: Custom value used to route operations to a specific shard. + :param master_timeout: The period to wait for a connection to the master node. + If the master node is not available before the timeout expires, the request + fails and returns an error. IT can also be set to `-1` to indicate that the + request should never timeout. + :param preference: The node or shard the operation should be performed on. It + is random by default. + :param routing: A custom value used to route operations to a specific shard. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: @@ -5342,10 +5758,10 @@ def search_template(

      Run a search with a search template.

      - ``_ + ``_ - :param index: Comma-separated list of data streams, indices, and aliases to search. - Supports wildcards (*). + :param index: A comma-separated list of data streams, indices, and aliases to + search. It supports wildcards (`*`). :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For @@ -5353,32 +5769,34 @@ def search_template( with `foo` but no index starts with `bar`. :param ccs_minimize_roundtrips: If `true`, network round-trips are minimized for cross-cluster search requests. - :param expand_wildcards: Type of index that wildcard patterns can match. If the - request can target data streams, this argument determines whether wildcard - expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + :param expand_wildcards: The type of index that wildcard patterns can match. + If the request can target data streams, this argument determines whether + wildcard expressions match hidden data streams. Supports comma-separated + values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, + `hidden`, `none`. :param explain: If `true`, returns detailed information about score calculation - as part of each hit. - :param id: ID of the search template to use. If no source is specified, this - parameter is required. + as part of each hit. If you specify both this and the `explain` query parameter, + the API uses only the query parameter. + :param id: The ID of the search template to use. If no `source` is specified, + this parameter is required. :param ignore_throttled: If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param params: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. - :param preference: Specifies the node or shard the operation should be performed - on. Random by default. + :param preference: The node or shard the operation should be performed on. It + is random by default. :param profile: If `true`, the query execution is profiled. - :param rest_total_hits_as_int: If true, hits.total are rendered as an integer - in the response. - :param routing: Custom value used to route operations to a specific shard. + :param rest_total_hits_as_int: If `true`, `hits.total` is rendered as an integer + in the response. If `false`, it is rendered as an object. + :param routing: A custom value used to route operations to a specific shard. :param scroll: Specifies how long a consistent view of the index should be maintained for scrolled search. :param search_type: The type of the search operation. :param source: An inline search template. Supports the same parameters as the - search API's request body. Also supports Mustache variables. If no id is - specified, this parameter is required. + search API's request body. It also supports Mustache variables. If no `id` + is specified, this parameter is required. :param typed_keys: If `true`, the response prefixes aggregation and suggester names with their respective types. """ @@ -5476,30 +5894,35 @@ def terms_enum(

      Get terms in an index.

      Discover terms that match a partial string in an index. - This "terms enum" API is designed for low-latency look-ups used in auto-complete scenarios.

      -

      If the complete property in the response is false, the returned terms set may be incomplete and should be treated as approximate. - This can occur due to a few reasons, such as a request timeout or a node error.

      -

      NOTE: The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents.

      + This API is designed for low-latency look-ups used in auto-complete scenarios.

      +
      +

      info + The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents.
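
As a rough autocomplete-style sketch with the Python client (the index and field names and the prefix are assumptions):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # connection details are an assumption

    resp = client.terms_enum(
        index="my-index-000001",   # hypothetical index
        field="tags",              # hypothetical keyword field
        string="kiba",             # prefix typed by the user
        size=10,
        case_insensitive=True,
    )
    if not resp["complete"]:
        print("warning: the returned terms may be partial")
    print(resp["terms"])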

      +
      ``_ - :param index: Comma-separated list of data streams, indices, and index aliases - to search. Wildcard (*) expressions are supported. + :param index: A comma-separated list of data streams, indices, and index aliases + to search. Wildcard (`*`) expressions are supported. To search all data streams + or indices, omit this parameter or use `*` or `_all`. :param field: The string to match at the start of indexed terms. If not provided, all terms in the field are considered. - :param case_insensitive: When true the provided search string is matched against + :param case_insensitive: When `true`, the provided search string is matched against index terms without case sensitivity. - :param index_filter: Allows to filter an index shard if the provided query rewrites - to match_none. - :param search_after: - :param size: How many matching terms to return. - :param string: The string after which terms in the index should be returned. - Allows for a form of pagination if the last result from one request is passed - as the search_after parameter for a subsequent request. - :param timeout: The maximum length of time to spend collecting results. Defaults - to "1s" (one second). If the timeout is exceeded the complete flag set to - false in the response and the results may be partial or empty. + :param index_filter: Filter an index shard if the provided query rewrites to + `match_none`. + :param search_after: The string after which terms in the index should be returned. + It allows for a form of pagination if the last result from one request is + passed as the `search_after` parameter for a subsequent request. + :param size: The number of matching terms to return. + :param string: The string to match at the start of indexed terms. If it is not + provided, all terms in the field are considered. > info > The prefix string + cannot be larger than the largest possible keyword value, which is Lucene's + term byte-length limit of 32766. + :param timeout: The maximum length of time to spend collecting results. If the + timeout is exceeded the `complete` flag set to `false` in the response and + the results may be partial or empty. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -5582,32 +6005,73 @@ def termvectors(

      Get term vector information.

      Get information and statistics about terms in the fields of a particular document.

      +

      You can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request. + You can specify the fields you are interested in through the fields parameter or by adding the fields to the request body. + For example:

      +
      GET /my-index-000001/_termvectors/1?fields=message
      +          
      +

      Fields can be specified using wildcards, similar to the multi match query.

      +

      Term vectors are real-time by default, not near real-time. + This can be changed by setting realtime parameter to false.

      +

      You can request three types of values: term information, term statistics, and field statistics. + By default, all term information and field statistics are returned for all fields but term statistics are excluded.

      +

      Term information

      +
        +
      • term frequency in the field (always returned)
      • +
      • term positions (positions: true)
      • +
      • start and end offsets (offsets: true)
      • +
      • term payloads (payloads: true), as base64 encoded bytes
      • +
      +

      If the requested information wasn't stored in the index, it will be computed on the fly if possible. + Additionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user.

      +
      +

      warn + Start and end offsets assume UTF-16 encoding is being used. If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16.

      +
      +

      Behaviour

      +

      The term and field statistics are not accurate. + Deleted documents are not taken into account. + The information is only retrieved for the shard the requested document resides in. + The term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context. + By default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected. + Use routing only to hit a particular shard.
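
A minimal sketch of requesting term vectors with the Python client (the index, document ID, and field are assumptions):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # connection details are an assumption

    resp = client.termvectors(
        index="my-index-000001",
        id="1",
        fields=["message"],     # hypothetical text field with stored or computed term vectors
        positions=True,
        offsets=True,
        field_statistics=True,
        term_statistics=True,   # off by default; see the parameter notes below
    )
    for term, stats in resp["term_vectors"]["message"]["terms"].items():
        print(term, stats["term_freq"])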

      ``_ - :param index: Name of the index that contains the document. - :param id: Unique identifier of the document. + :param index: The name of the index that contains the document. + :param id: A unique identifier for the document. :param doc: An artificial document (a document not present in the index) for which you want to retrieve term vectors. - :param field_statistics: If `true`, the response includes the document count, - sum of document frequencies, and sum of total term frequencies. - :param fields: Comma-separated list or wildcard expressions of fields to include - in the statistics. Used as the default list unless a specific field list - is provided in the `completion_fields` or `fielddata_fields` parameters. - :param filter: Filter terms based on their tf-idf scores. + :param field_statistics: If `true`, the response includes: * The document count + (how many documents contain this field). * The sum of document frequencies + (the sum of document frequencies for all terms in this field). * The sum + of total term frequencies (the sum of total term frequencies of each term + in this field). + :param fields: A comma-separated list or wildcard expressions of fields to include + in the statistics. It is used as the default list unless a specific field + list is provided in the `completion_fields` or `fielddata_fields` parameters. + :param filter: Filter terms based on their tf-idf scores. This could be useful + in order find out a good characteristic vector of a document. This feature + works in a similar manner to the second phase of the More Like This Query. :param offsets: If `true`, the response includes term offsets. :param payloads: If `true`, the response includes term payloads. - :param per_field_analyzer: Overrides the default per-field analyzer. + :param per_field_analyzer: Override the default per-field analyzer. This is useful + in order to generate term vectors in any fashion, especially when using artificial + documents. When providing an analyzer for a field that already stores term + vectors, the term vectors will be regenerated. :param positions: If `true`, the response includes term positions. - :param preference: Specifies the node or shard the operation should be performed - on. Random by default. + :param preference: The node or shard the operation should be performed on. It + is random by default. :param realtime: If true, the request is real-time as opposed to near-real-time. - :param routing: Custom value used to route operations to a specific shard. - :param term_statistics: If `true`, the response includes term frequency and document - frequency. + :param routing: A custom value that is used to route operations to a specific + shard. + :param term_statistics: If `true`, the response includes: * The total term frequency + (how often a term occurs in all documents). * The document frequency (the + number of documents containing the current term). By default these values + are not returned since term statistics can have a serious performance impact. :param version: If `true`, returns the document version as part of a hit. - :param version_type: Specific version type. + :param version_type: The version type. 
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -5703,6 +6167,7 @@ def update( human: t.Optional[bool] = None, if_primary_term: t.Optional[int] = None, if_seq_no: t.Optional[int] = None, + include_source_on_error: t.Optional[bool] = None, lang: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ @@ -5758,6 +6223,8 @@ def update( term. :param if_seq_no: Only perform the operation if the document has this sequence number. + :param include_source_on_error: True or false if to include the document source + in the error message in case of parsing errors. :param lang: The script language. :param refresh: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to @@ -5802,6 +6269,8 @@ def update( __query["if_primary_term"] = if_primary_term if if_seq_no is not None: __query["if_seq_no"] = if_seq_no + if include_source_on_error is not None: + __query["include_source_on_error"] = include_source_on_error if lang is not None: __query["lang"] = lang if pretty is not None: @@ -5913,80 +6382,161 @@ def update_by_query(

      Update documents. Updates documents that match the specified query. If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.

      +

      If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:

      +
        +
      • read
      • +
      • index or write
      • +
      +

      You can specify the query criteria in the request URI or the request body using the same syntax as the search API.

      +

      When you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning. + When the versions match, the document is updated and the version number is incremented. + If a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails. + You can opt to count version conflicts instead of halting and returning by setting conflicts to proceed. + Note that if you opt to count version conflicts, the operation could attempt to update more documents from the source than max_docs until it has successfully updated max_docs documents or it has gone through every document in the source query.

      +

      NOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number.

      +

      While processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents. + A bulk update request is performed for each batch of matching documents. + Any query or update failures cause the update by query request to fail and the failures are shown in the response. + Any update requests that completed successfully still stick, they are not rolled back.

      +

      Throttling update requests

      +

      To control the rate at which update by query issues batches of update operations, you can set requests_per_second to any positive decimal number. + This pads each batch with a wait time to throttle the rate. + Set requests_per_second to -1 to turn off throttling.

      +

      Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. + The padding time is the difference between the batch size divided by the requests_per_second and the time spent writing. + By default the batch size is 1000, so if requests_per_second is set to 500:

      +
      target_time = 1000 / 500 per second = 2 seconds
      +          wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
      +          
      +

      Since the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. + This is "bursty" instead of "smooth".
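
A hedged sketch of throttling an update by query with the Python client (index name and rate are assumptions):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # connection details are an assumption

    # Roughly 500 sub-requests per second; with the default scroll size of 1000
    # this corresponds to the 2-second target_time computed above.
    resp = client.update_by_query(
        index="my-index-000001",
        conflicts="proceed",
        requests_per_second=500,
        wait_for_completion=False,  # run as a task so it can be rethrottled later
    )
    # To change or remove the throttle while the task runs:
    # client.update_by_query_rethrottle(task_id=resp["task"], requests_per_second=-1)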

      +

      Slicing

      +

      Update by query supports sliced scroll to parallelize the update process. + This can improve efficiency and provide a convenient way to break the request down into smaller parts.

      +

      Setting slices to auto chooses a reasonable number for most data streams and indices. + This setting will use one slice per shard, up to a certain limit. + If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.

      +

      Adding slices to _update_by_query just automates the manual process of creating sub-requests, which means it has some quirks:

      +
        +
      • You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices.
      • +
      • Fetching the status of the task for the request with slices only contains the status of completed slices.
      • +
      • These sub-requests are individually addressable for things like cancellation and rethrottling.
      • +
      • Rethrottling the request with slices will rethrottle the unfinished sub-request proportionally.
      • +
      • Canceling the request with slices will cancel each sub-request.
      • +
      • Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.
      • +
      • Parameters like requests_per_second and max_docs on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using max_docs with slices might not result in exactly max_docs documents being updated.
      • +
      • Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.
      • +
      +

      If you're slicing manually or otherwise tuning automatic slicing, keep in mind that:

      +
        +
      • Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.
      • +
      • Update performance scales linearly across available resources with the number of slices.
      • +
      +

      Whether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources.

      +

      Update the document source

      +

      Update by query supports scripts to update the document source. + As with the update API, you can set ctx.op to change the operation that is performed.

      +

      Set ctx.op = "noop" if your script decides that it doesn't have to make any changes. + The update by query operation skips updating the document and increments the noop counter.

      +

      Set ctx.op = "delete" if your script decides that the document should be deleted. + The update by query operation deletes the document and increments the deleted counter.

      +

      Update by query supports only index, noop, and delete. + Setting ctx.op to anything else is an error. + Setting any other field in ctx is an error. + This API enables you to only modify the source of matching documents; you cannot move them.
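
As a hedged sketch of a scripted update by query with the Python client (the index, field names, and thresholds are assumptions):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # connection details are an assumption

    client.update_by_query(
        index="my-index-000001",
        slices="auto",       # parallelize as described in the Slicing section above
        conflicts="proceed",
        query={"range": {"age_days": {"gte": 30}}},  # hypothetical numeric field
        script={
            "lang": "painless",
            "source": (
                "if (ctx._source.age_days >= 365) { ctx.op = 'delete' } "
                "else if (ctx._source.flagged == true) { ctx.op = 'noop' } "
                "else { ctx._source.flagged = true }"
            ),
        },
    )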

      ``_ - :param index: Comma-separated list of data streams, indices, and aliases to search. - Supports wildcards (`*`). To search all data streams or indices, omit this - parameter or use `*` or `_all`. + :param index: A comma-separated list of data streams, indices, and aliases to + search. It supports wildcards (`*`). To search all data streams or indices, + omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. :param analyze_wildcard: If `true`, wildcard and prefix queries are analyzed. - :param analyzer: Analyzer to use for the query string. - :param conflicts: What to do if update by query hits version conflicts: `abort` - or `proceed`. + This parameter can be used only when the `q` query string parameter is specified. + :param analyzer: The analyzer to use for the query string. This parameter can + be used only when the `q` query string parameter is specified. + :param conflicts: The preferred behavior when update by query hits version conflicts: + `abort` or `proceed`. :param default_operator: The default operator for query string query: `AND` or - `OR`. - :param df: Field to use as default where no field prefix is given in the query - string. - :param expand_wildcards: Type of index that wildcard patterns can match. If the - request can target data streams, this argument determines whether wildcard - expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + `OR`. This parameter can be used only when the `q` query string parameter + is specified. + :param df: The field to use as default where no field prefix is given in the + query string. This parameter can be used only when the `q` query string parameter + is specified. + :param expand_wildcards: The type of index that wildcard patterns can match. + If the request can target data streams, this argument determines whether + wildcard expressions match hidden data streams. It supports comma-separated + values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, + `hidden`, `none`. :param from_: Starting offset (default: 0) :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param lenient: If `true`, format-based query failures (such as providing text - to a numeric field) in the query string will be ignored. + to a numeric field) in the query string will be ignored. This parameter can + be used only when the `q` query string parameter is specified. :param max_docs: The maximum number of documents to update. - :param pipeline: ID of the pipeline to use to preprocess incoming documents. + :param pipeline: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. - :param preference: Specifies the node or shard the operation should be performed - on. Random by default. - :param q: Query in the Lucene query string syntax. 
- :param query: Specifies the documents to update using the Query DSL. + :param preference: The node or shard the operation should be performed on. It + is random by default. + :param q: A query in the Lucene query string syntax. + :param query: The documents to update using the Query DSL. :param refresh: If `true`, Elasticsearch refreshes affected shards to make the - operation visible to search. + operation visible to search after the request completes. This is different + than the update API's `refresh` parameter, which causes just the shard that + received the request to be refreshed. :param request_cache: If `true`, the request cache is used for this request. + It defaults to the index-level setting. :param requests_per_second: The throttle for this request in sub-requests per second. - :param routing: Custom value used to route operations to a specific shard. + :param routing: A custom value used to route operations to a specific shard. :param script: The script to run to update the document source or metadata when updating. - :param scroll: Period to retain the search context for scrolling. - :param scroll_size: Size of the scroll request that powers the operation. - :param search_timeout: Explicit timeout for each search request. - :param search_type: The type of the search operation. Available options: `query_then_fetch`, - `dfs_query_then_fetch`. + :param scroll: The period to retain the search context for scrolling. + :param scroll_size: The size of the scroll request that powers the operation. + :param search_timeout: An explicit timeout for each search request. By default, + there is no timeout. + :param search_type: The type of the search operation. Available options include + `query_then_fetch` and `dfs_query_then_fetch`. :param slice: Slice the request manually using the provided slice ID and total number of slices. :param slices: The number of slices this task should be divided into. :param sort: A comma-separated list of : pairs. - :param stats: Specific `tag` of the request for logging and statistical purposes. - :param terminate_after: Maximum number of documents to collect for each shard. + :param stats: The specific `tag` of the request for logging and statistical purposes. + :param terminate_after: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. - Elasticsearch collects documents before sorting. Use with caution. Elasticsearch - applies this parameter to each shard handling the request. When possible, - let Elasticsearch perform early termination automatically. Avoid specifying - this parameter for requests that target data streams with backing indices - across multiple data tiers. - :param timeout: Period each update request waits for the following operations: - dynamic mapping updates, waiting for active shards. + Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. + Elasticsearch applies this parameter to each shard handling the request. + When possible, let Elasticsearch perform early termination automatically. + Avoid specifying this parameter for requests that target data streams with + backing indices across multiple data tiers. + :param timeout: The period each update request waits for the following operations: + dynamic mapping updates, waiting for active shards. By default, it is one + minute. This guarantees Elasticsearch waits for at least the timeout before + failing. The actual wait time could be longer, particularly when multiple + waits occur. 
:param version: If `true`, returns the document version as part of a hit. :param version_type: Should the document increment the version number (internal) on hit or not (reindex) :param wait_for_active_shards: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer - up to the total number of shards in the index (`number_of_replicas+1`). + up to the total number of shards in the index (`number_of_replicas+1`). The + `timeout` parameter controls how long each write request waits for unavailable + shards to become available. Both work exactly the way they work in the bulk + API. :param wait_for_completion: If `true`, the request blocks until the operation - is complete. + is complete. If `false`, Elasticsearch performs some preflight checks, launches + the request, and returns a task ID that you can use to cancel or get the + status of the task. Elasticsearch creates a record of this task as a document + at `.tasks/task/${taskId}`. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -6116,11 +6666,11 @@ def update_by_query_rethrottle( Rethrottling that speeds up the query takes effect immediately but rethrotting that slows down the query takes effect after completing the current batch to prevent scroll timeouts.

      - ``_ + ``_ :param task_id: The ID for the task. :param requests_per_second: The throttle for this request in sub-requests per - second. + second. To turn off throttling, set it to `-1`. """ if task_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_id'") diff --git a/elasticsearch/_sync/client/async_search.py b/elasticsearch/_sync/client/async_search.py index 30a582b10..1a004a6f1 100644 --- a/elasticsearch/_sync/client/async_search.py +++ b/elasticsearch/_sync/client/async_search.py @@ -97,8 +97,8 @@ def get( ``_ :param id: A unique identifier for the async search. - :param keep_alive: Specifies how long the async search should be available in - the cluster. When not specified, the `keep_alive` set with the corresponding + :param keep_alive: The length of time that the async search should be available + in the cluster. When not specified, the `keep_alive` set with the corresponding submit async request will be used. Otherwise, it is possible to override the value and extend the validity of the request. When this period expires, the search, if still running, is cancelled. If the search is completed, its @@ -157,13 +157,17 @@ def status(

      Get the async search status.

      Get the status of a previously submitted async search request given its identifier, without retrieving search results. - If the Elasticsearch security features are enabled, use of this API is restricted to the monitoring_user role.

      + If the Elasticsearch security features are enabled, the access to the status of a specific async search is restricted to:

      +
        +
      • The user or API key that submitted the original async search request.
      • +
      • Users that have the monitor cluster privilege or greater privileges.
      • +
      ``_ :param id: A unique identifier for the async search. - :param keep_alive: Specifies how long the async search needs to be available. + :param keep_alive: The length of time that the async search needs to be available. Ongoing async searches and any saved search results are deleted after this period. """ @@ -270,6 +274,7 @@ def submit( ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, indices_boost: t.Optional[t.Sequence[t.Mapping[str, float]]] = None, + keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, keep_on_completion: t.Optional[bool] = None, knn: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] @@ -384,6 +389,9 @@ def submit( :param ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :param indices_boost: Boosts the _score of documents from specified indices. + :param keep_alive: Specifies how long the async search needs to be available. + Ongoing async searches and any saved search results are deleted after this + period. :param keep_on_completion: If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`. :param knn: Defines the approximate kNN search to run. @@ -510,6 +518,8 @@ def submit( __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable + if keep_alive is not None: + __query["keep_alive"] = keep_alive if keep_on_completion is not None: __query["keep_on_completion"] = keep_on_completion if lenient is not None: diff --git a/elasticsearch/_sync/client/cat.py b/elasticsearch/_sync/client/cat.py index cbacf8a67..2ca151d5a 100644 --- a/elasticsearch/_sync/client/cat.py +++ b/elasticsearch/_sync/client/cat.py @@ -2494,7 +2494,7 @@ def tasks( IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API.

      - ``_ + ``_ :param actions: The task action names, which are used to limit the response. :param detailed: If `true`, the response includes detailed information about diff --git a/elasticsearch/_sync/client/ccr.py b/elasticsearch/_sync/client/ccr.py index 5f0ce695f..ab9fe911b 100644 --- a/elasticsearch/_sync/client/ccr.py +++ b/elasticsearch/_sync/client/ccr.py @@ -39,14 +39,17 @@ def delete_auto_follow_pattern( """ .. raw:: html -

      Delete auto-follow patterns. - Delete a collection of cross-cluster replication auto-follow patterns.

      +

      Delete auto-follow patterns.

      +

      Delete a collection of cross-cluster replication auto-follow patterns.

      ``_ - :param name: The name of the auto follow pattern. - :param master_timeout: Period to wait for a connection to the master node. + :param name: The auto-follow pattern collection to delete. + :param master_timeout: The period to wait for a connection to the master node. + If the master node is not available before the timeout expires, the request + fails and returns an error. It can also be set to `-1` to indicate that the + request should never timeout. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -251,16 +254,18 @@ def follow_info( """ .. raw:: html -

      Get follower information. - Get information about all cross-cluster replication follower indices. +

      Get follower information.

      +

      Get information about all cross-cluster replication follower indices. For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused.

      ``_ - :param index: A comma-separated list of index patterns; use `_all` to perform - the operation on all indices - :param master_timeout: Period to wait for a connection to the master node. + :param index: A comma-delimited list of follower index patterns. + :param master_timeout: The period to wait for a connection to the master node. + If the master node is not available before the timeout expires, the request + fails and returns an error. It can also be set to `-1` to indicate that the + request should never timeout. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -301,17 +306,16 @@ def follow_stats( """ .. raw:: html -

      Get follower stats. - Get cross-cluster replication follower stats. +

      Get follower stats.

      +

      Get cross-cluster replication follower stats. The API returns shard-level stats about the "following tasks" associated with each shard for the specified indices.

      ``_ - :param index: A comma-separated list of index patterns; use `_all` to perform - the operation on all indices - :param timeout: Period to wait for a response. If no response is received before - the timeout expires, the request fails and returns an error. + :param index: A comma-delimited list of index patterns. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -437,15 +441,18 @@ def get_auto_follow_pattern( """ .. raw:: html -

      Get auto-follow patterns. - Get cross-cluster replication auto-follow patterns.

      +

      Get auto-follow patterns.

      +

      Get cross-cluster replication auto-follow patterns.

      ``_ - :param name: Specifies the auto-follow pattern collection that you want to retrieve. - If you do not specify a name, the API returns information for all collections. - :param master_timeout: Period to wait for a connection to the master node. + :param name: The auto-follow pattern collection that you want to retrieve. If + you do not specify a name, the API returns information for all collections. + :param master_timeout: The period to wait for a connection to the master node. + If the master node is not available before the timeout expires, the request + fails and returns an error. It can also be set to `-1` to indicate that the + request should never timeout. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: @@ -489,8 +496,8 @@ def pause_auto_follow_pattern( """ .. raw:: html -

      Pause an auto-follow pattern. - Pause a cross-cluster replication auto-follow pattern. +

      Pause an auto-follow pattern.

      +

      Pause a cross-cluster replication auto-follow pattern. When the API returns, the auto-follow pattern is inactive. New indices that are created on the remote cluster and match the auto-follow patterns are ignored.

      You can resume auto-following with the resume auto-follow pattern API. @@ -500,9 +507,11 @@ def pause_auto_follow_pattern( ``_ - :param name: The name of the auto follow pattern that should pause discovering - new indices to follow. - :param master_timeout: Period to wait for a connection to the master node. + :param name: The name of the auto-follow pattern to pause. + :param master_timeout: The period to wait for a connection to the master node. + If the master node is not available before the timeout expires, the request + fails and returns an error. It can also be set to `-1` to indicate that the + request should never timeout. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -543,8 +552,8 @@ def pause_follow( """ .. raw:: html -

      Pause a follower. - Pause a cross-cluster replication follower index. +

      Pause a follower.

      +

      Pause a cross-cluster replication follower index. The follower index will not fetch any additional operations from the leader index. You can resume following with the resume follower API. You can pause and resume a follower index to change the configuration of the following task.

      @@ -552,9 +561,11 @@ def pause_follow( ``_ - :param index: The name of the follower index that should pause following its - leader index. - :param master_timeout: Period to wait for a connection to the master node. + :param index: The name of the follower index. + :param master_timeout: The period to wait for a connection to the master node. + If the master node is not available before the timeout expires, the request + fails and returns an error. It can also be set to `-1` to indicate that the + request should never timeout. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -765,17 +776,19 @@ def resume_auto_follow_pattern( """ .. raw:: html -

      Resume an auto-follow pattern. - Resume a cross-cluster replication auto-follow pattern that was paused. +

      Resume an auto-follow pattern.

      +

      Resume a cross-cluster replication auto-follow pattern that was paused. The auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster. Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim.

      ``_ - :param name: The name of the auto follow pattern to resume discovering new indices - to follow. - :param master_timeout: Period to wait for a connection to the master node. + :param name: The name of the auto-follow pattern to resume. + :param master_timeout: The period to wait for a connection to the master node. + If the master node is not available before the timeout expires, the request + fails and returns an error. It can also be set to `-1` to indicate that the + request should never timeout. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -934,15 +947,18 @@ def stats( """ .. raw:: html -

      Get cross-cluster replication stats. - This API returns stats about auto-following and the same shard-level stats as the get follower stats API.

      +

      Get cross-cluster replication stats.

      +

      This API returns stats about auto-following and the same shard-level stats as the get follower stats API.

      ``_ - :param master_timeout: Period to wait for a connection to the master node. - :param timeout: Period to wait for a response. If no response is received before - the timeout expires, the request fails and returns an error. + :param master_timeout: The period to wait for a connection to the master node. + If the master node is not available before the timeout expires, the request + fails and returns an error. It can also be set to `-1` to indicate that the + request should never timeout. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_ccr/stats" @@ -983,18 +999,23 @@ def unfollow( """ .. raw:: html -

      Unfollow an index. - Convert a cross-cluster replication follower index to a regular index. +

      Unfollow an index.

      +

      Convert a cross-cluster replication follower index to a regular index. The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. The follower index must be paused and closed before you call the unfollow API.

      -

      NOTE: Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation.

      +
      +

      info + Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation.
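
A hedged sketch of the full conversion workflow with the Python client (the follower index name is an assumption):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # connection details are an assumption
    follower = "follower-index"                      # hypothetical follower index

    client.ccr.pause_follow(index=follower)   # stop the following task
    client.indices.close(index=follower)      # the index must be closed before unfollowing
    client.ccr.unfollow(index=follower)       # remove the CCR metadata and settings
    client.indices.open(index=follower)       # reopen it as a regular index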

      +
      ``_ - :param index: The name of the follower index that should be turned into a regular - index. - :param master_timeout: Period to wait for a connection to the master node. + :param index: The name of the follower index. + :param master_timeout: The period to wait for a connection to the master node. + If the master node is not available before the timeout expires, the request + fails and returns an error. It can also be set to `-1` to indicate that the + request should never timeout. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") diff --git a/elasticsearch/_sync/client/cluster.py b/elasticsearch/_sync/client/cluster.py index 0d7fb74db..54e83a132 100644 --- a/elasticsearch/_sync/client/cluster.py +++ b/elasticsearch/_sync/client/cluster.py @@ -447,8 +447,8 @@ def health( """ .. raw:: html -

      Get the cluster health status. - You can also use the API to get the health status of only specified data streams and indices. +

      Get the cluster health status.

      +

      You can also use the API to get the health status of only specified data streams and indices. For data streams, the API retrieves the health status of the stream’s backing indices.

      The cluster health status is: green, yellow or red. On the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated. @@ -850,8 +850,8 @@ def put_settings( """ .. raw:: html -
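
For illustration, a hedged example of reading the status levels described above with the Python client; the wait condition and timeout are arbitrary choices, not defaults introduced by this patch:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    health = client.cluster.health(wait_for_status="yellow", timeout="30s")
    print(health["status"], health["active_shards_percent_as_number"])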

      Update the cluster settings. - Configure and update dynamic settings on a running cluster. +

      Update the cluster settings.

      +

      Configure and update dynamic settings on a running cluster. You can also configure dynamic settings locally on an unstarted or shut down node in elasticsearch.yml.

      Updates made with this API can be persistent, which apply across cluster restarts, or transient, which reset after a cluster restart. You can also reset transient or persistent settings by assigning them a null value.
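
A short sketch of a persistent update and the null-value reset mentioned above; the recovery setting is only an example, not something this patch changes:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Persistent settings survive a full cluster restart.
    client.cluster.put_settings(
        persistent={"indices.recovery.max_bytes_per_sec": "50mb"}
    )
    # Assigning None (JSON null) resets the setting to its default.
    client.cluster.put_settings(
        persistent={"indices.recovery.max_bytes_per_sec": None}
    )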

      @@ -920,9 +920,16 @@ def remote_info( """ .. raw:: html -

      Get remote cluster information. - Get all of the configured remote cluster information. - This API returns connection and endpoint information keyed by the configured remote cluster alias.

      +

      Get remote cluster information.

      +

      Get information about configured remote clusters. + The API returns connection and endpoint information keyed by the configured remote cluster alias.

      +
      +

      info + This API returns information that reflects current state on the local cluster. + The connected field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it. + Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster. + To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the resolve cluster endpoint.
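
A hedged example of inspecting the connected flag discussed in the note above; the remote aliases in the response depend entirely on the cluster configuration:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    remotes = client.cluster.remote_info()
    for alias, info in remotes.items():
        # "connected" only reports an open connection, not remote availability
        print(alias, info["connected"], info.get("mode"))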

      +
      ``_ diff --git a/elasticsearch/_sync/client/eql.py b/elasticsearch/_sync/client/eql.py index 558e9bad5..7df6bf4b4 100644 --- a/elasticsearch/_sync/client/eql.py +++ b/elasticsearch/_sync/client/eql.py @@ -43,7 +43,7 @@ def delete( The API also deletes results for the search.

      - ``_ + ``_ :param id: Identifier for the search to delete. A search ID is provided in the EQL search API's response for an async search. A search ID is also provided @@ -251,8 +251,15 @@ def search( :param index: The name of the index to scope the operation :param query: EQL query you wish to run. :param allow_no_indices: - :param allow_partial_search_results: - :param allow_partial_sequence_results: + :param allow_partial_search_results: Allow query execution also in case of shard + failures. If true, the query will keep running and will return results based + on the available shards. For sequences, the behavior can be further refined + using allow_partial_sequence_results + :param allow_partial_sequence_results: This flag applies only to sequences and + has effect only if allow_partial_search_results=true. If true, the sequence + query will return results based on the available shards, ignoring the others. + If false, the sequence query will return successfully, but will always have + empty results. :param case_sensitive: :param event_category_field: Field containing the event classification, such as process, file, or network. diff --git a/elasticsearch/_sync/client/esql.py b/elasticsearch/_sync/client/esql.py index 85e129c5f..25baccf1f 100644 --- a/elasticsearch/_sync/client/esql.py +++ b/elasticsearch/_sync/client/esql.py @@ -30,6 +30,7 @@ class EsqlClient(NamespacedClient): "query", "columnar", "filter", + "include_ccs_metadata", "locale", "params", "profile", @@ -56,6 +57,7 @@ def async_query( ] ] = None, human: t.Optional[bool] = None, + include_ccs_metadata: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, keep_on_completion: t.Optional[bool] = None, locale: t.Optional[str] = None, @@ -97,6 +99,10 @@ def async_query( :param filter: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. :param format: A short version of the Accept header, for example `json` or `yaml`. + :param include_ccs_metadata: When set to `true` and performing a cross-cluster + query, the response will include an extra `_clusters` object with information + about the clusters that participated in the search along with info such as + shards count. :param keep_alive: The period for which the query and its results are stored in the cluster. The default period is five days. When this period expires, the query and its results are deleted, even if the query is still ongoing. @@ -155,6 +161,8 @@ def async_query( __body["columnar"] = columnar if filter is not None: __body["filter"] = filter + if include_ccs_metadata is not None: + __body["include_ccs_metadata"] = include_ccs_metadata if locale is not None: __body["locale"] = locale if params is not None: @@ -298,11 +306,67 @@ def async_query_get( path_parts=__path_parts, ) + @_rewrite_parameters() + def async_query_stop( + self, + *, + id: str, + drop_null_columns: t.Optional[bool] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

      Stop async ES|QL query.

      +

      This API interrupts the query execution and returns the results so far. + If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can stop it.
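
A sketch combining the two ES|QL additions in this patch, the new async_query_stop endpoint and the include_ccs_metadata body field; the query, index pattern, and remote alias are hypothetical:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    resp = client.esql.async_query(
        query="FROM my-index,remote1:my-index | STATS hits = COUNT(*)",
        include_ccs_metadata=True,   # adds a _clusters section to the response
        keep_on_completion=True,     # ensures a query id is returned
    )
    # Interrupt the query and keep whatever results are already available.
    partial = client.esql.async_query_stop(id=resp["id"])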

      + + + ``_ + + :param id: The unique identifier of the query. A query ID is provided in the + ES|QL async query API response for a query that does not complete in the + designated time. A query ID is also provided when the request was submitted + with the `keep_on_completion` parameter set to `true`. + :param drop_null_columns: Indicates whether columns that are entirely `null` + will be removed from the `columns` and `values` portion of the results. If + `true`, the response will include an extra section under the name `all_columns` + which has the name of all the columns. + """ + if id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'id'") + __path_parts: t.Dict[str, str] = {"id": _quote(id)} + __path = f'/_query/async/{__path_parts["id"]}/stop' + __query: t.Dict[str, t.Any] = {} + if drop_null_columns is not None: + __query["drop_null_columns"] = drop_null_columns + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __headers = {"accept": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + endpoint_id="esql.async_query_stop", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=( "query", "columnar", "filter", + "include_ccs_metadata", "locale", "params", "profile", @@ -329,6 +393,7 @@ def query( ] ] = None, human: t.Optional[bool] = None, + include_ccs_metadata: t.Optional[bool] = None, locale: t.Optional[str] = None, params: t.Optional[ t.Sequence[t.Union[None, bool, float, int, str, t.Any]] @@ -364,6 +429,10 @@ def query( :param filter: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. :param format: A short version of the Accept header, e.g. json, yaml. + :param include_ccs_metadata: When set to `true` and performing a cross-cluster + query, the response will include an extra `_clusters` object with information + about the clusters that participated in the search along with info such as + shards count. :param locale: :param params: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) @@ -402,6 +471,8 @@ def query( __body["columnar"] = columnar if filter is not None: __body["filter"] = filter + if include_ccs_metadata is not None: + __body["include_ccs_metadata"] = include_ccs_metadata if locale is not None: __body["locale"] = locale if params is not None: diff --git a/elasticsearch/_sync/client/features.py b/elasticsearch/_sync/client/features.py index 66f19522b..70019a24a 100644 --- a/elasticsearch/_sync/client/features.py +++ b/elasticsearch/_sync/client/features.py @@ -102,7 +102,7 @@ def reset_features(

      IMPORTANT: The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes.

      - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. """ diff --git a/elasticsearch/_sync/client/fleet.py b/elasticsearch/_sync/client/fleet.py index f7bce669d..837e7b195 100644 --- a/elasticsearch/_sync/client/fleet.py +++ b/elasticsearch/_sync/client/fleet.py @@ -48,7 +48,9 @@ def global_checkpoints( """ .. raw:: html -

      Returns the current global checkpoints for an index. This API is design for internal use by the fleet server project.

      +

      Get global checkpoints.

      +

      Get the current global checkpoints for an index. + This API is designed for internal use by the Fleet server project.

      ``_ @@ -141,6 +143,8 @@ def msearch( supports the wait_for_checkpoints parameter.

      + ``_ + :param searches: :param index: A single target to search. If the target is an index alias, it must resolve to a single index. @@ -388,6 +392,8 @@ def search( after provided checkpoint has been processed and is visible for searches inside of Elasticsearch.

      + ``_ + :param index: A single target to search. If the target is an index alias, it must resolve to a single index. :param aggregations: diff --git a/elasticsearch/_sync/client/ilm.py b/elasticsearch/_sync/client/ilm.py index f42c24b26..f3dd02684 100644 --- a/elasticsearch/_sync/client/ilm.py +++ b/elasticsearch/_sync/client/ilm.py @@ -214,8 +214,8 @@ def get_status( """ .. raw:: html -

      Get the ILM status. - Get the current index lifecycle management status.

      +

      Get the ILM status.

      +

      Get the current index lifecycle management status.

      ``_ diff --git a/elasticsearch/_sync/client/indices.py b/elasticsearch/_sync/client/indices.py index b4774cea3..ddcd59823 100644 --- a/elasticsearch/_sync/client/indices.py +++ b/elasticsearch/_sync/client/indices.py @@ -57,23 +57,40 @@ def add_block( """ .. raw:: html -

      Add an index block. - Limits the operations allowed on an index by blocking specific operation types.

      +

      Add an index block.

      +

      Add an index block to an index. + Index blocks limit the operations allowed on an index by blocking specific operation types.
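
For example, blocking writes on a hypothetical index before a maintenance operation; the block types listed in the earlier parameter docs are read, write, read_only and metadata:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Disallow write operations while still allowing reads and metadata changes.
    client.indices.add_block(index="my-index", block="write")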

      - ``_ + ``_ - :param index: A comma separated list of indices to add a block to - :param block: The block to add (one of read, write, read_only or metadata) - :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves - into no concrete indices. (This includes `_all` string or when no indices - have been specified) - :param expand_wildcards: Whether to expand wildcard expression to concrete indices - that are open, closed or both. - :param ignore_unavailable: Whether specified concrete indices should be ignored - when unavailable (missing or closed) - :param master_timeout: Specify timeout for connection to master - :param timeout: Explicit operation timeout + :param index: A comma-separated list or wildcard expression of index names used + to limit the request. By default, you must explicitly name the indices you + are adding blocks to. To allow the adding of blocks to indices with `_all`, + `*`, or other wildcard expressions, change the `action.destructive_requires_name` + setting to `false`. You can update this setting in the `elasticsearch.yml` + file or by using the cluster update settings API. + :param block: The block type to add to the index. + :param allow_no_indices: If `false`, the request returns an error if any wildcard + expression, index alias, or `_all` value targets only missing or closed indices. + This behavior applies even if the request targets other open indices. For + example, a request targeting `foo*,bar*` returns an error if an index starts + with `foo` but no index starts with `bar`. + :param expand_wildcards: The type of index that wildcard patterns can match. + If the request can target data streams, this argument determines whether + wildcard expressions match hidden data streams. It supports comma-separated + values, such as `open,hidden`. + :param ignore_unavailable: If `false`, the request returns an error if it targets + a missing or closed index. + :param master_timeout: The period to wait for the master node. If the master + node is not available before the timeout expires, the request fails and returns + an error. It can also be set to `-1` to indicate that the request should + never timeout. + :param timeout: The period to wait for a response from all relevant nodes in + the cluster after updating the cluster metadata. If no response is received + before the timeout expires, the cluster metadata update still applies but + the response will indicate that it was not completely acknowledged. It can + also be set to `-1` to indicate that the request should never timeout. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -710,12 +727,11 @@ def create_data_stream( """ .. raw:: html -

      Create a data stream. - Creates a data stream. - You must have a matching index template with data stream enabled.

      +

      Create a data stream.

      +

      You must have a matching index template with data stream enabled.
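
A sketch of the prerequisite described above: an index template with data_stream enabled has to match the stream name before the stream can be created (template and stream names are hypothetical):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    client.indices.put_index_template(
        name="logs-template",
        index_patterns=["logs-app-*"],
        data_stream={},   # indices matching the pattern become a data stream
        priority=500,
    )
    client.indices.create_data_stream(name="logs-app-prod")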

      - ``_ + ``_ :param name: Name of the data stream, which must meet the following criteria: Lowercase only; Cannot include `\\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, @@ -841,11 +857,11 @@ def data_streams_stats( """ .. raw:: html -

      Get data stream stats. - Retrieves statistics for one or more data streams.

      +

      Get data stream stats.

      +

      Get statistics for one or more data streams.

      - ``_ + ``_ :param name: Comma-separated list of data streams used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams in a @@ -1120,7 +1136,7 @@ def delete_data_stream( Deletes one or more data streams and their backing indices.

      - ``_ + ``_ :param name: Comma-separated list of data streams to delete. Wildcard (`*`) expressions are supported. @@ -1538,11 +1554,11 @@ def exists_alias( """ .. raw:: html -

      Check aliases. - Checks if one or more data stream or index aliases exist.

      +

      Check aliases.

      +

      Check if one or more data stream or index aliases exist.

      - ``_ + ``_ :param name: Comma-separated list of aliases to check. Supports wildcards (`*`). :param index: Comma-separated list of data streams or indices used to limit the @@ -1612,11 +1628,11 @@ def exists_index_template( """ .. raw:: html -

      Check index templates. - Check whether index templates exist.

      +

      Check index templates.

      +

      Check whether index templates exist.

      - ``_ + ``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. @@ -2287,8 +2303,8 @@ def get_data_lifecycle( """ .. raw:: html -

      Get data stream lifecycles. - Retrieves the data stream lifecycle configuration of one or more data streams.

      +

      Get data stream lifecycles.

      +

      Get the data stream lifecycle configuration of one or more data streams.

      ``_ @@ -2396,11 +2412,11 @@ def get_data_stream( """ .. raw:: html -

      Get data streams. - Retrieves information about one or more data streams.

      +

      Get data streams.

      +

      Get information about one or more data streams.

      - ``_ + ``_ :param name: Comma-separated list of data stream names used to limit the request. Wildcard (`*`) expressions are supported. If omitted, all data streams are @@ -3355,14 +3371,15 @@ def put_alias( ) @_rewrite_parameters( - body_name="lifecycle", + body_fields=("data_retention", "downsampling", "enabled"), ) def put_data_lifecycle( self, *, name: t.Union[str, t.Sequence[str]], - lifecycle: t.Optional[t.Mapping[str, t.Any]] = None, - body: t.Optional[t.Mapping[str, t.Any]] = None, + data_retention: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + downsampling: t.Optional[t.Mapping[str, t.Any]] = None, + enabled: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ @@ -3377,6 +3394,7 @@ def put_data_lifecycle( master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html @@ -3389,7 +3407,15 @@ def put_data_lifecycle( :param name: Comma-separated list of data streams used to limit the request. Supports wildcards (`*`). To target all data streams use `*` or `_all`. - :param lifecycle: + :param data_retention: If defined, every document added to this data stream will + be stored at least for this time frame. Any time after this duration the + document could be deleted. When empty, every document in this data stream + will be stored indefinitely. + :param downsampling: The downsampling configuration to execute for the managed + backing index after rollover. + :param enabled: If defined, it turns data stream lifecycle on/off (`true`/`false`) + for this data stream. A data stream lifecycle that's disabled (enabled: `false`) + will have no effect on the data stream. :param expand_wildcards: Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `hidden`, `open`, `closed`, `none`. @@ -3401,15 +3427,10 @@ def put_data_lifecycle( """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") - if lifecycle is None and body is None: - raise ValueError( - "Empty value passed for parameters 'lifecycle' and 'body', one of them should be set." 
- ) - elif lifecycle is not None and body is not None: - raise ValueError("Cannot set both 'lifecycle' and 'body'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}/_lifecycle' __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: @@ -3424,8 +3445,18 @@ def put_data_lifecycle( __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout - __body = lifecycle if lifecycle is not None else body - __headers = {"accept": "application/json", "content-type": "application/json"} + if not __body: + if data_retention is not None: + __body["data_retention"] = data_retention + if downsampling is not None: + __body["downsampling"] = downsampling + if enabled is not None: + __body["enabled"] = enabled + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, @@ -3631,10 +3662,7 @@ def put_mapping( ] = None, dynamic_date_formats: t.Optional[t.Sequence[str]] = None, dynamic_templates: t.Optional[ - t.Union[ - t.Mapping[str, t.Mapping[str, t.Any]], - t.Sequence[t.Mapping[str, t.Mapping[str, t.Any]]], - ] + t.Sequence[t.Mapping[str, t.Mapping[str, t.Any]]] ] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ @@ -4255,7 +4283,7 @@ def reload_search_analyzers( def resolve_cluster( self, *, - name: t.Union[str, t.Sequence[str]], + name: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ @@ -4271,19 +4299,20 @@ def resolve_cluster( ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html -
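
With the signature change above, the lifecycle document is no longer passed as a single `lifecycle` body; a hedged sketch of the new top-level parameters, with an arbitrary retention value and stream name:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    client.indices.put_data_lifecycle(
        name="logs-app-prod",
        data_retention="7d",   # keep documents for at least seven days
        enabled=True,
    )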

      Resolve the cluster. - Resolve the specified index expressions to return information about each cluster, including the local cluster, if included. - Multiple patterns and remote clusters are supported.

      +

      Resolve the cluster.

      +

      Resolve the specified index expressions to return information about each cluster, including the local "querying" cluster, if included. + If no index expression is provided, the API will return information about all the remote clusters that are configured on the querying cluster.

      This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search.

      You use the same index expression with this endpoint as you would for cross-cluster search. Index and cluster exclusions are also supported with this endpoint.

      For each cluster in the index expression, information is returned about:

        -
      • Whether the querying ("local") cluster is currently connected to each remote cluster in the index expression scope.
      • Whether the querying ("local") cluster is currently connected to each remote cluster specified in the index expression. Note that this endpoint actively attempts to contact the remote clusters, unlike the remote/info endpoint.
      • Whether each remote cluster is configured with skip_unavailable as true or false.
      • Whether there are any indices, aliases, or data streams on that cluster that match the index expression.
      • Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).
      • @@ -4291,7 +4320,13 @@ def resolve_cluster(

      For example, GET /_resolve/cluster/my-index-*,cluster*:my-index-* returns information about the local cluster and all remotely configured clusters that start with the alias cluster*. Each cluster returns information about whether it has any indices, aliases or data streams that match my-index-*.

      -

      Advantages of using this endpoint before a cross-cluster search

      +

      Note on backwards compatibility

      +

The ability to query without an index expression was added in version 8.18, so when + querying remote clusters older than that, the local cluster will send the index + expression dummy* to those remote clusters. Thus, if errors occur, you may see a reference + to that index expression even though you didn't request it. If it causes a problem, you can + instead include an index expression like *:* to bypass the issue.

      +

      Advantages of using this endpoint before a cross-cluster search

      You may want to exclude a cluster or index from a search when:

      • A remote cluster is not currently connected and is configured with skip_unavailable=false. Running a cross-cluster search under those conditions will cause the entire search to fail.
      • @@ -4299,31 +4334,60 @@ def resolve_cluster(
      • The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the _resolve/cluster response will be present. (This is also where security/permission errors will be shown.)
      • A remote cluster is an older version that does not support the feature you want to use in your search.
      +

      Test availability of remote clusters

      +

      The remote/info endpoint is commonly used to test whether the "local" cluster (the cluster being queried) is connected to its remote clusters, but it does not necessarily reflect whether the remote cluster is available or not. + The remote cluster may be available, while the local cluster is not currently connected to it.

      +

      You can use the _resolve/cluster API to attempt to reconnect to remote clusters. + For example with GET _resolve/cluster or GET _resolve/cluster/*:*. + The connected field in the response will indicate whether it was successful. + If a connection was (re-)established, this will also cause the remote/info endpoint to now indicate a connected status.
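
A sketch of the two request shapes described above, using the Python client after this change makes the index expression optional and adds a timeout; the names and timeout value are illustrative:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # 8.18+: no index expression, report on every configured remote cluster.
    all_remotes = client.indices.resolve_cluster()

    # With an index expression and a bounded wait for unresponsive remotes.
    resp = client.indices.resolve_cluster(
        name="my-index-*,cluster*:my-index-*",
        timeout="5s",
    )
    for cluster_alias, details in resp.items():
        print(cluster_alias, details.get("connected"))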

      ``_ - :param name: Comma-separated name(s) or index pattern(s) of the indices, aliases, - and data streams to resolve. Resources on remote clusters can be specified - using the ``:`` syntax. + :param name: A comma-separated list of names or index patterns for the indices, + aliases, and data streams to resolve. Resources on remote clusters can be + specified using the ``:`` syntax. Index and cluster exclusions + (e.g., `-cluster1:*`) are also supported. If no index expression is specified, + information about all remote clusters configured on the local cluster is + returned without doing any index matching :param allow_no_indices: If false, the request returns an error if any wildcard - expression, index alias, or _all value targets only missing or closed indices. + expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For - example, a request targeting foo*,bar* returns an error if an index starts - with foo but no index starts with bar. + example, a request targeting `foo*,bar*` returns an error if an index starts + with `foo` but no index starts with `bar`. NOTE: This option is only supported + when specifying an index expression. You will get an error if you specify + index options to the `_resolve/cluster` API endpoint that takes no index + expression. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - :param ignore_throttled: If true, concrete, expanded or aliased indices are ignored - when frozen. Defaults to false. + NOTE: This option is only supported when specifying an index expression. + You will get an error if you specify index options to the `_resolve/cluster` + API endpoint that takes no index expression. + :param ignore_throttled: If true, concrete, expanded, or aliased indices are + ignored when frozen. NOTE: This option is only supported when specifying + an index expression. You will get an error if you specify index options to + the `_resolve/cluster` API endpoint that takes no index expression. :param ignore_unavailable: If false, the request returns an error if it targets - a missing or closed index. Defaults to false. + a missing or closed index. NOTE: This option is only supported when specifying + an index expression. You will get an error if you specify index options to + the `_resolve/cluster` API endpoint that takes no index expression. + :param timeout: The maximum time to wait for remote clusters to respond. If a + remote cluster does not respond within this timeout period, the API response + will show the cluster as not connected and include an error message that + the request timed out. The default timeout is unset and the query can take + as long as the networking layer is configured to wait for remote clusters + that are not responding (typically 30 seconds). 
""" - if name in SKIP_IN_PATH: - raise ValueError("Empty value passed for parameter 'name'") - __path_parts: t.Dict[str, str] = {"name": _quote(name)} - __path = f'/_resolve/cluster/{__path_parts["name"]}' + __path_parts: t.Dict[str, str] + if name not in SKIP_IN_PATH: + __path_parts = {"name": _quote(name)} + __path = f'/_resolve/cluster/{__path_parts["name"]}' + else: + __path_parts = {} + __path = "/_resolve/cluster" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices @@ -4341,6 +4405,8 @@ def resolve_cluster( __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py index 5430e7283..7dc4a8cc2 100644 --- a/elasticsearch/_sync/client/inference.py +++ b/elasticsearch/_sync/client/inference.py @@ -51,12 +51,12 @@ def delete( ``_ - :param inference_id: The inference Id + :param inference_id: The inference identifier. :param task_type: The task type - :param dry_run: When true, the endpoint is not deleted, and a list of ingest - processors which reference this endpoint is returned + :param dry_run: When true, the endpoint is not deleted and a list of ingest processors + which reference this endpoint is returned. :param force: When true, the inference endpoint is forcefully deleted even if - it is still being used by ingest processors or semantic text fields + it is still being used by ingest processors or semantic text fields. """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") @@ -180,18 +180,29 @@ def inference( """ .. raw:: html -

      Perform inference on the service

      +

      Perform inference on the service.

      +

      This API enables you to use machine learning models to perform specific tasks on data that you provide as an input. + It returns a response with the results of the tasks. + The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.

      +
      +

      info + The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
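
A minimal, hedged example of calling an existing inference endpoint from the Python client; the endpoint id is hypothetical and must already have been created with the create inference API:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    result = client.inference.inference(
        inference_id="my-text-embedding-endpoint",
        input=["The quick brown fox"],   # a single string is also accepted
    )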

      +
      ``_ - :param inference_id: The inference Id - :param input: Inference input. Either a string or an array of strings. - :param task_type: The task type - :param query: Query input, required for rerank task. Not required for other tasks. - :param task_settings: Optional task settings - :param timeout: Specifies the amount of time to wait for the inference request - to complete. + :param inference_id: The unique identifier for the inference endpoint. + :param input: The text on which you want to perform the inference task. It can + be a single string or an array. > info > Inference endpoints for the `completion` + task type currently only support a single string as input. + :param task_type: The type of inference task that the model performs. + :param query: The query input, which is required only for the `rerank` task. + It is not required for other tasks. + :param task_settings: Task settings for the individual inference request. These + settings are specific to the task type you specified and override the task + settings specified when initializing the service. + :param timeout: The amount of time to wait for the inference request to complete. """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") diff --git a/elasticsearch/_sync/client/ingest.py b/elasticsearch/_sync/client/ingest.py index 0a909bc6b..f4d8c9241 100644 --- a/elasticsearch/_sync/client/ingest.py +++ b/elasticsearch/_sync/client/ingest.py @@ -40,18 +40,18 @@ def delete_geoip_database( """ .. raw:: html -

      Delete GeoIP database configurations. - Delete one or more IP geolocation database configurations.

      +

      Delete GeoIP database configurations.

      +

      Delete one or more IP geolocation database configurations.

      - ``_ + ``_ :param id: A comma-separated list of geoip database configurations to delete - :param master_timeout: Period to wait for a connection to the master node. If - no response is received before the timeout expires, the request fails and - returns an error. - :param timeout: Period to wait for a response. If no response is received before - the timeout expires, the request fails and returns an error. + :param master_timeout: The period to wait for a connection to the master node. + If no response is received before the timeout expires, the request fails + and returns an error. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") @@ -244,15 +244,15 @@ def get_geoip_database( """ .. raw:: html -

      Get GeoIP database configurations. - Get information about one or more IP geolocation database configurations.

      +

      Get GeoIP database configurations.

      +

      Get information about one or more IP geolocation database configurations.

      - ``_ + ``_ - :param id: Comma-separated list of database configuration IDs to retrieve. Wildcard - (`*`) expressions are supported. To get all database configurations, omit - this parameter or use `*`. + :param id: A comma-separated list of database configuration IDs to retrieve. + Wildcard (`*`) expressions are supported. To get all database configurations, + omit this parameter or use `*`. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: @@ -350,8 +350,8 @@ def get_pipeline( """ .. raw:: html -

      Get pipelines. - Get information about one or more ingest pipelines. +

      Get pipelines.

      +

      Get information about one or more ingest pipelines. This API returns a local reference of the pipeline.

      @@ -455,11 +455,11 @@ def put_geoip_database( """ .. raw:: html -

      Create or update a GeoIP database configuration. - Refer to the create or update IP geolocation database configuration API.

      +

      Create or update a GeoIP database configuration.

      +

      Refer to the create or update IP geolocation database configuration API.

      - ``_ + ``_ :param id: ID of the database configuration to create or update. :param maxmind: The configuration necessary to identify which IP geolocation @@ -712,17 +712,17 @@ def simulate( """ .. raw:: html -

      Simulate a pipeline. - Run an ingest pipeline against a set of provided documents. +

      Simulate a pipeline.

      +

      Run an ingest pipeline against a set of provided documents. You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.
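
A small sketch of the inline-pipeline variant described above; the processor and sample document are made up for illustration:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    resp = client.ingest.simulate(
        pipeline={
            "processors": [
                {"set": {"field": "environment", "value": "staging"}},
            ]
        },
        docs=[{"_source": {"message": "hello world"}}],
    )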

      ``_ :param docs: Sample documents to test in the pipeline. - :param id: Pipeline to test. If you don’t specify a `pipeline` in the request + :param id: The pipeline to test. If you don't specify a `pipeline` in the request body, this parameter is required. - :param pipeline: Pipeline to test. If you don’t specify the `pipeline` request + :param pipeline: The pipeline to test. If you don't specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter. :param verbose: If `true`, the response includes output data for each processor diff --git a/elasticsearch/_sync/client/license.py b/elasticsearch/_sync/client/license.py index dac9f6b88..842e47354 100644 --- a/elasticsearch/_sync/client/license.py +++ b/elasticsearch/_sync/client/license.py @@ -39,16 +39,16 @@ def delete( """ .. raw:: html -

      Delete the license. - When the license expires, your subscription level reverts to Basic.

      +

      Delete the license.

      +

      When the license expires, your subscription level reverts to Basic.

      If the operator privileges feature is enabled, only operator users can use this API.

      ``_ - :param master_timeout: Period to wait for a connection to the master node. - :param timeout: Period to wait for a response. If no response is received before - the timeout expires, the request fails and returns an error. + :param master_timeout: The period to wait for a connection to the master node. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_license" @@ -89,10 +89,13 @@ def get( """ .. raw:: html -

      Get license information. - Get information about your Elastic license including its type, its status, when it was issued, and when it expires.

      -

      NOTE: If the master node is generating a new cluster state, the get license API may return a 404 Not Found response. +

      Get license information.

      +

      Get information about your Elastic license including its type, its status, when it was issued, and when it expires.

      +
      +

      info + If the master node is generating a new cluster state, the get license API may return a 404 Not Found response. If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request.

      +
      ``_ @@ -225,8 +228,8 @@ def post( """ .. raw:: html -

      Update the license. - You can update your license at runtime without shutting down your nodes. +

      Update the license.

      +

      You can update your license at runtime without shutting down your nodes. License updates take effect immediately. If the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true.

      @@ -240,9 +243,9 @@ def post( :param license: :param licenses: A sequence of one or more JSON documents containing the license information. - :param master_timeout: Period to wait for a connection to the master node. - :param timeout: Period to wait for a response. If no response is received before - the timeout expires, the request fails and returns an error. + :param master_timeout: The period to wait for a connection to the master node. + :param timeout: The period to wait for a response. If no response is received + before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_license" @@ -297,8 +300,8 @@ def post_start_basic( """ .. raw:: html -

      Start a basic license. - Start an indefinite basic license, which gives access to all the basic features.

      +

      Start a basic license.

      +

      Start an indefinite basic license, which gives access to all the basic features.

      NOTE: In order to start a basic license, you must not currently have a basic license.

      If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true.

      diff --git a/elasticsearch/_sync/client/ml.py b/elasticsearch/_sync/client/ml.py index f10b79e2e..fbcb25309 100644 --- a/elasticsearch/_sync/client/ml.py +++ b/elasticsearch/_sync/client/ml.py @@ -38,8 +38,8 @@ def clear_trained_model_deployment_cache( """ .. raw:: html -

      Clear trained model deployment cache. - Cache will be cleared on all nodes where the trained model is assigned. +

      Clear trained model deployment cache.

      +

      Cache will be cleared on all nodes where the trained model is assigned. A trained model deployment may have an inference cache enabled. As requests are handled by each allocated node, their responses may be cached on that individual node. Calling this API clears the caches without restarting the deployment.

      @@ -93,8 +93,8 @@ def close_job( """ .. raw:: html -

      Close anomaly detection jobs. - A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. +

      Close anomaly detection jobs.

      +

      A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its meta data. Therefore it is a best practice to close jobs that are no longer required to process data. If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling stop datafeed API with the same timeout and force parameters as the close job request. When a datafeed that has a specified end date stops, it automatically closes its associated job.

      @@ -161,8 +161,8 @@ def delete_calendar( """ .. raw:: html -

      Delete a calendar. - Removes all scheduled events from a calendar, then deletes it.

      +

      Delete a calendar.

      +

      Remove all scheduled events from a calendar, then delete it.

      ``_ @@ -415,15 +415,15 @@ def delete_expired_data( """ .. raw:: html -

      Delete expired ML data. - Deletes all job results, model snapshots and forecast data that have exceeded +

      Delete expired ML data.

      +

      Delete all job results, model snapshots and forecast data that have exceeded their retention days period. Machine learning state documents that are not associated with any job are also deleted. You can limit the request to a single or set of anomaly detection jobs by using a job identifier, a group name, a comma-separated list of jobs, or a wildcard expression. You can delete expired data for all anomaly detection - jobs by using _all, by specifying * as the <job_id>, or by omitting the - <job_id>.

      + jobs by using _all, by specifying * as the <job_id>, or by omitting the + <job_id>.

      ``_ @@ -485,8 +485,8 @@ def delete_filter( """ .. raw:: html -

      Delete a filter. - If an anomaly detection job references the filter, you cannot delete the +

      Delete a filter.

      +

      If an anomaly detection job references the filter, you cannot delete the filter. You must update or delete the job before you can delete the filter.

      @@ -533,8 +533,8 @@ def delete_forecast( """ .. raw:: html -

      Delete forecasts from a job. - By default, forecasts are retained for 14 days. You can specify a +

      Delete forecasts from a job.

      +

      By default, forecasts are retained for 14 days. You can specify a different retention period with the expires_in parameter in the forecast jobs API. The delete forecast API enables you to delete one or more forecasts before they expire.

      @@ -607,8 +607,8 @@ def delete_job( """ .. raw:: html -

      Delete an anomaly detection job. - All job configuration, model state and results are deleted. +

      Delete an anomaly detection job.

      +

      All job configuration, model state and results are deleted. It is not currently possible to delete multiple jobs using wildcards or a comma separated list. If you delete a job that has a datafeed, the request first tries to delete the datafeed. This behavior is equivalent to calling @@ -670,8 +670,8 @@ def delete_model_snapshot( """ .. raw:: html -

      Delete a model snapshot. - You cannot delete the active model snapshot. To delete that snapshot, first +

      Delete a model snapshot.

      +

      You cannot delete the active model snapshot. To delete that snapshot, first revert to a different one. To identify the active model snapshot, refer to the model_snapshot_id in the results from the get jobs API.

      @@ -724,8 +724,8 @@ def delete_trained_model( """ .. raw:: html -

      Delete an unreferenced trained model. - The request deletes a trained inference model that is not referenced by an ingest pipeline.

      +

      Delete an unreferenced trained model.

      +

      The request deletes a trained inference model that is not referenced by an ingest pipeline.

      ``_ @@ -777,8 +777,8 @@ def delete_trained_model_alias( """ .. raw:: html -

      Delete a trained model alias. - This API deletes an existing model alias that refers to a trained model. If +

      Delete a trained model alias.

      +

      This API deletes an existing model alias that refers to a trained model. If the model alias is missing or refers to a model other than the one identified by the model_id, this API returns an error.

      @@ -838,13 +838,13 @@ def estimate_model_memory( """ .. raw:: html -

      Estimate job model memory usage. - Makes an estimation of the memory usage for an anomaly detection job model. - It is based on analysis configuration details for the job and cardinality +

      Estimate job model memory usage.

      +

      Make an estimation of the memory usage for an anomaly detection job model. + The estimate is based on analysis configuration details for the job and cardinality estimates for the fields it references.

      - ``_ + ``_ :param analysis_config: For a list of the properties that you can specify in the `analysis_config` component of the body of this API. @@ -909,8 +909,8 @@ def evaluate_data_frame( """ .. raw:: html -

      Evaluate data frame analytics. - The API packages together commonly used evaluation metrics for various types +

      Evaluate data frame analytics.

      +

      The API packages together commonly used evaluation metrics for various types of machine learning features. This has been designed for use on indexes created by data frame analytics. Evaluation requires both a ground truth field and an analytics result field to be present.

      @@ -990,8 +990,8 @@ def explain_data_frame_analytics( """ .. raw:: html -

      Explain data frame analytics config. - This API provides explanations for a data frame analytics config that either +

      Explain data frame analytics config.

      +

      This API provides explanations for a data frame analytics config that either exists already or one that has not been created yet. The following explanations are provided:

        @@ -2891,8 +2891,8 @@ def open_job( """ .. raw:: html -

        Open anomaly detection jobs. - An anomaly detection job must be opened to be ready to receive and analyze +

        Open anomaly detection jobs.

        +

        An anomaly detection job must be opened to be ready to receive and analyze data. It can be opened and closed multiple times throughout its lifecycle. When you open a new job, it starts with an empty model. When you open an existing job, the most recent model state is automatically @@ -3082,7 +3082,7 @@ def preview_data_frame_analytics( .. raw:: html

        Preview features used by data frame analytics. - Previews the extracted features used by a data frame analytics config.

        + Preview the extracted features used by a data frame analytics config.

        ``_ @@ -3821,8 +3821,8 @@ def put_job( """ .. raw:: html -

        Create an anomaly detection job. - If you include a datafeed_config, you must have read index privileges on the source index. +

        Create an anomaly detection job.

        +

        If you include a datafeed_config, you must have read index privileges on the source index. If you include a datafeed_config but do not provide a query, the datafeed uses {"match_all": {"boost": 1}}.

        @@ -4669,11 +4669,14 @@ def start_datafeed( path_parts=__path_parts, ) - @_rewrite_parameters() + @_rewrite_parameters( + body_fields=("adaptive_allocations",), + ) def start_trained_model_deployment( self, *, model_id: str, + adaptive_allocations: t.Optional[t.Mapping[str, t.Any]] = None, cache_size: t.Optional[t.Union[int, str]] = None, deployment_id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, @@ -4688,6 +4691,7 @@ def start_trained_model_deployment( wait_for: t.Optional[ t.Union[str, t.Literal["fully_allocated", "started", "starting"]] ] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html @@ -4700,6 +4704,9 @@ def start_trained_model_deployment( :param model_id: The unique identifier of the trained model. Currently, only PyTorch models are supported. + :param adaptive_allocations: Adaptive allocations configuration. When enabled, + the number of allocations is set based on the current load. If adaptive_allocations + is enabled, do not set the number of allocations manually. :param cache_size: The inference cache size (in memory outside the JVM heap) per node for the model. The default value is the same size as the `model_size_bytes`. To disable the cache, `0b` can be provided. @@ -4709,7 +4716,8 @@ def start_trained_model_deployment( model in memory but use a separate set of threads to evaluate the model. Increasing this value generally increases the throughput. If this setting is greater than the number of hardware threads it will automatically be changed - to a value less than the number of hardware threads. + to a value less than the number of hardware threads. If adaptive_allocations + is enabled, do not set this value, because it’s automatically set. :param priority: The deployment priority. :param queue_capacity: Specifies the number of inference requests that are allowed in the queue. After the number of requests exceeds this value, new requests @@ -4729,6 +4737,7 @@ def start_trained_model_deployment( __path_parts: t.Dict[str, str] = {"model_id": _quote(model_id)} __path = f'/_ml/trained_models/{__path_parts["model_id"]}/deployment/_start' __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} if cache_size is not None: __query["cache_size"] = cache_size if deployment_id is not None: @@ -4753,12 +4762,20 @@ def start_trained_model_deployment( __query["timeout"] = timeout if wait_for is not None: __query["wait_for"] = wait_for + if not __body: + if adaptive_allocations is not None: + __body["adaptive_allocations"] = adaptive_allocations + if not __body: + __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, + body=__body, endpoint_id="ml.start_trained_model_deployment", path_parts=__path_parts, ) @@ -5540,12 +5557,13 @@ def update_model_snapshot( ) @_rewrite_parameters( - body_fields=("number_of_allocations",), + body_fields=("adaptive_allocations", "number_of_allocations"), ) def update_trained_model_deployment( self, *, model_id: str, + adaptive_allocations: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, @@ -5563,12 +5581,16 @@ def update_trained_model_deployment( :param model_id: The unique identifier of the trained model. 
Currently, only PyTorch models are supported. + :param adaptive_allocations: Adaptive allocations configuration. When enabled, + the number of allocations is set based on the current load. If adaptive_allocations + is enabled, do not set the number of allocations manually. :param number_of_allocations: The number of model allocations on each node where the model is deployed. All allocations on a node share the same copy of the model in memory but use a separate set of threads to evaluate the model. Increasing this value generally increases the throughput. If this setting is greater than the number of hardware threads it will automatically be changed - to a value less than the number of hardware threads. + to a value less than the number of hardware threads. If adaptive_allocations + is enabled, do not set this value, because it’s automatically set. """ if model_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_id'") @@ -5585,6 +5607,8 @@ def update_trained_model_deployment( if pretty is not None: __query["pretty"] = pretty if not __body: + if adaptive_allocations is not None: + __body["adaptive_allocations"] = adaptive_allocations if number_of_allocations is not None: __body["number_of_allocations"] = number_of_allocations if not __body: @@ -5619,7 +5643,7 @@ def upgrade_job_snapshot( .. raw:: html
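
A hedged sketch of the new adaptive_allocations body field wired up above when starting a trained model deployment; the model id and allocation bounds are placeholders, and the option names follow the server-side adaptive allocations settings rather than anything defined in this patch:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    client.ml.start_trained_model_deployment(
        model_id="my-pytorch-model",
        adaptive_allocations={
            "enabled": True,
            "min_number_of_allocations": 1,
            "max_number_of_allocations": 4,
        },
        wait_for="started",
    )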

        Upgrade a snapshot. - Upgrades an anomaly detection model snapshot to the latest major version. + Upgrade an anomaly detection model snapshot to the latest major version. Over time, older snapshot formats are deprecated and removed. Anomaly detection jobs support only snapshots that are from the current or previous major version. @@ -5782,7 +5806,7 @@ def validate_detector(

        Validate an anomaly detection job.

        - ``_ + ``_ :param detector: """ diff --git a/elasticsearch/_sync/client/monitoring.py b/elasticsearch/_sync/client/monitoring.py index eae014b19..033982ad1 100644 --- a/elasticsearch/_sync/client/monitoring.py +++ b/elasticsearch/_sync/client/monitoring.py @@ -48,7 +48,7 @@ def bulk( This API is used by the monitoring features to send monitoring data.

        - ``_ + ``_ :param interval: Collection interval (e.g., '10s' or '10000ms') of the payload :param operations: diff --git a/elasticsearch/_sync/client/nodes.py b/elasticsearch/_sync/client/nodes.py index 61bf40d31..b64f3abf0 100644 --- a/elasticsearch/_sync/client/nodes.py +++ b/elasticsearch/_sync/client/nodes.py @@ -231,8 +231,8 @@ def info( """ .. raw:: html -

        Get node information. - By default, the API returns all attributes and core settings for cluster nodes.

        +

        Get node information.

        +

        By default, the API returns all attributes and core settings for cluster nodes.

        ``_ @@ -308,7 +308,7 @@ def reload_secure_settings( Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password.

        - ``_ + ``_ :param node_id: The names of particular nodes in the cluster to target. :param secure_settings_password: The password for the Elasticsearch keystore. diff --git a/elasticsearch/_sync/client/search_application.py b/elasticsearch/_sync/client/search_application.py index 76ef2d456..7309f9ec4 100644 --- a/elasticsearch/_sync/client/search_application.py +++ b/elasticsearch/_sync/client/search_application.py @@ -45,13 +45,13 @@ def delete( """ .. raw:: html -

        Delete a search application. - Remove a search application and its associated alias. Indices attached to the search application are not removed.

        +

        Delete a search application.

        +

        Remove a search application and its associated alias. Indices attached to the search application are not removed.

        ``_ - :param name: The name of the search application to delete + :param name: The name of the search application to delete. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") @@ -234,7 +234,7 @@ def list( Get information about search applications.

        - ``_ + ``_ :param from_: Starting offset. :param q: Query in the Lucene query string syntax. diff --git a/elasticsearch/_sync/client/transform.py b/elasticsearch/_sync/client/transform.py index 95427115e..1df9564d1 100644 --- a/elasticsearch/_sync/client/transform.py +++ b/elasticsearch/_sync/client/transform.py @@ -41,8 +41,7 @@ def delete_transform( """ .. raw:: html -

        Delete a transform. - Deletes a transform.

        +

        Delete a transform.

        ``_ @@ -106,7 +105,7 @@ def get_transform( .. raw:: html

        Get transforms. - Retrieves configuration information for transforms.

        + Get configuration information for transforms.

        ``_ @@ -178,8 +177,8 @@ def get_transform_stats( """ .. raw:: html -

        Get transform stats. - Retrieves usage information for transforms.

        +

        Get transform stats.

        +

        Get usage information for transforms.
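A short sketch of the two read-only transform calls documented above; `my-transform` is a placeholder ID.

[source, python]
----
# Configuration information for a transform
resp = client.transform.get_transform(transform_id="my-transform")
print(resp)

# Usage statistics for the same transform
resp = client.transform.get_transform_stats(transform_id="my-transform")
print(resp)
----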

        ``_ @@ -508,9 +507,8 @@ def reset_transform( """ .. raw:: html -

        Reset a transform. - Resets a transform. - Before you can reset it, you must stop it; alternatively, use the force query parameter. +

        Reset a transform.

        +

        Before you can reset it, you must stop it; alternatively, use the force query parameter. If the destination index was created by the transform, it is deleted.
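As a sketch of the reset behaviour described above: stop the transform first, or pass `force=True` to skip that step; the transform ID is a placeholder.

[source, python]
----
# Stop the transform, then reset it
resp = client.transform.stop_transform(transform_id="my-transform")
print(resp)

resp = client.transform.reset_transform(transform_id="my-transform")
print(resp)

# Alternatively, force the reset without stopping first
resp = client.transform.reset_transform(transform_id="my-transform", force=True)
print(resp)
----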

        @@ -566,11 +564,11 @@ def schedule_now_transform( """ .. raw:: html -

        Schedule a transform to start now. - Instantly runs a transform to process data.

        -

        If you _schedule_now a transform, it will process the new data instantly, - without waiting for the configured frequency interval. After _schedule_now API is called, - the transform will be processed again at now + frequency unless _schedule_now API +

        Schedule a transform to start now.

        +

        Instantly run a transform to process data. + If you run this API, the transform will process the new data instantly, + without waiting for the configured frequency interval. After the API is called, + the transform will be processed again at now + frequency unless the API is called again in the meantime.
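For illustration, a minimal sketch of triggering an immediate run as described above; `my-transform` is a placeholder ID.

[source, python]
----
resp = client.transform.schedule_now_transform(transform_id="my-transform")
print(resp)
----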

        @@ -621,8 +619,7 @@ def start_transform( """ .. raw:: html -

        Start a transform. - Starts a transform.

        +

        Start a transform.

        When you start a transform, it creates the destination index if it does not already exist. The number_of_shards is set to 1 and the auto_expand_replicas is set to 0-1. If it is a pivot transform, it deduces the mapping definitions for the destination index from the source indices and the transform aggregations. If fields in the @@ -879,8 +876,8 @@ def upgrade_transforms( """ .. raw:: html -
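A minimal sketch of the start call described above, which creates the destination index on first start if it does not already exist; `my-transform` is a placeholder ID.

[source, python]
----
resp = client.transform.start_transform(transform_id="my-transform")
print(resp)
----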

        Upgrade all transforms. - Transforms are compatible across minor versions and between supported major versions. +

        Upgrade all transforms.

        +

        Transforms are compatible across minor versions and between supported major versions. However, over time, the format of transform configuration information may change. This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It also cleans up the internal data structures that store the transform state and checkpoints. diff --git a/elasticsearch/dsl/types.py b/elasticsearch/dsl/types.py index fe12a00e8..d1c39003e 100644 --- a/elasticsearch/dsl/types.py +++ b/elasticsearch/dsl/types.py @@ -4345,14 +4345,12 @@ class BulkIndexByScrollFailure(AttrDict[Any]): :arg id: (required) :arg index: (required) :arg status: (required) - :arg type: (required) """ cause: "ErrorCause" id: str index: str status: int - type: str class CardinalityAggregate(AttrDict[Any]): From 8154f9a0c1d6bafd2e014a2837cb1c00cc1a3536 Mon Sep 17 00:00:00 2001 From: Quentin Pradet Date: Wed, 5 Mar 2025 11:08:29 +0400 Subject: [PATCH 38/65] Bring 8.17.2 release to parent (#2821) --- .../00ad41bde67beac991534ae0e04b1296.asciidoc | 11 ++ .../015e6e6132b6d6d44bddb06bc3b316ed.asciidoc | 2 +- .../0165d22da5f2fc7678392b31d8eb5566.asciidoc | 2 +- .../016f3147dae9ff2c3e831257ae470361.asciidoc | 2 +- .../01b23f09d2b7f140faf649eadbbf3ac3.asciidoc | 2 +- .../0bc6155e0c88062a4d8490da49db3aa8.asciidoc | 2 +- .../0bcd380315ef4691b8c79df6ca53a85f.asciidoc | 2 +- .../0bee07a581c5776e068f6f4efad5a399.asciidoc | 17 +-- .../0d689ac6e78be5d438f9b5d441be2b44.asciidoc | 2 +- .../0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc | 32 ++--- .../0f028f71f04c1d569fab402869565a84.asciidoc | 15 +++ .../0f2e5e006b663a88ee99b130ab1b4844.asciidoc | 2 +- .../0fb7705ddbf1fc2b65d2de2e00fe5769.asciidoc | 2 +- .../1147a02afa087278e51fa365fb9e06b7.asciidoc | 2 +- .../11e8d6e14686efabb8634b6522c05cb5.asciidoc | 2 +- .../120fcf9f55128d6a81d5e87a9c235bbd.asciidoc | 25 ++-- .../12adea5d76f73d94d80d42f53f67563f.asciidoc | 11 ++ .../13d91782399ba1f291e103c18b5338cc.asciidoc | 25 ++++ .../141ef0ebaa3b0772892b79b9bb85efb0.asciidoc | 5 +- .../15ac33d641b376d9494075eb1f0d4066.asciidoc | 10 ++ .../174b93c323aa8e9cc8ee2a3df5736810.asciidoc | 12 ++ .../17a1e308761afd3282f13d44d7be008a.asciidoc | 2 +- .../19c00c6b29bc7dbc5e92b3668da2da93.asciidoc | 37 +++--- .../1a7483796087053ba55029d0dc2ab356.asciidoc | 2 +- .../1a9efb56adb2cd84faa9825a129381b9.asciidoc | 2 +- .../1ead35c954963e83f89872048dabdbe9.asciidoc | 21 +++ .../1ef5119db55a6f2b6fc0ab92f36e7f8e.asciidoc | 2 +- .../22334f4b24bb8977d3e1bf2ffdc29d3f.asciidoc | 2 +- .../272e27bf1fcc4fe5dbd4092679dd0342.asciidoc | 11 ++ .../2891aa10ee9d474780adf94d5607f2db.asciidoc | 2 +- .../29aeabacb1fdf5b083d5f091b6d1bd44.asciidoc | 15 +++ .../2a1eece9a59ac1773edcf0a932c26de0.asciidoc | 11 +- .../2a71e2d7f7179dd76183d30789046808.asciidoc | 2 +- .../2afd49985950cbcccf727fa858d00067.asciidoc | 24 ++++ .../2afdf0d83724953aa2875b5fb37d60cc.asciidoc | 11 +- .../2b7687e3d7c06824950e00618c297864.asciidoc | 2 +- .../2bacdcb278705d944f367cfb984cf4d2.asciidoc | 2 +- .../2d150ff3b6b991b58fea6aa5cc669aa3.asciidoc | 2 +- .../2ee002e60bd7a38d466e5f0eb0c38946.asciidoc | 2 +- .../2f72a63c73dd672ac2dc3997ad15dd41.asciidoc | 2 +- .../2f9ee29fe49f7d206a41212aa5945296.asciidoc | 22 ++++ .../2fa7ded8515b32f26c54394ea598f573.asciidoc | 2 +- .../31832bd71c31c46a1ccf8d1c210d89d4.asciidoc | 30 +++++ .../32123981430e5a8b34fe14314fc48429.asciidoc | 2 +- .../32c8c86702ccd68eb70f1573409c2a1f.asciidoc | 31 +++++ .../3649194a97d265a3bc758f8b38f7561e.asciidoc | 21 +++ ...36792c81c053e0555407d1e83e7e054f.asciidoc} | 7 
+- .../3722dad876023e0757138dd5a6d3240e.asciidoc | 23 ++++ .../3a204b57072a104d9b50f3a9e064a8f6.asciidoc | 19 +++ .../3a2953fd81d65118a776c87a81530e15.asciidoc | 2 +- .../3a6238835c7d9f51e6d91f92885fadeb.asciidoc | 2 +- .../3bc4a3681e3ea9cb3de49f72085807d8.asciidoc | 65 ++++++++++ .../3bc872dbcdad8ff02cbaea39e7f38352.asciidoc | 2 +- .../3e278e6c193b4c17dbdc70670e15d78c.asciidoc | 2 +- .../3ea4c971b3f47735dcc207ee2645fa03.asciidoc | 2 +- .../3f1fe5f5f99b98d0891f38003e10b636.asciidoc | 11 +- .../3f2e5132e35b9e8b3203a4a0541cf0d4.asciidoc | 2 +- .../405511f7c1f12cc0a227b4563fe7b2e2.asciidoc | 5 +- .../40c3e7bb1fdc125a1ab21bd7d7326694.asciidoc | 2 +- .../416a3ba11232d3c078c1c31340cf356f.asciidoc | 2 +- .../4301cb9d970ec65778f91ce1f438e0d5.asciidoc | 2 +- .../433cf45a23decdf3a096016ffaaf26ba.asciidoc | 2 +- .../43d9e314431336a6f084cea76dfd6489.asciidoc | 2 +- .../43e86fbaeed068dcc981214338559b5a.asciidoc | 2 +- .../443dd902f64b3217505c9595839c3b2d.asciidoc | 2 +- .../44dfac5bc3131014e2c6bb1ebc76b33d.asciidoc | 2 +- .../45954b8aaedfed57012be8b6538b0a24.asciidoc | 67 +++++----- .../46b771a9932c3fa6057a7b2679c72ef0.asciidoc | 10 ++ .../4982c547be1ad9455ae836990aea92c5.asciidoc | 5 + .../4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc | 13 +- .../4de4bb55bbc0a76c75d256f245a3ee3f.asciidoc | 17 +++ .../4f621ab694f62ddb89e0684a9e76c4d1.asciidoc | 2 +- .../4ff2dcec03fe097075cf1d174a019a1f.asciidoc | 2 +- .../532ddf9afdcd0b1c9c0bb331e74d8df3.asciidoc | 2 +- .../537bce129338d9227bccb6a0283dab45.asciidoc | 12 ++ .../57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc | 13 +- .../5969c446688c8b326acc80276573e9d2.asciidoc | 2 +- .../59aa5216630f80c5dc298fc5bba4a819.asciidoc | 10 ++ .../5b2a13366bd4e1ab4b25d04d360570dc.asciidoc | 2 +- .../5bba213a7f543190139d1a69ab2ed076.asciidoc | 17 +-- .../5ea9da129ca70a5fe534f27a82d80b29.asciidoc | 2 +- .../5f3549ac7fee94682ca0d7439eebdd2a.asciidoc | 2 +- .../615dc36f0978c676624fb7d1144b4899.asciidoc | 11 ++ .../642161d70dacf7d153767d37d3726838.asciidoc | 2 +- .../650a0fb27c66a790c4687267423af1da.asciidoc | 2 +- .../6636701d31b0c9eb8316f1f8e99cc918.asciidoc | 2 +- .../666c420fe61fa122386da3c356a64943.asciidoc | 2 +- .../66915e95b723ee2f6e5164a94b8f98c1.asciidoc | 12 ++ .../67b71a95b6fe6c83faae51ea038a1bf1.asciidoc | 10 ++ .../69541f0bb81ab3797926bb2a00607cda.asciidoc | 2 +- .../698e0a2b67ba7842caa801d9ef46ebe3.asciidoc | 2 +- .../69d5710bdec73041c66f21d5f96637e8.asciidoc | 2 +- ...6baf72c04d48cb04c2f8be609ff3b3b5.asciidoc} | 7 +- .../6ce8334def48552ba7d44025580d9105.asciidoc | 2 +- .../6dcd3916679f6aa64f79524c75991ebd.asciidoc | 2 +- .../6e498b9dc753b94abf2618c407fa5cd8.asciidoc | 16 +++ .../6e6b78e6b689a5d6aa637271b6d084e2.asciidoc | 2 +- .../6ea062455229151e311869a81ee40252.asciidoc | 2 +- .../6f3b723bf6179b96c3413597ed7f49e1.asciidoc | 15 +-- .../73d1a6c5ef90b7e35d43a0bfdc1e158d.asciidoc | 2 +- .../744aeb2af40f519e430e21e004e3c3b7.asciidoc | 2 +- .../75e360d03fb416f0a65ca37c662c2e9c.asciidoc | 2 +- .../75e6d66e94e61bd8a555beaaee255c36.asciidoc | 2 +- .../76e02434835630cb830724beb92df354.asciidoc | 2 +- .../77518e8c6198acfe77c0934fd2fe65cb.asciidoc | 55 ++++---- .../78043831fd32004a82930c8ac8a1d809.asciidoc | 2 +- .../78e20b4cff470ed7357de1fd74bcfeb7.asciidoc | 2 +- .../79d206a528be704050a437adce2496dd.asciidoc | 2 +- .../7b5c231526846f2f7b98d78f3656ae6a.asciidoc | 2 +- .../7ba29f0be2297b54a640b0a17d7ef5ca.asciidoc | 5 +- .../7bdc283b96c7a965fae23013647b8578.asciidoc | 2 +- .../7daff6b7e668ab8a762b8ab5dff7a167.asciidoc | 2 +- .../7f2d511cb64743c006225e5933a14bb4.asciidoc | 2 +- 
.../7fde3ff91c4a2e7080444af37d5cd287.asciidoc | 2 +- .../80dd7f5882c59b9c1c90e8351937441f.asciidoc | 61 ++++----- .../828f0045747fde4888a947bb99e190e3.asciidoc | 2 +- .../82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc | 51 ++++---- .../84465de841fe5c6099a0382f786f2cb8.asciidoc | 2 +- .../853fc710cea79fb4e1a85fb6d149f9c5.asciidoc | 2 +- .../8621c05cc7cf3880bde751f6670a0c3a.asciidoc | 15 +++ .../87457bb3467484bec3e9df4e25942ba6.asciidoc | 2 +- .../88b19973b970adf9b73fca82017d4951.asciidoc | 2 +- .../89f547649895176c246bb8c41313ff21.asciidoc | 15 +++ .../8a1b6eae4893c5dd27b3d81fd8d70f5b.asciidoc | 2 +- .../8b144b3eb20872595fd7cbc6c245c7c8.asciidoc | 12 -- .../8c47c80139f40f25db44f5781ca2dfbe.asciidoc | 10 ++ .../9169d19a80175ec94f80865d0f9bef4c.asciidoc | 2 +- .../91e106a2affbc8df32cd940684a779ed.asciidoc | 8 +- .../927b20a221f975b75d1227b67d0eb7e2.asciidoc | 2 +- .../944806221eb89f5af2298ccdf2902277.asciidoc | 2 +- .../947efe87db7f8813c0878f8affc3e2d1.asciidoc | 8 ++ .../948418e0ef1b7e7cfee2f11be715d7d2.asciidoc | 2 +- .../95c1b376652533c352bbf793c74d1b08.asciidoc | 2 +- .../971fd23adb81bb5842c7750e0379336a.asciidoc | 2 +- .../97c6c07f46f4177f0565a04bc50924a3.asciidoc | 2 +- .../991b9ba53f0eccec8ec5a42f8d9b655c.asciidoc | 2 +- .../99fb82d49ac477e6a9dfdd71f9465374.asciidoc | 5 +- .../9afa0844883b7471883aa378a8dd10b4.asciidoc | 9 +- .../9beb260834f8cfb240f6308950dbb9c2.asciidoc | 2 +- .../9c01db07c9ac395b6370e3b33965c21f.asciidoc | 15 +-- .../9de4edafd22a8b9cb557632b2c8779cd.asciidoc | 2 +- .../a00311843b5f8f3e9f7d511334a828b1.asciidoc | 2 +- .../a162eb50853331c80596f5994e9d1c38.asciidoc | 12 +- .../a1dda7e7c01be96a4acf7b725d70385f.asciidoc | 2 +- .../a34e758e019f563d323ca90ad9fd6e3e.asciidoc | 2 +- .../a3646b59da66b9ab68bdbc8dc2e6a9be.asciidoc | 2 +- .../a46f566ca031375658c22f89b87dc6d2.asciidoc | 12 ++ .../a60aaed30d7d26eaacbb2c0ed4ddc66d.asciidoc | 10 ++ .../a675fafa7c688cb3ea1be09bf887ebf0.asciidoc | 12 ++ .../a9554396506888e392a1aee0ca28e6fc.asciidoc | 2 +- .../a95a123b9f862e52ab1e8f875961c852.asciidoc | 2 +- .../a9f14efc26fdd3c37a71f06c310163d9.asciidoc | 2 +- .../ac22cc2b0f4ad659055feed2852a2d59.asciidoc | 2 +- .../acb10091ad335ddd15d71021aaf23c62.asciidoc | 2 +- .../adced6e22ef03c2ae3b14aa5bdd24fd9.asciidoc | 10 ++ .../aee4734ee63dbbbd12a21ee886f7a829.asciidoc | 2 +- .../b0bddf2ffaa83049b195829c06b875cd.asciidoc | 5 +- .../b1e81b70b874a1f0cf75a0ec6e430ddc.asciidoc | 10 ++ .../b583bf8d3a2f49d633aa2cfed5606418.asciidoc | 2 +- .../b5bc1bb7278f2f95bc54790c78c928e0.asciidoc | 2 +- .../b607eea422295a3e9acd75f9ed1c8cb7.asciidoc | 2 +- .../b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc | 10 +- ...ba0e7e0b18fc9ec6c623d40186d1f61b.asciidoc} | 3 +- .../bb2ba5d1885f87506f90dbb002e518f4.asciidoc | 2 +- ...bcd1afb793240b1dddd9fa5d3f21192b.asciidoc} | 10 +- .../bcdfaa4487747249699a86a0dcd22f5e.asciidoc | 53 ++++---- .../be5fef0640c3a650ee96f84e3376a1be.asciidoc | 2 +- .../bf1de9fa1b825fa875d27fa08821a6d1.asciidoc | 2 +- .../bf3f520b47581d861e802730aaf2a519.asciidoc | 2 +- .../c147de68fd6da032ad4a3c1bf626f5d6.asciidoc | 2 +- .../c3b77e11b16e37e9e37e28dec922432e.asciidoc | 10 ++ .../c4607ca79b2bcde39305d6f4f21cad37.asciidoc | 2 +- .../c580990a70028bb49cca8a6bde86bbf6.asciidoc | 17 +-- .../c6339d09f85000a6432304b0ec63b8f6.asciidoc | 2 +- .../ca5dda98e977125d40a7fe1e178e213f.asciidoc | 2 +- .../cc5eefcc2102aae7e87b0c87b4af10b8.asciidoc | 2 +- .../ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc | 121 +++++++++--------- .../cd7da0c3769682f546cc1888e569382e.asciidoc | 2 +- .../d1d8b6e642db1a7c70dbbf0fe6d8e92d.asciidoc | 2 
+- .../d2e7dead222cfbebbd2c21a7cc1893b4.asciidoc | 11 ++ .../d35c8cf7a98b3f112e1de8797ec6689d.asciidoc | 11 +- .../d3a0f648d0fd50b54a4e9ebe363c5047.asciidoc | 48 +++++++ .../d4df39f72d3a3b80cd4042f6a21c3f19.asciidoc | 8 +- .../d6a4548b29e939fb197189c20c7c016f.asciidoc | 17 +++ .../d8c053ee26c1533ce936ec81101d8e1b.asciidoc | 5 +- .../d93d52b6057a7aff3d0766ca44c505e0.asciidoc | 2 +- .../dd16c9c981551c9da47ebb5ef5105fa0.asciidoc | 57 +++++++++ .../dd71b0c9f9197684ff29c61062c55660.asciidoc | 5 +- .../dddb6a6ebd145f8411c5b4910d332f87.asciidoc | 2 +- .../dde92fdf3469349ffe2c81764333543a.asciidoc | 14 ++ .../de139866a220124360e5e27d1a736ea4.asciidoc | 2 +- .../e3019fd5f23458ae49ad9854c97d321c.asciidoc | 9 +- .../e4b38973c74037335378d8480f1ce894.asciidoc | 59 ++++----- .../e6ccd979c34ba03007e625c6ec3e71a9.asciidoc | 2 +- .../e6f6d3aeea7ecea47cfd5c3d727f7004.asciidoc | 2 +- .../e6faae2e272ee57727f38e55a3de5bb2.asciidoc | 2 +- .../e715fb8c792bf09ac98f0ceca99beb84.asciidoc | 10 ++ .../e93ff228ab3e63738e1c83fdfb7424b9.asciidoc | 2 +- .../eac3bc428d03eb4926fa51f74b9bc4d5.asciidoc | 2 +- .../ec135f0cc0d3f526df68000b2a95c65b.asciidoc | 12 ++ .../ed5c3b45e8de912faba44507d827eb93.asciidoc | 2 +- .../edae616e1244babf6032aecc6aaaf836.asciidoc | 2 +- .../edb25dc0162b039d477cb06aed2d6275.asciidoc | 2 +- .../ee08328cd157d547de19b4abe867b23e.asciidoc | 2 +- .../f29b2674299ddf51a25ed87619025ede.asciidoc | 2 +- .../f39512478cae2db8f4566a1e4af9e8f5.asciidoc | 2 +- .../f8cb1a04c2e487ff006b5ae0e1a7afbd.asciidoc | 2 +- ...f994498dd6576be657dedce2822d2b9e.asciidoc} | 9 +- .../fbb38243221c8fb311660616e3add9ce.asciidoc | 2 +- .../fe208d94ec93eabf3bd06139fa70701e.asciidoc | 2 +- .../fe3a927d868cbc530e08e05964d5174a.asciidoc | 2 +- ...ffda10edaa7ce087703193c3cb95a426.asciidoc} | 18 ++- docs/guide/release-notes.asciidoc | 13 ++ elasticsearch/_version.py | 2 +- .../generate-docs-examples/package-lock.json | 17 ++- 215 files changed, 1278 insertions(+), 629 deletions(-) create mode 100644 docs/examples/00ad41bde67beac991534ae0e04b1296.asciidoc create mode 100644 docs/examples/0f028f71f04c1d569fab402869565a84.asciidoc create mode 100644 docs/examples/12adea5d76f73d94d80d42f53f67563f.asciidoc create mode 100644 docs/examples/13d91782399ba1f291e103c18b5338cc.asciidoc create mode 100644 docs/examples/15ac33d641b376d9494075eb1f0d4066.asciidoc create mode 100644 docs/examples/174b93c323aa8e9cc8ee2a3df5736810.asciidoc create mode 100644 docs/examples/1ead35c954963e83f89872048dabdbe9.asciidoc create mode 100644 docs/examples/272e27bf1fcc4fe5dbd4092679dd0342.asciidoc create mode 100644 docs/examples/29aeabacb1fdf5b083d5f091b6d1bd44.asciidoc create mode 100644 docs/examples/2afd49985950cbcccf727fa858d00067.asciidoc create mode 100644 docs/examples/2f9ee29fe49f7d206a41212aa5945296.asciidoc create mode 100644 docs/examples/31832bd71c31c46a1ccf8d1c210d89d4.asciidoc create mode 100644 docs/examples/32c8c86702ccd68eb70f1573409c2a1f.asciidoc create mode 100644 docs/examples/3649194a97d265a3bc758f8b38f7561e.asciidoc rename docs/examples/{37f367ca81a16d3aef4ef7126ec33a2e.asciidoc => 36792c81c053e0555407d1e83e7e054f.asciidoc} (93%) create mode 100644 docs/examples/3722dad876023e0757138dd5a6d3240e.asciidoc create mode 100644 docs/examples/3a204b57072a104d9b50f3a9e064a8f6.asciidoc create mode 100644 docs/examples/3bc4a3681e3ea9cb3de49f72085807d8.asciidoc create mode 100644 docs/examples/46b771a9932c3fa6057a7b2679c72ef0.asciidoc create mode 100644 docs/examples/4de4bb55bbc0a76c75d256f245a3ee3f.asciidoc create mode 100644 
docs/examples/537bce129338d9227bccb6a0283dab45.asciidoc create mode 100644 docs/examples/59aa5216630f80c5dc298fc5bba4a819.asciidoc create mode 100644 docs/examples/615dc36f0978c676624fb7d1144b4899.asciidoc create mode 100644 docs/examples/66915e95b723ee2f6e5164a94b8f98c1.asciidoc create mode 100644 docs/examples/67b71a95b6fe6c83faae51ea038a1bf1.asciidoc rename docs/examples/{d29031409016b2b798148ef173a196ae.asciidoc => 6baf72c04d48cb04c2f8be609ff3b3b5.asciidoc} (73%) create mode 100644 docs/examples/6e498b9dc753b94abf2618c407fa5cd8.asciidoc create mode 100644 docs/examples/8621c05cc7cf3880bde751f6670a0c3a.asciidoc create mode 100644 docs/examples/89f547649895176c246bb8c41313ff21.asciidoc delete mode 100644 docs/examples/8b144b3eb20872595fd7cbc6c245c7c8.asciidoc create mode 100644 docs/examples/8c47c80139f40f25db44f5781ca2dfbe.asciidoc create mode 100644 docs/examples/947efe87db7f8813c0878f8affc3e2d1.asciidoc create mode 100644 docs/examples/a46f566ca031375658c22f89b87dc6d2.asciidoc create mode 100644 docs/examples/a60aaed30d7d26eaacbb2c0ed4ddc66d.asciidoc create mode 100644 docs/examples/a675fafa7c688cb3ea1be09bf887ebf0.asciidoc create mode 100644 docs/examples/adced6e22ef03c2ae3b14aa5bdd24fd9.asciidoc create mode 100644 docs/examples/b1e81b70b874a1f0cf75a0ec6e430ddc.asciidoc rename docs/examples/{357edc9d10e98ed776401c7a439a1a55.asciidoc => ba0e7e0b18fc9ec6c623d40186d1f61b.asciidoc} (78%) rename docs/examples/{436d50b85fc8f0977d02059eec00719b.asciidoc => bcd1afb793240b1dddd9fa5d3f21192b.asciidoc} (52%) create mode 100644 docs/examples/c3b77e11b16e37e9e37e28dec922432e.asciidoc create mode 100644 docs/examples/d2e7dead222cfbebbd2c21a7cc1893b4.asciidoc create mode 100644 docs/examples/d3a0f648d0fd50b54a4e9ebe363c5047.asciidoc create mode 100644 docs/examples/d6a4548b29e939fb197189c20c7c016f.asciidoc create mode 100644 docs/examples/dd16c9c981551c9da47ebb5ef5105fa0.asciidoc create mode 100644 docs/examples/dde92fdf3469349ffe2c81764333543a.asciidoc create mode 100644 docs/examples/e715fb8c792bf09ac98f0ceca99beb84.asciidoc create mode 100644 docs/examples/ec135f0cc0d3f526df68000b2a95c65b.asciidoc rename docs/examples/{9ad0864bcd665b63551e944653d32423.asciidoc => f994498dd6576be657dedce2822d2b9e.asciidoc} (82%) rename docs/examples/{681d24c2633f598fc43d6afff8996dbb.asciidoc => ffda10edaa7ce087703193c3cb95a426.asciidoc} (90%) diff --git a/docs/examples/00ad41bde67beac991534ae0e04b1296.asciidoc b/docs/examples/00ad41bde67beac991534ae0e04b1296.asciidoc new file mode 100644 index 000000000..b806eedf1 --- /dev/null +++ b/docs/examples/00ad41bde67beac991534ae0e04b1296.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// migration/apis/data-stream-reindex.asciidoc:273 + +[source, python] +---- +resp = client.indices.get_data_stream( + name="my-data-stream", + filter_path="data_streams.indices.index_name", +) +print(resp) +---- diff --git a/docs/examples/015e6e6132b6d6d44bddb06bc3b316ed.asciidoc b/docs/examples/015e6e6132b6d6d44bddb06bc3b316ed.asciidoc index 78636bc7a..1930760fd 100644 --- a/docs/examples/015e6e6132b6d6d44bddb06bc3b316ed.asciidoc +++ b/docs/examples/015e6e6132b6d6d44bddb06bc3b316ed.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/retrievers-examples.asciidoc:801 +// search/search-your-data/retrievers-examples.asciidoc:1051 [source, python] ---- diff --git a/docs/examples/0165d22da5f2fc7678392b31d8eb5566.asciidoc b/docs/examples/0165d22da5f2fc7678392b31d8eb5566.asciidoc index 749e89a02..b716f8df7 100644 --- 
a/docs/examples/0165d22da5f2fc7678392b31d8eb5566.asciidoc +++ b/docs/examples/0165d22da5f2fc7678392b31d8eb5566.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/retrievers-examples.asciidoc:1113 +// search/search-your-data/retrievers-examples.asciidoc:1363 [source, python] ---- diff --git a/docs/examples/016f3147dae9ff2c3e831257ae470361.asciidoc b/docs/examples/016f3147dae9ff2c3e831257ae470361.asciidoc index b1bdb6ee6..299997ce4 100644 --- a/docs/examples/016f3147dae9ff2c3e831257ae470361.asciidoc +++ b/docs/examples/016f3147dae9ff2c3e831257ae470361.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// alias.asciidoc:52 +// alias.asciidoc:54 [source, python] ---- diff --git a/docs/examples/01b23f09d2b7f140faf649eadbbf3ac3.asciidoc b/docs/examples/01b23f09d2b7f140faf649eadbbf3ac3.asciidoc index b0675f45b..7d37ed747 100644 --- a/docs/examples/01b23f09d2b7f140faf649eadbbf3ac3.asciidoc +++ b/docs/examples/01b23f09d2b7f140faf649eadbbf3ac3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/index-templates.asciidoc:84 +// indices/index-templates.asciidoc:86 [source, python] ---- diff --git a/docs/examples/0bc6155e0c88062a4d8490da49db3aa8.asciidoc b/docs/examples/0bc6155e0c88062a4d8490da49db3aa8.asciidoc index 2479ec8bb..77db1349e 100644 --- a/docs/examples/0bc6155e0c88062a4d8490da49db3aa8.asciidoc +++ b/docs/examples/0bc6155e0c88062a4d8490da49db3aa8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/retrievers-examples.asciidoc:562 +// search/search-your-data/retrievers-examples.asciidoc:812 [source, python] ---- diff --git a/docs/examples/0bcd380315ef4691b8c79df6ca53a85f.asciidoc b/docs/examples/0bcd380315ef4691b8c79df6ca53a85f.asciidoc index 75c29429f..124a36052 100644 --- a/docs/examples/0bcd380315ef4691b8c79df6ca53a85f.asciidoc +++ b/docs/examples/0bcd380315ef4691b8c79df6ca53a85f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/sort-search-results.asciidoc:395 +// search/search-your-data/sort-search-results.asciidoc:397 [source, python] ---- diff --git a/docs/examples/0bee07a581c5776e068f6f4efad5a399.asciidoc b/docs/examples/0bee07a581c5776e068f6f4efad5a399.asciidoc index 3cc5e8efb..5ef14fcc9 100644 --- a/docs/examples/0bee07a581c5776e068f6f4efad5a399.asciidoc +++ b/docs/examples/0bee07a581c5776e068f6f4efad5a399.asciidoc @@ -1,19 +1,12 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-across-clusters.asciidoc:192 +// esql/esql-across-clusters.asciidoc:194 [source, python] ---- -resp = client.perform_request( - "POST", - "/_query/async", - params={ - "format": "json" - }, - headers={"Content-Type": "application/json"}, - body={ - "query": "\n FROM my-index-000001,cluster_one:my-index-000001,cluster_two:my-index*\n | STATS COUNT(http.response.status_code) BY user.id\n | LIMIT 2\n ", - "include_ccs_metadata": True - }, +resp = client.esql.async_query( + format="json", + query="\n FROM my-index-000001,cluster_one:my-index-000001,cluster_two:my-index*\n | STATS COUNT(http.response.status_code) BY user.id\n | LIMIT 2\n ", + include_ccs_metadata=True, ) print(resp) ---- diff --git a/docs/examples/0d689ac6e78be5d438f9b5d441be2b44.asciidoc b/docs/examples/0d689ac6e78be5d438f9b5d441be2b44.asciidoc index 816c0d5bb..89f2a7eeb 100644 --- a/docs/examples/0d689ac6e78be5d438f9b5d441be2b44.asciidoc +++ b/docs/examples/0d689ac6e78be5d438f9b5d441be2b44.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT 
-// search/search-your-data/retrievers-examples.asciidoc:941 +// search/search-your-data/retrievers-examples.asciidoc:1191 [source, python] ---- diff --git a/docs/examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc b/docs/examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc index 285b99308..eb746abe3 100644 --- a/docs/examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc +++ b/docs/examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc @@ -3,24 +3,20 @@ [source, python] ---- -resp = client.perform_request( - "POST", - "/_application/search_application/my-app/_render_query", - headers={"Content-Type": "application/json"}, - body={ - "params": { - "query_string": "my first query", - "text_fields": [ - { - "name": "title", - "boost": 5 - }, - { - "name": "description", - "boost": 1 - } - ] - } +resp = client.search_application.render_query( + name="my-app", + params={ + "query_string": "my first query", + "text_fields": [ + { + "name": "title", + "boost": 5 + }, + { + "name": "description", + "boost": 1 + } + ] }, ) print(resp) diff --git a/docs/examples/0f028f71f04c1d569fab402869565a84.asciidoc b/docs/examples/0f028f71f04c1d569fab402869565a84.asciidoc new file mode 100644 index 000000000..295df000b --- /dev/null +++ b/docs/examples/0f028f71f04c1d569fab402869565a84.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// migration/migrate_9_0.asciidoc:476 + +[source, python] +---- +resp = client.indices.put_settings( + index=".reindexed-v9-ml-anomalies-custom-example", + settings={ + "index": { + "number_of_replicas": "" + } + }, +) +print(resp) +---- diff --git a/docs/examples/0f2e5e006b663a88ee99b130ab1b4844.asciidoc b/docs/examples/0f2e5e006b663a88ee99b130ab1b4844.asciidoc index c32fb4f97..9b52aa31d 100644 --- a/docs/examples/0f2e5e006b663a88ee99b130ab1b4844.asciidoc +++ b/docs/examples/0f2e5e006b663a88ee99b130ab1b4844.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/sort-search-results.asciidoc:570 +// search/search-your-data/sort-search-results.asciidoc:572 [source, python] ---- diff --git a/docs/examples/0fb7705ddbf1fc2b65d2de2e00fe5769.asciidoc b/docs/examples/0fb7705ddbf1fc2b65d2de2e00fe5769.asciidoc index 5021fce0e..308c9612e 100644 --- a/docs/examples/0fb7705ddbf1fc2b65d2de2e00fe5769.asciidoc +++ b/docs/examples/0fb7705ddbf1fc2b65d2de2e00fe5769.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// aggregations/metrics/scripted-metric-aggregation.asciidoc:61 +// aggregations/metrics/scripted-metric-aggregation.asciidoc:63 [source, python] ---- diff --git a/docs/examples/1147a02afa087278e51fa365fb9e06b7.asciidoc b/docs/examples/1147a02afa087278e51fa365fb9e06b7.asciidoc index 6f2b83e31..6b9ea7a99 100644 --- a/docs/examples/1147a02afa087278e51fa365fb9e06b7.asciidoc +++ b/docs/examples/1147a02afa087278e51fa365fb9e06b7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// api-conventions.asciidoc:345 +// api-conventions.asciidoc:355 [source, python] ---- diff --git a/docs/examples/11e8d6e14686efabb8634b6522c05cb5.asciidoc b/docs/examples/11e8d6e14686efabb8634b6522c05cb5.asciidoc index 9e854328d..88bee88b3 100644 --- a/docs/examples/11e8d6e14686efabb8634b6522c05cb5.asciidoc +++ b/docs/examples/11e8d6e14686efabb8634b6522c05cb5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/highlighting.asciidoc:455 +// search/search-your-data/highlighting.asciidoc:467 [source, python] ---- diff --git a/docs/examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc 
b/docs/examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc index 5fb759d64..dd2c9dc6b 100644 --- a/docs/examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc +++ b/docs/examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc @@ -1,21 +1,18 @@ // This file is autogenerated, DO NOT EDIT -// inference/chat-completion-inference.asciidoc:301 +// inference/chat-completion-inference.asciidoc:305 [source, python] ---- -resp = client.perform_request( - "POST", - "/_inference/chat_completion/openai-completion/_stream", - headers={"Content-Type": "application/json"}, - body={ - "model": "gpt-4o", - "messages": [ - { - "role": "user", - "content": "What is Elastic?" - } - ] - }, +resp = client.inference.stream_inference( + task_type="chat_completion", + inference_id="openai-completion", + model="gpt-4o", + messages=[ + { + "role": "user", + "content": "What is Elastic?" + } + ], ) print(resp) ---- diff --git a/docs/examples/12adea5d76f73d94d80d42f53f67563f.asciidoc b/docs/examples/12adea5d76f73d94d80d42f53f67563f.asciidoc new file mode 100644 index 000000000..18270ad1c --- /dev/null +++ b/docs/examples/12adea5d76f73d94d80d42f53f67563f.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// migration/migrate_9_0.asciidoc:393 + +[source, python] +---- +resp = client.indices.add_block( + index=".ml-anomalies-custom-example", + block="read_only", +) +print(resp) +---- diff --git a/docs/examples/13d91782399ba1f291e103c18b5338cc.asciidoc b/docs/examples/13d91782399ba1f291e103c18b5338cc.asciidoc new file mode 100644 index 000000000..248cdff9a --- /dev/null +++ b/docs/examples/13d91782399ba1f291e103c18b5338cc.asciidoc @@ -0,0 +1,25 @@ +// This file is autogenerated, DO NOT EDIT +// migration/apis/create-index-from-source.asciidoc:94 + +[source, python] +---- +resp = client.indices.create_from( + source="my-index", + dest="my-new-index", + create_from={ + "settings_override": { + "index": { + "number_of_shards": 5 + } + }, + "mappings_override": { + "properties": { + "field2": { + "type": "boolean" + } + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/141ef0ebaa3b0772892b79b9bb85efb0.asciidoc b/docs/examples/141ef0ebaa3b0772892b79b9bb85efb0.asciidoc index 11d594a0f..c6cd62c44 100644 --- a/docs/examples/141ef0ebaa3b0772892b79b9bb85efb0.asciidoc +++ b/docs/examples/141ef0ebaa3b0772892b79b9bb85efb0.asciidoc @@ -3,9 +3,8 @@ [source, python] ---- -resp = client.inference.put( - task_type="my-inference-endpoint", - inference_id="_update", +resp = client.inference.update( + inference_id="my-inference-endpoint", inference_config={ "service_settings": { "api_key": "" diff --git a/docs/examples/15ac33d641b376d9494075eb1f0d4066.asciidoc b/docs/examples/15ac33d641b376d9494075eb1f0d4066.asciidoc new file mode 100644 index 000000000..b6c8f58d1 --- /dev/null +++ b/docs/examples/15ac33d641b376d9494075eb1f0d4066.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// migration/apis/data-stream-reindex.asciidoc:224 + +[source, python] +---- +resp = client.indices.cancel_migrate_reindex( + index="my-data-stream", +) +print(resp) +---- diff --git a/docs/examples/174b93c323aa8e9cc8ee2a3df5736810.asciidoc b/docs/examples/174b93c323aa8e9cc8ee2a3df5736810.asciidoc new file mode 100644 index 000000000..db5b5eda0 --- /dev/null +++ b/docs/examples/174b93c323aa8e9cc8ee2a3df5736810.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// rest-api/security/delegate-pki-authentication.asciidoc:83 + +[source, python] +---- +resp = client.security.delegate_pki( + x509_certificate_chain=[ 
+ "MIIDeDCCAmCgAwIBAgIUBzj/nGGKxP2iXawsSquHmQjCJmMwDQYJKoZIhvcNAQELBQAwUzErMCkGA1UEAxMiRWxhc3RpY3NlYXJjaCBUZXN0IEludGVybWVkaWF0ZSBDQTEWMBQGA1UECxMNRWxhc3RpY3NlYXJjaDEMMAoGA1UEChMDb3JnMB4XDTIzMDcxODE5MjkwNloXDTQzMDcxMzE5MjkwNlowSjEiMCAGA1UEAxMZRWxhc3RpY3NlYXJjaCBUZXN0IENsaWVudDEWMBQGA1UECxMNRWxhc3RpY3NlYXJjaDEMMAoGA1UEChMDb3JnMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAllHL4pQkkfwAm/oLkxYYO+r950DEy1bjH+4viCHzNADLCTWO+lOZJVlNx7QEzJE3QGMdif9CCBBxQFMapA7oUFCLq84fPSQQu5AnvvbltVD9nwVtCs+9ZGDjMKsz98RhSLMFIkxdxi6HkQ3Lfa4ZSI4lvba4oo+T/GveazBDS+NgmKyq00EOXt3tWi1G9vEVItommzXWfv0agJWzVnLMldwkPqsw0W7zrpyT7FZS4iLbQADGceOW8fiauOGMkscu9zAnDR/SbWl/chYioQOdw6ndFLn1YIFPd37xL0WsdsldTpn0vH3YfzgLMffT/3P6YlwBegWzsx6FnM/93Ecb4wIDAQABo00wSzAJBgNVHRMEAjAAMB0GA1UdDgQWBBQKNRwjW+Ad/FN1Rpoqme/5+jrFWzAfBgNVHSMEGDAWgBRcya0c0x/PaI7MbmJVIylWgLqXNjANBgkqhkiG9w0BAQsFAAOCAQEACZ3PF7Uqu47lplXHP6YlzYL2jL0D28hpj5lGtdha4Muw1m/BjDb0Pu8l0NQ1z3AP6AVcvjNDkQq6Y5jeSz0bwQlealQpYfo7EMXjOidrft1GbqOMFmTBLpLA9SvwYGobSTXWTkJzonqVaTcf80HpMgM2uEhodwTcvz6v1WEfeT/HMjmdIsq4ImrOL9RNrcZG6nWfw0HR3JNOgrbfyEztEI471jHznZ336OEcyX7gQuvHE8tOv5+oD1d7s3Xg1yuFp+Ynh+FfOi3hPCuaHA+7F6fLmzMDLVUBAllugst1C3U+L/paD7tqIa4ka+KNPCbSfwazmJrt4XNiivPR4hwH5g==" + ], +) +print(resp) +---- diff --git a/docs/examples/17a1e308761afd3282f13d44d7be008a.asciidoc b/docs/examples/17a1e308761afd3282f13d44d7be008a.asciidoc index ee6770cd1..4fcdf8ed5 100644 --- a/docs/examples/17a1e308761afd3282f13d44d7be008a.asciidoc +++ b/docs/examples/17a1e308761afd3282f13d44d7be008a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/highlighting.asciidoc:687 +// search/search-your-data/highlighting.asciidoc:699 [source, python] ---- diff --git a/docs/examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc b/docs/examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc index 3f54b088b..0789a399a 100644 --- a/docs/examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc +++ b/docs/examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc @@ -3,28 +3,23 @@ [source, python] ---- -resp = client.perform_request( - "POST", - "/_ingest/_simulate", - headers={"Content-Type": "application/json"}, - body={ - "docs": [ - { - "_index": "my-index", - "_id": "123", - "_source": { - "foo": "bar" - } - }, - { - "_index": "my-index", - "_id": "456", - "_source": { - "foo": "rab" - } +resp = client.simulate.ingest( + docs=[ + { + "_index": "my-index", + "_id": "123", + "_source": { + "foo": "bar" } - ] - }, + }, + { + "_index": "my-index", + "_id": "456", + "_source": { + "foo": "rab" + } + } + ], ) print(resp) ---- diff --git a/docs/examples/1a7483796087053ba55029d0dc2ab356.asciidoc b/docs/examples/1a7483796087053ba55029d0dc2ab356.asciidoc index cd3f38242..c48bf98b9 100644 --- a/docs/examples/1a7483796087053ba55029d0dc2ab356.asciidoc +++ b/docs/examples/1a7483796087053ba55029d0dc2ab356.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/multivalued-fields.asciidoc:187 +// esql/multivalued-fields.asciidoc:191 [source, python] ---- diff --git a/docs/examples/1a9efb56adb2cd84faa9825a129381b9.asciidoc b/docs/examples/1a9efb56adb2cd84faa9825a129381b9.asciidoc index 870811976..e0d883c89 100644 --- a/docs/examples/1a9efb56adb2cd84faa9825a129381b9.asciidoc +++ b/docs/examples/1a9efb56adb2cd84faa9825a129381b9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/rollup-search.asciidoc:219 +// rollup/apis/rollup-search.asciidoc:222 [source, python] ---- diff --git a/docs/examples/1ead35c954963e83f89872048dabdbe9.asciidoc 
b/docs/examples/1ead35c954963e83f89872048dabdbe9.asciidoc new file mode 100644 index 000000000..dc8dd1106 --- /dev/null +++ b/docs/examples/1ead35c954963e83f89872048dabdbe9.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// rest-api/security/query-role.asciidoc:137 + +[source, python] +---- +resp = client.security.query_role( + query={ + "bool": { + "must_not": { + "term": { + "metadata._reserved": True + } + } + } + }, + sort=[ + "name" + ], +) +print(resp) +---- diff --git a/docs/examples/1ef5119db55a6f2b6fc0ab92f36e7f8e.asciidoc b/docs/examples/1ef5119db55a6f2b6fc0ab92f36e7f8e.asciidoc index 6456d8322..7de69a88a 100644 --- a/docs/examples/1ef5119db55a6f2b6fc0ab92f36e7f8e.asciidoc +++ b/docs/examples/1ef5119db55a6f2b6fc0ab92f36e7f8e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/sort-search-results.asciidoc:61 +// search/search-your-data/sort-search-results.asciidoc:63 [source, python] ---- diff --git a/docs/examples/22334f4b24bb8977d3e1bf2ffdc29d3f.asciidoc b/docs/examples/22334f4b24bb8977d3e1bf2ffdc29d3f.asciidoc index 496e9e9b5..45a2461c9 100644 --- a/docs/examples/22334f4b24bb8977d3e1bf2ffdc29d3f.asciidoc +++ b/docs/examples/22334f4b24bb8977d3e1bf2ffdc29d3f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/sort-search-results.asciidoc:313 +// search/search-your-data/sort-search-results.asciidoc:315 [source, python] ---- diff --git a/docs/examples/272e27bf1fcc4fe5dbd4092679dd0342.asciidoc b/docs/examples/272e27bf1fcc4fe5dbd4092679dd0342.asciidoc new file mode 100644 index 000000000..f7955285c --- /dev/null +++ b/docs/examples/272e27bf1fcc4fe5dbd4092679dd0342.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// migration/migrate_9_0.asciidoc:604 + +[source, python] +---- +resp = client.indices.add_block( + index=".ml-anomalies-custom-example", + block="write", +) +print(resp) +---- diff --git a/docs/examples/2891aa10ee9d474780adf94d5607f2db.asciidoc b/docs/examples/2891aa10ee9d474780adf94d5607f2db.asciidoc index fe88b0dff..fc9780c02 100644 --- a/docs/examples/2891aa10ee9d474780adf94d5607f2db.asciidoc +++ b/docs/examples/2891aa10ee9d474780adf94d5607f2db.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/sort-search-results.asciidoc:175 +// search/search-your-data/sort-search-results.asciidoc:177 [source, python] ---- diff --git a/docs/examples/29aeabacb1fdf5b083d5f091b6d1bd44.asciidoc b/docs/examples/29aeabacb1fdf5b083d5f091b6d1bd44.asciidoc new file mode 100644 index 000000000..8fd8db4c8 --- /dev/null +++ b/docs/examples/29aeabacb1fdf5b083d5f091b6d1bd44.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// migration/apis/data-stream-reindex.asciidoc:105 + +[source, python] +---- +resp = client.indices.migrate_reindex( + reindex={ + "source": { + "index": "my-data-stream" + }, + "mode": "upgrade" + }, +) +print(resp) +---- diff --git a/docs/examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc b/docs/examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc index e8c3ee1d2..d3fe44432 100644 --- a/docs/examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc +++ b/docs/examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc @@ -3,14 +3,9 @@ [source, python] ---- -resp = client.perform_request( - "POST", - "/_security/oidc/logout", - headers={"Content-Type": "application/json"}, - body={ - "token": "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==", - 
"refresh_token": "vLBPvmAB6KvwvJZr27cS" - }, +resp = client.security.oidc_logout( + token="dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==", + refresh_token="vLBPvmAB6KvwvJZr27cS", ) print(resp) ---- diff --git a/docs/examples/2a71e2d7f7179dd76183d30789046808.asciidoc b/docs/examples/2a71e2d7f7179dd76183d30789046808.asciidoc index 9d9d7f53e..3e5c29b2d 100644 --- a/docs/examples/2a71e2d7f7179dd76183d30789046808.asciidoc +++ b/docs/examples/2a71e2d7f7179dd76183d30789046808.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/multivalued-fields.asciidoc:219 +// esql/multivalued-fields.asciidoc:224 [source, python] ---- diff --git a/docs/examples/2afd49985950cbcccf727fa858d00067.asciidoc b/docs/examples/2afd49985950cbcccf727fa858d00067.asciidoc new file mode 100644 index 000000000..19ceadeaa --- /dev/null +++ b/docs/examples/2afd49985950cbcccf727fa858d00067.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// mapping/types/semantic-text.asciidoc:159 + +[source, python] +---- +resp = client.indices.create( + index="test-index", + query={ + "match": { + "my_field": "Which country is Paris in?" + } + }, + highlight={ + "fields": { + "my_field": { + "type": "semantic", + "number_of_fragments": 2, + "order": "score" + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc b/docs/examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc index f074730ed..2a307f890 100644 --- a/docs/examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc +++ b/docs/examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc @@ -1,14 +1,11 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-rest.asciidoc:383 +// esql/esql-rest.asciidoc:384 [source, python] ---- -resp = client.perform_request( - "GET", - "/_query/async/FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=", - params={ - "wait_for_completion_timeout": "30s" - }, +resp = client.esql.async_query_get( + id="FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=", + wait_for_completion_timeout="30s", ) print(resp) ---- diff --git a/docs/examples/2b7687e3d7c06824950e00618c297864.asciidoc b/docs/examples/2b7687e3d7c06824950e00618c297864.asciidoc index 15d07ba55..6c9103348 100644 --- a/docs/examples/2b7687e3d7c06824950e00618c297864.asciidoc +++ b/docs/examples/2b7687e3d7c06824950e00618c297864.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/resolve-cluster.asciidoc:179 +// indices/resolve-cluster.asciidoc:205 [source, python] ---- diff --git a/docs/examples/2bacdcb278705d944f367cfb984cf4d2.asciidoc b/docs/examples/2bacdcb278705d944f367cfb984cf4d2.asciidoc index 3d9506a77..e5f183126 100644 --- a/docs/examples/2bacdcb278705d944f367cfb984cf4d2.asciidoc +++ b/docs/examples/2bacdcb278705d944f367cfb984cf4d2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/sort-search-results.asciidoc:30 +// search/search-your-data/sort-search-results.asciidoc:32 [source, python] ---- diff --git a/docs/examples/2d150ff3b6b991b58fea6aa5cc669aa3.asciidoc b/docs/examples/2d150ff3b6b991b58fea6aa5cc669aa3.asciidoc index 88454fbda..2c16c93b6 100644 --- a/docs/examples/2d150ff3b6b991b58fea6aa5cc669aa3.asciidoc +++ b/docs/examples/2d150ff3b6b991b58fea6aa5cc669aa3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/match-phrase-query.asciidoc:30 +// query-dsl/match-phrase-query.asciidoc:66 [source, python] ---- diff --git 
a/docs/examples/2ee002e60bd7a38d466e5f0eb0c38946.asciidoc b/docs/examples/2ee002e60bd7a38d466e5f0eb0c38946.asciidoc index 0da4d008c..6fd68fa15 100644 --- a/docs/examples/2ee002e60bd7a38d466e5f0eb0c38946.asciidoc +++ b/docs/examples/2ee002e60bd7a38d466e5f0eb0c38946.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// alias.asciidoc:373 +// alias.asciidoc:375 [source, python] ---- diff --git a/docs/examples/2f72a63c73dd672ac2dc3997ad15dd41.asciidoc b/docs/examples/2f72a63c73dd672ac2dc3997ad15dd41.asciidoc index 57193131c..a5cb08edb 100644 --- a/docs/examples/2f72a63c73dd672ac2dc3997ad15dd41.asciidoc +++ b/docs/examples/2f72a63c73dd672ac2dc3997ad15dd41.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/semantic-text.asciidoc:216 +// mapping/types/semantic-text.asciidoc:242 [source, python] ---- diff --git a/docs/examples/2f9ee29fe49f7d206a41212aa5945296.asciidoc b/docs/examples/2f9ee29fe49f7d206a41212aa5945296.asciidoc new file mode 100644 index 000000000..ec5c59702 --- /dev/null +++ b/docs/examples/2f9ee29fe49f7d206a41212aa5945296.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// migration/apis/create-index-from-source.asciidoc:117 + +[source, python] +---- +resp = client.indices.create_from( + source="my-index", + dest="my-new-index", + create_from={ + "settings_override": { + "index": { + "blocks.write": None, + "blocks.read": None, + "blocks.read_only": None, + "blocks.read_only_allow_delete": None, + "blocks.metadata": None + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/2fa7ded8515b32f26c54394ea598f573.asciidoc b/docs/examples/2fa7ded8515b32f26c54394ea598f573.asciidoc index a444a667b..bc1835a56 100644 --- a/docs/examples/2fa7ded8515b32f26c54394ea598f573.asciidoc +++ b/docs/examples/2fa7ded8515b32f26c54394ea598f573.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/index-templates.asciidoc:121 +// indices/index-templates.asciidoc:123 [source, python] ---- diff --git a/docs/examples/31832bd71c31c46a1ccf8d1c210d89d4.asciidoc b/docs/examples/31832bd71c31c46a1ccf8d1c210d89d4.asciidoc new file mode 100644 index 000000000..367c6cad1 --- /dev/null +++ b/docs/examples/31832bd71c31c46a1ccf8d1c210d89d4.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/search-multiple-indices.asciidoc:51 + +[source, python] +---- +resp = client.search( + index="my-index-*", + query={ + "bool": { + "must": [ + { + "match": { + "user.id": "kimchy" + } + } + ], + "must_not": [ + { + "terms": { + "_index": [ + "my-index-01" + ] + } + } + ] + } + }, +) +print(resp) +---- diff --git a/docs/examples/32123981430e5a8b34fe14314fc48429.asciidoc b/docs/examples/32123981430e5a8b34fe14314fc48429.asciidoc index 0b6646b9d..311b84ef4 100644 --- a/docs/examples/32123981430e5a8b34fe14314fc48429.asciidoc +++ b/docs/examples/32123981430e5a8b34fe14314fc48429.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/search-multiple-indices.asciidoc:11 +// search/search-your-data/search-multiple-indices.asciidoc:17 [source, python] ---- diff --git a/docs/examples/32c8c86702ccd68eb70f1573409c2a1f.asciidoc b/docs/examples/32c8c86702ccd68eb70f1573409c2a1f.asciidoc new file mode 100644 index 000000000..52477014c --- /dev/null +++ b/docs/examples/32c8c86702ccd68eb70f1573409c2a1f.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// ilm/actions/ilm-searchable-snapshot.asciidoc:130 + +[source, python] +---- +resp = 
client.ilm.put_lifecycle( + name="my_policy", + policy={ + "phases": { + "hot": { + "actions": { + "rollover": { + "max_primary_shard_size": "50gb" + }, + "searchable_snapshot": { + "snapshot_repository": "backing_repo", + "replicate_for": "14d" + } + } + }, + "delete": { + "min_age": "28d", + "actions": { + "delete": {} + } + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/3649194a97d265a3bc758f8b38f7561e.asciidoc b/docs/examples/3649194a97d265a3bc758f8b38f7561e.asciidoc new file mode 100644 index 000000000..e2071f790 --- /dev/null +++ b/docs/examples/3649194a97d265a3bc758f8b38f7561e.asciidoc @@ -0,0 +1,21 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/semantic-text-hybrid-search:21 + +[source, python] +---- +resp = client.indices.create( + index="semantic-embeddings", + mappings={ + "properties": { + "semantic_text": { + "type": "semantic_text" + }, + "content": { + "type": "text", + "copy_to": "semantic_text" + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/37f367ca81a16d3aef4ef7126ec33a2e.asciidoc b/docs/examples/36792c81c053e0555407d1e83e7e054f.asciidoc similarity index 93% rename from docs/examples/37f367ca81a16d3aef4ef7126ec33a2e.asciidoc rename to docs/examples/36792c81c053e0555407d1e83e7e054f.asciidoc index 32c67885f..4941727c4 100644 --- a/docs/examples/37f367ca81a16d3aef4ef7126ec33a2e.asciidoc +++ b/docs/examples/36792c81c053e0555407d1e83e7e054f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:429 +// search/retriever.asciidoc:452 [source, python] ---- @@ -9,10 +9,13 @@ resp = client.search( retriever={ "rescorer": { "rescore": { + "window_size": 50, "query": { - "window_size": 50, "rescore_query": { "script_score": { + "query": { + "match_all": {} + }, "script": { "source": "cosineSimilarity(params.queryVector, 'product-vector_final_stage') + 1.0", "params": { diff --git a/docs/examples/3722dad876023e0757138dd5a6d3240e.asciidoc b/docs/examples/3722dad876023e0757138dd5a6d3240e.asciidoc new file mode 100644 index 000000000..4346ba355 --- /dev/null +++ b/docs/examples/3722dad876023e0757138dd5a6d3240e.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// migration/apis/create-index-from-source.asciidoc:63 + +[source, python] +---- +resp = client.indices.create( + index="my-index", + settings={ + "index": { + "number_of_shards": 3, + "blocks.write": True + } + }, + mappings={ + "properties": { + "field1": { + "type": "text" + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/3a204b57072a104d9b50f3a9e064a8f6.asciidoc b/docs/examples/3a204b57072a104d9b50f3a9e064a8f6.asciidoc new file mode 100644 index 000000000..6ebfa63af --- /dev/null +++ b/docs/examples/3a204b57072a104d9b50f3a9e064a8f6.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// migration/migrate_9_0.asciidoc:620 + +[source, python] +---- +resp = client.search( + index=".ml-anomalies-custom-example", + size=0, + aggs={ + "job_ids": { + "terms": { + "field": "job_id", + "size": 100 + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/3a2953fd81d65118a776c87a81530e15.asciidoc b/docs/examples/3a2953fd81d65118a776c87a81530e15.asciidoc index 1264164ad..874f6e65e 100644 --- a/docs/examples/3a2953fd81d65118a776c87a81530e15.asciidoc +++ b/docs/examples/3a2953fd81d65118a776c87a81530e15.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/highlighting.asciidoc:593 +// search/search-your-data/highlighting.asciidoc:605 [source, 
python] ---- diff --git a/docs/examples/3a6238835c7d9f51e6d91f92885fadeb.asciidoc b/docs/examples/3a6238835c7d9f51e6d91f92885fadeb.asciidoc index d35ef64ac..a5c22e4e2 100644 --- a/docs/examples/3a6238835c7d9f51e6d91f92885fadeb.asciidoc +++ b/docs/examples/3a6238835c7d9f51e6d91f92885fadeb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/sort-search-results.asciidoc:11 +// search/search-your-data/sort-search-results.asciidoc:13 [source, python] ---- diff --git a/docs/examples/3bc4a3681e3ea9cb3de49f72085807d8.asciidoc b/docs/examples/3bc4a3681e3ea9cb3de49f72085807d8.asciidoc new file mode 100644 index 000000000..b58b0609d --- /dev/null +++ b/docs/examples/3bc4a3681e3ea9cb3de49f72085807d8.asciidoc @@ -0,0 +1,65 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/retrievers-examples.asciidoc:321 + +[source, python] +---- +resp = client.search( + index="retrievers_example", + retriever={ + "linear": { + "retrievers": [ + { + "retriever": { + "standard": { + "query": { + "function_score": { + "query": { + "term": { + "topic": "ai" + } + }, + "functions": [ + { + "script_score": { + "script": { + "source": "doc['timestamp'].value.millis" + } + } + } + ], + "boost_mode": "replace" + } + }, + "sort": { + "timestamp": { + "order": "asc" + } + } + } + }, + "weight": 2, + "normalizer": "minmax" + }, + { + "retriever": { + "knn": { + "field": "vector", + "query_vector": [ + 0.23, + 0.67, + 0.89 + ], + "k": 3, + "num_candidates": 5 + } + }, + "weight": 1.5 + } + ], + "rank_window_size": 10 + } + }, + source=False, +) +print(resp) +---- diff --git a/docs/examples/3bc872dbcdad8ff02cbaea39e7f38352.asciidoc b/docs/examples/3bc872dbcdad8ff02cbaea39e7f38352.asciidoc index e1218aca3..5016d46ad 100644 --- a/docs/examples/3bc872dbcdad8ff02cbaea39e7f38352.asciidoc +++ b/docs/examples/3bc872dbcdad8ff02cbaea39e7f38352.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/sort-search-results.asciidoc:202 +// search/search-your-data/sort-search-results.asciidoc:204 [source, python] ---- diff --git a/docs/examples/3e278e6c193b4c17dbdc70670e15d78c.asciidoc b/docs/examples/3e278e6c193b4c17dbdc70670e15d78c.asciidoc index 5d41900e4..f0d41dfdc 100644 --- a/docs/examples/3e278e6c193b4c17dbdc70670e15d78c.asciidoc +++ b/docs/examples/3e278e6c193b4c17dbdc70670e15d78c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/highlighting.asciidoc:642 +// search/search-your-data/highlighting.asciidoc:654 [source, python] ---- diff --git a/docs/examples/3ea4c971b3f47735dcc207ee2645fa03.asciidoc b/docs/examples/3ea4c971b3f47735dcc207ee2645fa03.asciidoc index 9aed3ad38..3a9ce3e68 100644 --- a/docs/examples/3ea4c971b3f47735dcc207ee2645fa03.asciidoc +++ b/docs/examples/3ea4c971b3f47735dcc207ee2645fa03.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// alias.asciidoc:418 +// alias.asciidoc:420 [source, python] ---- diff --git a/docs/examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc b/docs/examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc index 1202241d6..fe24eccf5 100644 --- a/docs/examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc +++ b/docs/examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc @@ -3,14 +3,9 @@ [source, python] ---- -resp = client.perform_request( - "POST", - "/_query/async", - headers={"Content-Type": "application/json"}, - body={ - "query": "\n FROM library\n | EVAL year = DATE_TRUNC(1 YEARS, release_date)\n | STATS MAX(page_count) BY year\n | SORT 
year\n | LIMIT 5\n ", - "wait_for_completion_timeout": "2s" - }, +resp = client.esql.async_query( + query="\n FROM library\n | EVAL year = DATE_TRUNC(1 YEARS, release_date)\n | STATS MAX(page_count) BY year\n | SORT year\n | LIMIT 5\n ", + wait_for_completion_timeout="2s", ) print(resp) ---- diff --git a/docs/examples/3f2e5132e35b9e8b3203a4a0541cf0d4.asciidoc b/docs/examples/3f2e5132e35b9e8b3203a4a0541cf0d4.asciidoc index a87fc4d6b..dc4fd9f52 100644 --- a/docs/examples/3f2e5132e35b9e8b3203a4a0541cf0d4.asciidoc +++ b/docs/examples/3f2e5132e35b9e8b3203a4a0541cf0d4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ilm/actions/ilm-searchable-snapshot.asciidoc:96 +// ilm/actions/ilm-searchable-snapshot.asciidoc:103 [source, python] ---- diff --git a/docs/examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc b/docs/examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc index 376f8c0f4..ef7fcf7e1 100644 --- a/docs/examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc +++ b/docs/examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc @@ -3,9 +3,8 @@ [source, python] ---- -resp = client.perform_request( - "GET", - "/_query/async/FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=", +resp = client.esql.async_query_get( + id="FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=", ) print(resp) ---- diff --git a/docs/examples/40c3e7bb1fdc125a1ab21bd7d7326694.asciidoc b/docs/examples/40c3e7bb1fdc125a1ab21bd7d7326694.asciidoc index 1cbdde427..9a580ba46 100644 --- a/docs/examples/40c3e7bb1fdc125a1ab21bd7d7326694.asciidoc +++ b/docs/examples/40c3e7bb1fdc125a1ab21bd7d7326694.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/multivalued-fields.asciidoc:142 +// esql/multivalued-fields.asciidoc:145 [source, python] ---- diff --git a/docs/examples/416a3ba11232d3c078c1c31340cf356f.asciidoc b/docs/examples/416a3ba11232d3c078c1c31340cf356f.asciidoc index e8318037b..94459206c 100644 --- a/docs/examples/416a3ba11232d3c078c1c31340cf356f.asciidoc +++ b/docs/examples/416a3ba11232d3c078c1c31340cf356f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/highlighting.asciidoc:475 +// search/search-your-data/highlighting.asciidoc:487 [source, python] ---- diff --git a/docs/examples/4301cb9d970ec65778f91ce1f438e0d5.asciidoc b/docs/examples/4301cb9d970ec65778f91ce1f438e0d5.asciidoc index 87313bc14..6c3d6250f 100644 --- a/docs/examples/4301cb9d970ec65778f91ce1f438e0d5.asciidoc +++ b/docs/examples/4301cb9d970ec65778f91ce1f438e0d5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// alias.asciidoc:289 +// alias.asciidoc:291 [source, python] ---- diff --git a/docs/examples/433cf45a23decdf3a096016ffaaf26ba.asciidoc b/docs/examples/433cf45a23decdf3a096016ffaaf26ba.asciidoc index 5302d44a4..8bd263b9c 100644 --- a/docs/examples/433cf45a23decdf3a096016ffaaf26ba.asciidoc +++ b/docs/examples/433cf45a23decdf3a096016ffaaf26ba.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// alias.asciidoc:394 +// alias.asciidoc:396 [source, python] ---- diff --git a/docs/examples/43d9e314431336a6f084cea76dfd6489.asciidoc b/docs/examples/43d9e314431336a6f084cea76dfd6489.asciidoc index 251530a9e..c1ec6eb7d 100644 --- a/docs/examples/43d9e314431336a6f084cea76dfd6489.asciidoc +++ b/docs/examples/43d9e314431336a6f084cea76dfd6489.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:247 +// search/retriever.asciidoc:254 [source, python] ---- diff --git 
a/docs/examples/43e86fbaeed068dcc981214338559b5a.asciidoc b/docs/examples/43e86fbaeed068dcc981214338559b5a.asciidoc index d0a87d736..2363196a9 100644 --- a/docs/examples/43e86fbaeed068dcc981214338559b5a.asciidoc +++ b/docs/examples/43e86fbaeed068dcc981214338559b5a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/resolve-cluster.asciidoc:89 +// indices/resolve-cluster.asciidoc:92 [source, python] ---- diff --git a/docs/examples/443dd902f64b3217505c9595839c3b2d.asciidoc b/docs/examples/443dd902f64b3217505c9595839c3b2d.asciidoc index 44612024a..b32cc324f 100644 --- a/docs/examples/443dd902f64b3217505c9595839c3b2d.asciidoc +++ b/docs/examples/443dd902f64b3217505c9595839c3b2d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/search-multiple-indices.asciidoc:104 +// search/search-your-data/search-multiple-indices.asciidoc:138 [source, python] ---- diff --git a/docs/examples/44dfac5bc3131014e2c6bb1ebc76b33d.asciidoc b/docs/examples/44dfac5bc3131014e2c6bb1ebc76b33d.asciidoc index f65120117..eb7296c56 100644 --- a/docs/examples/44dfac5bc3131014e2c6bb1ebc76b33d.asciidoc +++ b/docs/examples/44dfac5bc3131014e2c6bb1ebc76b33d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/sort-search-results.asciidoc:144 +// search/search-your-data/sort-search-results.asciidoc:146 [source, python] ---- diff --git a/docs/examples/45954b8aaedfed57012be8b6538b0a24.asciidoc b/docs/examples/45954b8aaedfed57012be8b6538b0a24.asciidoc index 860441617..cdff938a9 100644 --- a/docs/examples/45954b8aaedfed57012be8b6538b0a24.asciidoc +++ b/docs/examples/45954b8aaedfed57012be8b6538b0a24.asciidoc @@ -1,47 +1,44 @@ // This file is autogenerated, DO NOT EDIT -// inference/chat-completion-inference.asciidoc:352 +// inference/chat-completion-inference.asciidoc:356 [source, python] ---- -resp = client.perform_request( - "POST", - "/_inference/chat_completion/openai-completion/_stream", - headers={"Content-Type": "application/json"}, - body={ - "messages": [ - { - "role": "user", - "content": [ - { - "type": "text", - "text": "What's the price of a scarf?" - } - ] - } - ], - "tools": [ - { - "type": "function", - "function": { - "name": "get_current_price", - "description": "Get the current price of a item", - "parameters": { - "type": "object", - "properties": { - "item": { - "id": "123" - } - } - } +resp = client.inference.stream_inference( + task_type="chat_completion", + inference_id="openai-completion", + messages=[ + { + "role": "user", + "content": [ + { + "type": "text", + "text": "What's the price of a scarf?" 
} - } - ], - "tool_choice": { + ] + } + ], + tools=[ + { "type": "function", "function": { - "name": "get_current_price" + "name": "get_current_price", + "description": "Get the current price of a item", + "parameters": { + "type": "object", + "properties": { + "item": { + "id": "123" + } + } + } } } + ], + tool_choice={ + "type": "function", + "function": { + "name": "get_current_price" + } }, ) print(resp) diff --git a/docs/examples/46b771a9932c3fa6057a7b2679c72ef0.asciidoc b/docs/examples/46b771a9932c3fa6057a7b2679c72ef0.asciidoc new file mode 100644 index 000000000..f9bceeaba --- /dev/null +++ b/docs/examples/46b771a9932c3fa6057a7b2679c72ef0.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// migration/apis/data-stream-reindex.asciidoc:143 + +[source, python] +---- +resp = client.indices.get_migrate_reindex_status( + index="my-data-stream", +) +print(resp) +---- diff --git a/docs/examples/4982c547be1ad9455ae836990aea92c5.asciidoc b/docs/examples/4982c547be1ad9455ae836990aea92c5.asciidoc index c907beaf6..6cb70a9ca 100644 --- a/docs/examples/4982c547be1ad9455ae836990aea92c5.asciidoc +++ b/docs/examples/4982c547be1ad9455ae836990aea92c5.asciidoc @@ -6,6 +6,11 @@ resp = client.ml.start_trained_model_deployment( model_id="my_model", deployment_id="my_model_for_search", + adaptive_allocations={ + "enabled": True, + "min_number_of_allocations": 3, + "max_number_of_allocations": 10 + }, ) print(resp) ---- diff --git a/docs/examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc b/docs/examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc index 3a3a91764..b7583a76d 100644 --- a/docs/examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc +++ b/docs/examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc @@ -1,15 +1,12 @@ // This file is autogenerated, DO NOT EDIT -// inference/stream-inference.asciidoc:88 +// inference/stream-inference.asciidoc:92 [source, python] ---- -resp = client.perform_request( - "POST", - "/_inference/completion/openai-completion/_stream", - headers={"Content-Type": "application/json"}, - body={ - "input": "What is Elastic?" 
- }, +resp = client.inference.stream_inference( + task_type="completion", + inference_id="openai-completion", + input="What is Elastic?", ) print(resp) ---- diff --git a/docs/examples/4de4bb55bbc0a76c75d256f245a3ee3f.asciidoc b/docs/examples/4de4bb55bbc0a76c75d256f245a3ee3f.asciidoc new file mode 100644 index 000000000..11ed7d7dd --- /dev/null +++ b/docs/examples/4de4bb55bbc0a76c75d256f245a3ee3f.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// inference/elastic-infer-service.asciidoc:100 + +[source, python] +---- +resp = client.inference.put( + task_type="sparse_embedding", + inference_id="elser-model-eis", + inference_config={ + "service": "elastic", + "service_settings": { + "model_name": "elser" + } + }, +) +print(resp) +---- diff --git a/docs/examples/4f621ab694f62ddb89e0684a9e76c4d1.asciidoc b/docs/examples/4f621ab694f62ddb89e0684a9e76c4d1.asciidoc index 8e43ea56c..271540047 100644 --- a/docs/examples/4f621ab694f62ddb89e0684a9e76c4d1.asciidoc +++ b/docs/examples/4f621ab694f62ddb89e0684a9e76c4d1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/highlighting.asciidoc:574 +// search/search-your-data/highlighting.asciidoc:586 [source, python] ---- diff --git a/docs/examples/4ff2dcec03fe097075cf1d174a019a1f.asciidoc b/docs/examples/4ff2dcec03fe097075cf1d174a019a1f.asciidoc index ed884ed78..9e3334ae1 100644 --- a/docs/examples/4ff2dcec03fe097075cf1d174a019a1f.asciidoc +++ b/docs/examples/4ff2dcec03fe097075cf1d174a019a1f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/highlighting.asciidoc:709 +// search/search-your-data/highlighting.asciidoc:721 [source, python] ---- diff --git a/docs/examples/532ddf9afdcd0b1c9c0bb331e74d8df3.asciidoc b/docs/examples/532ddf9afdcd0b1c9c0bb331e74d8df3.asciidoc index d42b38962..238c865bc 100644 --- a/docs/examples/532ddf9afdcd0b1c9c0bb331e74d8df3.asciidoc +++ b/docs/examples/532ddf9afdcd0b1c9c0bb331e74d8df3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/sort-search-results.asciidoc:156 +// search/search-your-data/sort-search-results.asciidoc:158 [source, python] ---- diff --git a/docs/examples/537bce129338d9227bccb6a0283dab45.asciidoc b/docs/examples/537bce129338d9227bccb6a0283dab45.asciidoc new file mode 100644 index 000000000..2bae93faa --- /dev/null +++ b/docs/examples/537bce129338d9227bccb6a0283dab45.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// migration/apis/data-stream-reindex.asciidoc:232 + +[source, python] +---- +resp = client.cluster.put_settings( + persistent={ + "migrate.data_stream_reindex_max_request_per_second": 10000 + }, +) +print(resp) +---- diff --git a/docs/examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc b/docs/examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc index 871171fb6..d9f9291ff 100644 --- a/docs/examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc +++ b/docs/examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc @@ -3,15 +3,10 @@ [source, python] ---- -resp = client.perform_request( - "POST", - "/_security/oidc/prepare", - headers={"Content-Type": "application/json"}, - body={ - "realm": "oidc1", - "state": "lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO", - "nonce": "zOBXLJGUooRrbLbQk5YCcyC8AXw3iloynvluYhZ5" - }, +resp = client.security.oidc_prepare_authentication( + realm="oidc1", + state="lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO", + nonce="zOBXLJGUooRrbLbQk5YCcyC8AXw3iloynvluYhZ5", ) print(resp) ---- diff --git 
a/docs/examples/5969c446688c8b326acc80276573e9d2.asciidoc b/docs/examples/5969c446688c8b326acc80276573e9d2.asciidoc index fd1bb3e8e..b166e37cb 100644 --- a/docs/examples/5969c446688c8b326acc80276573e9d2.asciidoc +++ b/docs/examples/5969c446688c8b326acc80276573e9d2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/highlighting.asciidoc:312 +// search/search-your-data/highlighting.asciidoc:324 [source, python] ---- diff --git a/docs/examples/59aa5216630f80c5dc298fc5bba4a819.asciidoc b/docs/examples/59aa5216630f80c5dc298fc5bba4a819.asciidoc new file mode 100644 index 000000000..216f6cbbd --- /dev/null +++ b/docs/examples/59aa5216630f80c5dc298fc5bba4a819.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// migration/migrate_9_0.asciidoc:415 + +[source, python] +---- +resp = client.indices.get_settings( + index=".reindexed-v9-ml-anomalies-custom-example", +) +print(resp) +---- diff --git a/docs/examples/5b2a13366bd4e1ab4b25d04d360570dc.asciidoc b/docs/examples/5b2a13366bd4e1ab4b25d04d360570dc.asciidoc index 270362136..57c176743 100644 --- a/docs/examples/5b2a13366bd4e1ab4b25d04d360570dc.asciidoc +++ b/docs/examples/5b2a13366bd4e1ab4b25d04d360570dc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-component-template.asciidoc:261 +// indices/put-component-template.asciidoc:262 [source, python] ---- diff --git a/docs/examples/5bba213a7f543190139d1a69ab2ed076.asciidoc b/docs/examples/5bba213a7f543190139d1a69ab2ed076.asciidoc index 1bf8744d9..c9c33f40d 100644 --- a/docs/examples/5bba213a7f543190139d1a69ab2ed076.asciidoc +++ b/docs/examples/5bba213a7f543190139d1a69ab2ed076.asciidoc @@ -1,19 +1,12 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-across-clusters.asciidoc:296 +// esql/esql-across-clusters.asciidoc:302 [source, python] ---- -resp = client.perform_request( - "POST", - "/_query/async", - params={ - "format": "json" - }, - headers={"Content-Type": "application/json"}, - body={ - "query": "\n FROM cluster_one:my-index*,cluster_two:logs*\n | STATS COUNT(http.response.status_code) BY user.id\n | LIMIT 2\n ", - "include_ccs_metadata": True - }, +resp = client.esql.async_query( + format="json", + query="\n FROM cluster_one:my-index*,cluster_two:logs*\n | STATS COUNT(http.response.status_code) BY user.id\n | LIMIT 2\n ", + include_ccs_metadata=True, ) print(resp) ---- diff --git a/docs/examples/5ea9da129ca70a5fe534f27a82d80b29.asciidoc b/docs/examples/5ea9da129ca70a5fe534f27a82d80b29.asciidoc index a84be9f9e..eab3c0c2e 100644 --- a/docs/examples/5ea9da129ca70a5fe534f27a82d80b29.asciidoc +++ b/docs/examples/5ea9da129ca70a5fe534f27a82d80b29.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/highlighting.asciidoc:669 +// search/search-your-data/highlighting.asciidoc:681 [source, python] ---- diff --git a/docs/examples/5f3549ac7fee94682ca0d7439eebdd2a.asciidoc b/docs/examples/5f3549ac7fee94682ca0d7439eebdd2a.asciidoc index 7ce071786..895efc693 100644 --- a/docs/examples/5f3549ac7fee94682ca0d7439eebdd2a.asciidoc +++ b/docs/examples/5f3549ac7fee94682ca0d7439eebdd2a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/sort-search-results.asciidoc:233 +// search/search-your-data/sort-search-results.asciidoc:235 [source, python] ---- diff --git a/docs/examples/615dc36f0978c676624fb7d1144b4899.asciidoc b/docs/examples/615dc36f0978c676624fb7d1144b4899.asciidoc new file mode 100644 index 000000000..878cf4851 --- 
/dev/null +++ b/docs/examples/615dc36f0978c676624fb7d1144b4899.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc:69 + +[source, python] +---- +resp = client.indices.get_data_lifecycle_stats( + human=True, + pretty=True, +) +print(resp) +---- diff --git a/docs/examples/642161d70dacf7d153767d37d3726838.asciidoc b/docs/examples/642161d70dacf7d153767d37d3726838.asciidoc index a7c877f17..a6acd8d13 100644 --- a/docs/examples/642161d70dacf7d153767d37d3726838.asciidoc +++ b/docs/examples/642161d70dacf7d153767d37d3726838.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/rollup-index-caps.asciidoc:169 +// rollup/apis/rollup-index-caps.asciidoc:171 [source, python] ---- diff --git a/docs/examples/650a0fb27c66a790c4687267423af1da.asciidoc b/docs/examples/650a0fb27c66a790c4687267423af1da.asciidoc index 8bf376bd6..9a7d80337 100644 --- a/docs/examples/650a0fb27c66a790c4687267423af1da.asciidoc +++ b/docs/examples/650a0fb27c66a790c4687267423af1da.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// alias.asciidoc:102 +// alias.asciidoc:104 [source, python] ---- diff --git a/docs/examples/6636701d31b0c9eb8316f1f8e99cc918.asciidoc b/docs/examples/6636701d31b0c9eb8316f1f8e99cc918.asciidoc index afbe3f43f..fffecee75 100644 --- a/docs/examples/6636701d31b0c9eb8316f1f8e99cc918.asciidoc +++ b/docs/examples/6636701d31b0c9eb8316f1f8e99cc918.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// aggregations/metrics/scripted-metric-aggregation.asciidoc:17 +// aggregations/metrics/scripted-metric-aggregation.asciidoc:19 [source, python] ---- diff --git a/docs/examples/666c420fe61fa122386da3c356a64943.asciidoc b/docs/examples/666c420fe61fa122386da3c356a64943.asciidoc index 4ccb7c58f..484d647be 100644 --- a/docs/examples/666c420fe61fa122386da3c356a64943.asciidoc +++ b/docs/examples/666c420fe61fa122386da3c356a64943.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/sort-search-results.asciidoc:600 +// search/search-your-data/sort-search-results.asciidoc:602 [source, python] ---- diff --git a/docs/examples/66915e95b723ee2f6e5164a94b8f98c1.asciidoc b/docs/examples/66915e95b723ee2f6e5164a94b8f98c1.asciidoc new file mode 100644 index 000000000..61d7fc158 --- /dev/null +++ b/docs/examples/66915e95b723ee2f6e5164a94b8f98c1.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// migration/apis/create-index-from-source.asciidoc:85 + +[source, python] +---- +resp = client.indices.create_from( + source="my-index", + dest="my-new-index", + create_from=None, +) +print(resp) +---- diff --git a/docs/examples/67b71a95b6fe6c83faae51ea038a1bf1.asciidoc b/docs/examples/67b71a95b6fe6c83faae51ea038a1bf1.asciidoc new file mode 100644 index 000000000..f0c9152d8 --- /dev/null +++ b/docs/examples/67b71a95b6fe6c83faae51ea038a1bf1.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// esql/esql-rest.asciidoc:407 + +[source, python] +---- +resp = client.esql.async_query_delete( + id="FmdMX2pIang3UWhLRU5QS0lqdlppYncaMUpYQ05oSkpTc3kwZ21EdC1tbFJXQToxOTI=", +) +print(resp) +---- diff --git a/docs/examples/69541f0bb81ab3797926bb2a00607cda.asciidoc b/docs/examples/69541f0bb81ab3797926bb2a00607cda.asciidoc index 88e55f89c..bd34b9be6 100644 --- a/docs/examples/69541f0bb81ab3797926bb2a00607cda.asciidoc +++ b/docs/examples/69541f0bb81ab3797926bb2a00607cda.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// 
search/retriever.asciidoc:703 +// search/retriever.asciidoc:748 [source, python] ---- diff --git a/docs/examples/698e0a2b67ba7842caa801d9ef46ebe3.asciidoc b/docs/examples/698e0a2b67ba7842caa801d9ef46ebe3.asciidoc index b164965d1..e6bfa74d3 100644 --- a/docs/examples/698e0a2b67ba7842caa801d9ef46ebe3.asciidoc +++ b/docs/examples/698e0a2b67ba7842caa801d9ef46ebe3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/highlighting.asciidoc:499 +// search/search-your-data/highlighting.asciidoc:511 [source, python] ---- diff --git a/docs/examples/69d5710bdec73041c66f21d5f96637e8.asciidoc b/docs/examples/69d5710bdec73041c66f21d5f96637e8.asciidoc index ca1d05204..cfe77f4c1 100644 --- a/docs/examples/69d5710bdec73041c66f21d5f96637e8.asciidoc +++ b/docs/examples/69d5710bdec73041c66f21d5f96637e8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/sort-search-results.asciidoc:214 +// search/search-your-data/sort-search-results.asciidoc:216 [source, python] ---- diff --git a/docs/examples/d29031409016b2b798148ef173a196ae.asciidoc b/docs/examples/6baf72c04d48cb04c2f8be609ff3b3b5.asciidoc similarity index 73% rename from docs/examples/d29031409016b2b798148ef173a196ae.asciidoc rename to docs/examples/6baf72c04d48cb04c2f8be609ff3b3b5.asciidoc index 9a2275025..cd4a70809 100644 --- a/docs/examples/d29031409016b2b798148ef173a196ae.asciidoc +++ b/docs/examples/6baf72c04d48cb04c2f8be609ff3b3b5.asciidoc @@ -3,17 +3,16 @@ [source, python] ---- -resp = client.indices.create( +resp = client.search( index="test-index", query={ - "semantic": { - "field": "my_semantic_field" + "match": { + "my_semantic_field": "Which country is Paris in?" } }, highlight={ "fields": { "my_semantic_field": { - "type": "semantic", "number_of_fragments": 2, "order": "score" } diff --git a/docs/examples/6ce8334def48552ba7d44025580d9105.asciidoc b/docs/examples/6ce8334def48552ba7d44025580d9105.asciidoc index 68fce2f90..3aca2af6a 100644 --- a/docs/examples/6ce8334def48552ba7d44025580d9105.asciidoc +++ b/docs/examples/6ce8334def48552ba7d44025580d9105.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// alias.asciidoc:240 +// alias.asciidoc:242 [source, python] ---- diff --git a/docs/examples/6dcd3916679f6aa64f79524c75991ebd.asciidoc b/docs/examples/6dcd3916679f6aa64f79524c75991ebd.asciidoc index 3e1bbdaa0..3b72c1464 100644 --- a/docs/examples/6dcd3916679f6aa64f79524c75991ebd.asciidoc +++ b/docs/examples/6dcd3916679f6aa64f79524c75991ebd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-rest.asciidoc:247 +// esql/esql-rest.asciidoc:248 [source, python] ---- diff --git a/docs/examples/6e498b9dc753b94abf2618c407fa5cd8.asciidoc b/docs/examples/6e498b9dc753b94abf2618c407fa5cd8.asciidoc new file mode 100644 index 000000000..265795311 --- /dev/null +++ b/docs/examples/6e498b9dc753b94abf2618c407fa5cd8.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// migration/migrate_9_0.asciidoc:453 + +[source, python] +---- +resp = client.reindex( + wait_for_completion=False, + source={ + "index": ".ml-anomalies-custom-example" + }, + dest={ + "index": ".reindexed-v9-ml-anomalies-custom-example" + }, +) +print(resp) +---- diff --git a/docs/examples/6e6b78e6b689a5d6aa637271b6d084e2.asciidoc b/docs/examples/6e6b78e6b689a5d6aa637271b6d084e2.asciidoc index f0612e3b4..14522e393 100644 --- a/docs/examples/6e6b78e6b689a5d6aa637271b6d084e2.asciidoc +++ b/docs/examples/6e6b78e6b689a5d6aa637271b6d084e2.asciidoc @@ -1,5 
+1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:340 +// search/retriever.asciidoc:363 [source, python] ---- diff --git a/docs/examples/6ea062455229151e311869a81ee40252.asciidoc b/docs/examples/6ea062455229151e311869a81ee40252.asciidoc index 95f3a9047..7b0addc75 100644 --- a/docs/examples/6ea062455229151e311869a81ee40252.asciidoc +++ b/docs/examples/6ea062455229151e311869a81ee40252.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/search-multiple-indices.asciidoc:49 +// search/search-your-data/search-multiple-indices.asciidoc:83 [source, python] ---- diff --git a/docs/examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc b/docs/examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc index 4ffc08b07..cef497432 100644 --- a/docs/examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc +++ b/docs/examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc @@ -3,16 +3,11 @@ [source, python] ---- -resp = client.perform_request( - "POST", - "/_security/api_key/_bulk_update", - headers={"Content-Type": "application/json"}, - body={ - "ids": [ - "VuaCfGcBCdbkQm-e5aOx", - "H3_AhoIBA9hmeQJdg7ij" - ] - }, +resp = client.security.bulk_update_api_keys( + ids=[ + "VuaCfGcBCdbkQm-e5aOx", + "H3_AhoIBA9hmeQJdg7ij" + ], ) print(resp) ---- diff --git a/docs/examples/73d1a6c5ef90b7e35d43a0bfdc1e158d.asciidoc b/docs/examples/73d1a6c5ef90b7e35d43a0bfdc1e158d.asciidoc index 41b4fc221..db79aaa94 100644 --- a/docs/examples/73d1a6c5ef90b7e35d43a0bfdc1e158d.asciidoc +++ b/docs/examples/73d1a6c5ef90b7e35d43a0bfdc1e158d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/rollup-index-caps.asciidoc:94 +// rollup/apis/rollup-index-caps.asciidoc:95 [source, python] ---- diff --git a/docs/examples/744aeb2af40f519e430e21e004e3c3b7.asciidoc b/docs/examples/744aeb2af40f519e430e21e004e3c3b7.asciidoc index 73bd36aa9..952a42b78 100644 --- a/docs/examples/744aeb2af40f519e430e21e004e3c3b7.asciidoc +++ b/docs/examples/744aeb2af40f519e430e21e004e3c3b7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/multivalued-fields.asciidoc:97 +// esql/multivalued-fields.asciidoc:99 [source, python] ---- diff --git a/docs/examples/75e360d03fb416f0a65ca37c662c2e9c.asciidoc b/docs/examples/75e360d03fb416f0a65ca37c662c2e9c.asciidoc index 4cccf15fc..2e00f18c9 100644 --- a/docs/examples/75e360d03fb416f0a65ca37c662c2e9c.asciidoc +++ b/docs/examples/75e360d03fb416f0a65ca37c662c2e9c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// aggregations/metrics/scripted-metric-aggregation.asciidoc:157 +// aggregations/metrics/scripted-metric-aggregation.asciidoc:159 [source, python] ---- diff --git a/docs/examples/75e6d66e94e61bd8a555beaaee255c36.asciidoc b/docs/examples/75e6d66e94e61bd8a555beaaee255c36.asciidoc index f1e5b8201..bfb1cadf1 100644 --- a/docs/examples/75e6d66e94e61bd8a555beaaee255c36.asciidoc +++ b/docs/examples/75e6d66e94e61bd8a555beaaee255c36.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/rollup-search.asciidoc:176 +// rollup/apis/rollup-search.asciidoc:178 [source, python] ---- diff --git a/docs/examples/76e02434835630cb830724beb92df354.asciidoc b/docs/examples/76e02434835630cb830724beb92df354.asciidoc index 886da7d4d..cc7602fe4 100644 --- a/docs/examples/76e02434835630cb830724beb92df354.asciidoc +++ b/docs/examples/76e02434835630cb830724beb92df354.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// 
search/search-your-data/retrievers-examples.asciidoc:1183 +// search/search-your-data/retrievers-examples.asciidoc:1433 [source, python] ---- diff --git a/docs/examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc b/docs/examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc index f62ff2428..f68073684 100644 --- a/docs/examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc +++ b/docs/examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc @@ -3,36 +3,31 @@ [source, python] ---- -resp = client.perform_request( - "POST", - "/_text_structure/find_message_structure", - headers={"Content-Type": "application/json"}, - body={ - "messages": [ - "[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128", - "[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]", - "[2024-03-05T10:52:41,042][INFO ][o.e.p.PluginsService ] [laptop] loaded module [rest-root]", - "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-core]", - "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-redact]", - "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [ingest-user-agent]", - "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-monitoring]", - "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-s3]", - "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-analytics]", - "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-ent-search]", - "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-autoscaling]", - "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-painless]]", - "[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-expression]", - "[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-eql]", - "[2024-03-05T10:52:43,291][INFO ][o.e.e.NodeEnvironment ] [laptop] heap size [16gb], compressed ordinary object pointers [true]", - "[2024-03-05T10:52:46,098][INFO ][o.e.x.s.Security ] [laptop] Security is enabled", - "[2024-03-05T10:52:47,227][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] Profiling is enabled", - "[2024-03-05T10:52:47,259][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] profiling index templates will not be installed or reinstalled", - "[2024-03-05T10:52:47,755][INFO ][o.e.i.r.RecoverySettings ] [laptop] using rate limit [40mb] with [default=40mb, read=0b, write=0b, max=0b]", - "[2024-03-05T10:52:47,787][INFO ][o.e.d.DiscoveryModule ] [laptop] using discovery type [multi-node] and seed hosts providers [settings]", - "[2024-03-05T10:52:49,188][INFO ][o.e.n.Node ] [laptop] initialized", - "[2024-03-05T10:52:49,199][INFO ][o.e.n.Node ] [laptop] starting ..." 
- ] - }, +resp = client.text_structure.find_message_structure( + messages=[ + "[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128", + "[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]", + "[2024-03-05T10:52:41,042][INFO ][o.e.p.PluginsService ] [laptop] loaded module [rest-root]", + "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-core]", + "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-redact]", + "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [ingest-user-agent]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-monitoring]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-s3]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-analytics]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-ent-search]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-autoscaling]", + "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-painless]]", + "[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-expression]", + "[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-eql]", + "[2024-03-05T10:52:43,291][INFO ][o.e.e.NodeEnvironment ] [laptop] heap size [16gb], compressed ordinary object pointers [true]", + "[2024-03-05T10:52:46,098][INFO ][o.e.x.s.Security ] [laptop] Security is enabled", + "[2024-03-05T10:52:47,227][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] Profiling is enabled", + "[2024-03-05T10:52:47,259][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] profiling index templates will not be installed or reinstalled", + "[2024-03-05T10:52:47,755][INFO ][o.e.i.r.RecoverySettings ] [laptop] using rate limit [40mb] with [default=40mb, read=0b, write=0b, max=0b]", + "[2024-03-05T10:52:47,787][INFO ][o.e.d.DiscoveryModule ] [laptop] using discovery type [multi-node] and seed hosts providers [settings]", + "[2024-03-05T10:52:49,188][INFO ][o.e.n.Node ] [laptop] initialized", + "[2024-03-05T10:52:49,199][INFO ][o.e.n.Node ] [laptop] starting ..." 
+ ], ) print(resp) ---- diff --git a/docs/examples/78043831fd32004a82930c8ac8a1d809.asciidoc b/docs/examples/78043831fd32004a82930c8ac8a1d809.asciidoc index f2b32c69a..5a0d6d76f 100644 --- a/docs/examples/78043831fd32004a82930c8ac8a1d809.asciidoc +++ b/docs/examples/78043831fd32004a82930c8ac8a1d809.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/retrievers-examples.asciidoc:1128 +// search/search-your-data/retrievers-examples.asciidoc:1378 [source, python] ---- diff --git a/docs/examples/78e20b4cff470ed7357de1fd74bcfeb7.asciidoc b/docs/examples/78e20b4cff470ed7357de1fd74bcfeb7.asciidoc index b14f14162..ff8f36636 100644 --- a/docs/examples/78e20b4cff470ed7357de1fd74bcfeb7.asciidoc +++ b/docs/examples/78e20b4cff470ed7357de1fd74bcfeb7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// alias.asciidoc:135 +// alias.asciidoc:137 [source, python] ---- diff --git a/docs/examples/79d206a528be704050a437adce2496dd.asciidoc b/docs/examples/79d206a528be704050a437adce2496dd.asciidoc index 25b3aae32..0a1f7c9ca 100644 --- a/docs/examples/79d206a528be704050a437adce2496dd.asciidoc +++ b/docs/examples/79d206a528be704050a437adce2496dd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:584 +// search/retriever.asciidoc:629 [source, python] ---- diff --git a/docs/examples/7b5c231526846f2f7b98d78f3656ae6a.asciidoc b/docs/examples/7b5c231526846f2f7b98d78f3656ae6a.asciidoc index 62de86215..2127f437f 100644 --- a/docs/examples/7b5c231526846f2f7b98d78f3656ae6a.asciidoc +++ b/docs/examples/7b5c231526846f2f7b98d78f3656ae6a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update.asciidoc:368 +// docs/update.asciidoc:364 [source, python] ---- diff --git a/docs/examples/7ba29f0be2297b54a640b0a17d7ef5ca.asciidoc b/docs/examples/7ba29f0be2297b54a640b0a17d7ef5ca.asciidoc index 1ed294e9c..aff1144dd 100644 --- a/docs/examples/7ba29f0be2297b54a640b0a17d7ef5ca.asciidoc +++ b/docs/examples/7ba29f0be2297b54a640b0a17d7ef5ca.asciidoc @@ -3,9 +3,8 @@ [source, python] ---- -resp = client.perform_request( - "DELETE", - "/_ingest/ip_location/database/my-database-id", +resp = client.ingest.delete_ip_location_database( + id="my-database-id", ) print(resp) ---- diff --git a/docs/examples/7bdc283b96c7a965fae23013647b8578.asciidoc b/docs/examples/7bdc283b96c7a965fae23013647b8578.asciidoc index b71f4fa2e..2bc21bbf0 100644 --- a/docs/examples/7bdc283b96c7a965fae23013647b8578.asciidoc +++ b/docs/examples/7bdc283b96c7a965fae23013647b8578.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/semantic-text.asciidoc:194 +// mapping/types/semantic-text.asciidoc:220 [source, python] ---- diff --git a/docs/examples/7daff6b7e668ab8a762b8ab5dff7a167.asciidoc b/docs/examples/7daff6b7e668ab8a762b8ab5dff7a167.asciidoc index 9b796b318..75048641a 100644 --- a/docs/examples/7daff6b7e668ab8a762b8ab5dff7a167.asciidoc +++ b/docs/examples/7daff6b7e668ab8a762b8ab5dff7a167.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/sparse-vector-query.asciidoc:257 +// query-dsl/sparse-vector-query.asciidoc:260 [source, python] ---- diff --git a/docs/examples/7f2d511cb64743c006225e5933a14bb4.asciidoc b/docs/examples/7f2d511cb64743c006225e5933a14bb4.asciidoc index 4412a2a08..60a3269ff 100644 --- a/docs/examples/7f2d511cb64743c006225e5933a14bb4.asciidoc +++ b/docs/examples/7f2d511cb64743c006225e5933a14bb4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT 
-// esql/esql-across-clusters.asciidoc:67 +// esql/esql-across-clusters.asciidoc:69 [source, python] ---- diff --git a/docs/examples/7fde3ff91c4a2e7080444af37d5cd287.asciidoc b/docs/examples/7fde3ff91c4a2e7080444af37d5cd287.asciidoc index 0c25a55f4..649738ffe 100644 --- a/docs/examples/7fde3ff91c4a2e7080444af37d5cd287.asciidoc +++ b/docs/examples/7fde3ff91c4a2e7080444af37d5cd287.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-rest.asciidoc:288 +// esql/esql-rest.asciidoc:289 [source, python] ---- diff --git a/docs/examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc b/docs/examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc index 702838d56..6a24cfa16 100644 --- a/docs/examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc +++ b/docs/examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc @@ -3,40 +3,35 @@ [source, python] ---- -resp = client.perform_request( - "POST", - "/_security/api_key/_bulk_update", - headers={"Content-Type": "application/json"}, - body={ - "ids": [ - "VuaCfGcBCdbkQm-e5aOx", - "H3_AhoIBA9hmeQJdg7ij" - ], - "role_descriptors": { - "role-a": { - "indices": [ - { - "names": [ - "*" - ], - "privileges": [ - "write" - ] - } - ] - } - }, - "metadata": { - "environment": { - "level": 2, - "trusted": True, - "tags": [ - "production" - ] - } - }, - "expiration": "30d" +resp = client.security.bulk_update_api_keys( + ids=[ + "VuaCfGcBCdbkQm-e5aOx", + "H3_AhoIBA9hmeQJdg7ij" + ], + role_descriptors={ + "role-a": { + "indices": [ + { + "names": [ + "*" + ], + "privileges": [ + "write" + ] + } + ] + } }, + metadata={ + "environment": { + "level": 2, + "trusted": True, + "tags": [ + "production" + ] + } + }, + expiration="30d", ) print(resp) ---- diff --git a/docs/examples/828f0045747fde4888a947bb99e190e3.asciidoc b/docs/examples/828f0045747fde4888a947bb99e190e3.asciidoc index 5e62a1545..b0c0af2a7 100644 --- a/docs/examples/828f0045747fde4888a947bb99e190e3.asciidoc +++ b/docs/examples/828f0045747fde4888a947bb99e190e3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:792 +// search/retriever.asciidoc:837 [source, python] ---- diff --git a/docs/examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc b/docs/examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc index f5e9f0231..8bbb6682c 100644 --- a/docs/examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc +++ b/docs/examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc @@ -1,35 +1,32 @@ // This file is autogenerated, DO NOT EDIT -// inference/chat-completion-inference.asciidoc:318 +// inference/chat-completion-inference.asciidoc:322 [source, python] ---- -resp = client.perform_request( - "POST", - "/_inference/chat_completion/openai-completion/_stream", - headers={"Content-Type": "application/json"}, - body={ - "messages": [ - { - "role": "assistant", - "content": "Let's find out what the weather is", - "tool_calls": [ - { - "id": "call_KcAjWtAww20AihPHphUh46Gd", - "type": "function", - "function": { - "name": "get_current_weather", - "arguments": "{\"location\":\"Boston, MA\"}" - } +resp = client.inference.stream_inference( + task_type="chat_completion", + inference_id="openai-completion", + messages=[ + { + "role": "assistant", + "content": "Let's find out what the weather is", + "tool_calls": [ + { + "id": "call_KcAjWtAww20AihPHphUh46Gd", + "type": "function", + "function": { + "name": "get_current_weather", + "arguments": "{\"location\":\"Boston, MA\"}" } - ] - }, - { - "role": "tool", - "content": "The weather is cold", - "tool_call_id": "call_KcAjWtAww20AihPHphUh46Gd" - } - 
] - }, + } + ] + }, + { + "role": "tool", + "content": "The weather is cold", + "tool_call_id": "call_KcAjWtAww20AihPHphUh46Gd" + } + ], ) print(resp) ---- diff --git a/docs/examples/84465de841fe5c6099a0382f786f2cb8.asciidoc b/docs/examples/84465de841fe5c6099a0382f786f2cb8.asciidoc index e94fea979..733c7f6c0 100644 --- a/docs/examples/84465de841fe5c6099a0382f786f2cb8.asciidoc +++ b/docs/examples/84465de841fe5c6099a0382f786f2cb8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// alias.asciidoc:74 +// alias.asciidoc:76 [source, python] ---- diff --git a/docs/examples/853fc710cea79fb4e1a85fb6d149f9c5.asciidoc b/docs/examples/853fc710cea79fb4e1a85fb6d149f9c5.asciidoc index e41858318..12888ed06 100644 --- a/docs/examples/853fc710cea79fb4e1a85fb6d149f9c5.asciidoc +++ b/docs/examples/853fc710cea79fb4e1a85fb6d149f9c5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:831 +// search/retriever.asciidoc:876 [source, python] ---- diff --git a/docs/examples/8621c05cc7cf3880bde751f6670a0c3a.asciidoc b/docs/examples/8621c05cc7cf3880bde751f6670a0c3a.asciidoc new file mode 100644 index 000000000..d4812b0ab --- /dev/null +++ b/docs/examples/8621c05cc7cf3880bde751f6670a0c3a.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// migration/migrate_9_0.asciidoc:439 + +[source, python] +---- +resp = client.indices.put_settings( + index=".reindexed-v9-ml-anomalies-custom-example", + settings={ + "index": { + "number_of_replicas": 0 + } + }, +) +print(resp) +---- diff --git a/docs/examples/87457bb3467484bec3e9df4e25942ba6.asciidoc b/docs/examples/87457bb3467484bec3e9df4e25942ba6.asciidoc index 484399a22..02f191745 100644 --- a/docs/examples/87457bb3467484bec3e9df4e25942ba6.asciidoc +++ b/docs/examples/87457bb3467484bec3e9df4e25942ba6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/multivalued-fields.asciidoc:269 +// esql/multivalued-fields.asciidoc:275 [source, python] ---- diff --git a/docs/examples/88b19973b970adf9b73fca82017d4951.asciidoc b/docs/examples/88b19973b970adf9b73fca82017d4951.asciidoc index bb844e3be..9c389814b 100644 --- a/docs/examples/88b19973b970adf9b73fca82017d4951.asciidoc +++ b/docs/examples/88b19973b970adf9b73fca82017d4951.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/search-multiple-indices.asciidoc:30 +// search/search-your-data/search-multiple-indices.asciidoc:36 [source, python] ---- diff --git a/docs/examples/89f547649895176c246bb8c41313ff21.asciidoc b/docs/examples/89f547649895176c246bb8c41313ff21.asciidoc new file mode 100644 index 000000000..58b50c14d --- /dev/null +++ b/docs/examples/89f547649895176c246bb8c41313ff21.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// esql/esql-syntax.asciidoc:202 + +[source, python] +---- +resp = client.esql.query( + query="\nFROM library\n| EVAL year = DATE_EXTRACT(\"year\", release_date)\n| WHERE page_count > ? 
AND match(author, ?, {\"minimum_should_match\": ?})\n| LIMIT 5\n", + params=[ + 300, + "Frank Herbert", + 2 + ], +) +print(resp) +---- diff --git a/docs/examples/8a1b6eae4893c5dd27b3d81fd8d70f5b.asciidoc b/docs/examples/8a1b6eae4893c5dd27b3d81fd8d70f5b.asciidoc index 8a6c13fbf..c46aba59b 100644 --- a/docs/examples/8a1b6eae4893c5dd27b3d81fd8d70f5b.asciidoc +++ b/docs/examples/8a1b6eae4893c5dd27b3d81fd8d70f5b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/semantic-search-elser.asciidoc:157 +// migration/migrate_9_0.asciidoc:467 [source, python] ---- diff --git a/docs/examples/8b144b3eb20872595fd7cbc6c245c7c8.asciidoc b/docs/examples/8b144b3eb20872595fd7cbc6c245c7c8.asciidoc deleted file mode 100644 index 8573b7472..000000000 --- a/docs/examples/8b144b3eb20872595fd7cbc6c245c7c8.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// rest-api/security/query-role.asciidoc:133 - -[source, python] ----- -resp = client.security.query_role( - sort=[ - "name" - ], -) -print(resp) ----- diff --git a/docs/examples/8c47c80139f40f25db44f5781ca2dfbe.asciidoc b/docs/examples/8c47c80139f40f25db44f5781ca2dfbe.asciidoc new file mode 100644 index 000000000..8503017e7 --- /dev/null +++ b/docs/examples/8c47c80139f40f25db44f5781ca2dfbe.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// migration/migrate_9_0.asciidoc:491 + +[source, python] +---- +resp = client.indices.get_alias( + index=".ml-anomalies-custom-example", +) +print(resp) +---- diff --git a/docs/examples/9169d19a80175ec94f80865d0f9bef4c.asciidoc b/docs/examples/9169d19a80175ec94f80865d0f9bef4c.asciidoc index 6e0eedc2a..850788ba4 100644 --- a/docs/examples/9169d19a80175ec94f80865d0f9bef4c.asciidoc +++ b/docs/examples/9169d19a80175ec94f80865d0f9bef4c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:291 +// search/retriever.asciidoc:314 [source, python] ---- diff --git a/docs/examples/91e106a2affbc8df32cd940684a779ed.asciidoc b/docs/examples/91e106a2affbc8df32cd940684a779ed.asciidoc index acc46409b..fa050f564 100644 --- a/docs/examples/91e106a2affbc8df32cd940684a779ed.asciidoc +++ b/docs/examples/91e106a2affbc8df32cd940684a779ed.asciidoc @@ -3,11 +3,9 @@ [source, python] ---- -resp = client.perform_request( - "PUT", - "/_ingest/ip_location/database/my-database-1", - headers={"Content-Type": "application/json"}, - body={ +resp = client.ingest.put_ip_location_database( + id="my-database-1", + configuration={ "name": "GeoIP2-Domain", "maxmind": { "account_id": "1234567" diff --git a/docs/examples/927b20a221f975b75d1227b67d0eb7e2.asciidoc b/docs/examples/927b20a221f975b75d1227b67d0eb7e2.asciidoc index 5983ceb0a..5ba6e11be 100644 --- a/docs/examples/927b20a221f975b75d1227b67d0eb7e2.asciidoc +++ b/docs/examples/927b20a221f975b75d1227b67d0eb7e2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-rest.asciidoc:267 +// esql/esql-rest.asciidoc:268 [source, python] ---- diff --git a/docs/examples/944806221eb89f5af2298ccdf2902277.asciidoc b/docs/examples/944806221eb89f5af2298ccdf2902277.asciidoc index be677f846..896a4c10f 100644 --- a/docs/examples/944806221eb89f5af2298ccdf2902277.asciidoc +++ b/docs/examples/944806221eb89f5af2298ccdf2902277.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/rollup-caps.asciidoc:169 +// rollup/apis/rollup-caps.asciidoc:171 [source, python] ---- diff --git a/docs/examples/947efe87db7f8813c0878f8affc3e2d1.asciidoc 
b/docs/examples/947efe87db7f8813c0878f8affc3e2d1.asciidoc new file mode 100644 index 000000000..43dfda185 --- /dev/null +++ b/docs/examples/947efe87db7f8813c0878f8affc3e2d1.asciidoc @@ -0,0 +1,8 @@ +// This file is autogenerated, DO NOT EDIT +// indices/resolve-cluster.asciidoc:83 + +[source, python] +---- +resp = client.indices.resolve_cluster() +print(resp) +---- diff --git a/docs/examples/948418e0ef1b7e7cfee2f11be715d7d2.asciidoc b/docs/examples/948418e0ef1b7e7cfee2f11be715d7d2.asciidoc index 3abdfaedb..700fe6f3a 100644 --- a/docs/examples/948418e0ef1b7e7cfee2f11be715d7d2.asciidoc +++ b/docs/examples/948418e0ef1b7e7cfee2f11be715d7d2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/retrievers-examples.asciidoc:465 +// search/search-your-data/retrievers-examples.asciidoc:715 [source, python] ---- diff --git a/docs/examples/95c1b376652533c352bbf793c74d1b08.asciidoc b/docs/examples/95c1b376652533c352bbf793c74d1b08.asciidoc index a0778d4b3..273d0eb91 100644 --- a/docs/examples/95c1b376652533c352bbf793c74d1b08.asciidoc +++ b/docs/examples/95c1b376652533c352bbf793c74d1b08.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/query-role.asciidoc:233 +// rest-api/security/query-role.asciidoc:247 [source, python] ---- diff --git a/docs/examples/971fd23adb81bb5842c7750e0379336a.asciidoc b/docs/examples/971fd23adb81bb5842c7750e0379336a.asciidoc index 502b26032..a7eb8de57 100644 --- a/docs/examples/971fd23adb81bb5842c7750e0379336a.asciidoc +++ b/docs/examples/971fd23adb81bb5842c7750e0379336a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:719 +// search/retriever.asciidoc:764 [source, python] ---- diff --git a/docs/examples/97c6c07f46f4177f0565a04bc50924a3.asciidoc b/docs/examples/97c6c07f46f4177f0565a04bc50924a3.asciidoc index d6d19299d..35f2440da 100644 --- a/docs/examples/97c6c07f46f4177f0565a04bc50924a3.asciidoc +++ b/docs/examples/97c6c07f46f4177f0565a04bc50924a3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/retrievers-examples.asciidoc:105 +// search/search-your-data/retrievers-examples.asciidoc:113 [source, python] ---- diff --git a/docs/examples/991b9ba53f0eccec8ec5a42f8d9b655c.asciidoc b/docs/examples/991b9ba53f0eccec8ec5a42f8d9b655c.asciidoc index 4129bbe4c..04eac66eb 100644 --- a/docs/examples/991b9ba53f0eccec8ec5a42f8d9b655c.asciidoc +++ b/docs/examples/991b9ba53f0eccec8ec5a42f8d9b655c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/highlighting.asciidoc:616 +// search/search-your-data/highlighting.asciidoc:628 [source, python] ---- diff --git a/docs/examples/99fb82d49ac477e6a9dfdd71f9465374.asciidoc b/docs/examples/99fb82d49ac477e6a9dfdd71f9465374.asciidoc index df9b99ed6..97135e772 100644 --- a/docs/examples/99fb82d49ac477e6a9dfdd71f9465374.asciidoc +++ b/docs/examples/99fb82d49ac477e6a9dfdd71f9465374.asciidoc @@ -3,9 +3,8 @@ [source, python] ---- -resp = client.perform_request( - "DELETE", - "/_ingest/ip_location/database/example-database-id", +resp = client.ingest.delete_ip_location_database( + id="example-database-id", ) print(resp) ---- diff --git a/docs/examples/9afa0844883b7471883aa378a8dd10b4.asciidoc b/docs/examples/9afa0844883b7471883aa378a8dd10b4.asciidoc index b8c42b55b..331a8522d 100644 --- a/docs/examples/9afa0844883b7471883aa378a8dd10b4.asciidoc +++ b/docs/examples/9afa0844883b7471883aa378a8dd10b4.asciidoc @@ -3,11 +3,10 @@ [source, python] ---- 
-resp = client.perform_request( - "POST", - "/_application/analytics/my_analytics_collection/event/search_click", - headers={"Content-Type": "application/json"}, - body={ +resp = client.search_application.post_behavioral_analytics_event( + collection_name="my_analytics_collection", + event_type="search_click", + payload={ "session": { "id": "1797ca95-91c9-4e2e-b1bd-9c38e6f386a9" }, diff --git a/docs/examples/9beb260834f8cfb240f6308950dbb9c2.asciidoc b/docs/examples/9beb260834f8cfb240f6308950dbb9c2.asciidoc index 4e029bc94..aaa229972 100644 --- a/docs/examples/9beb260834f8cfb240f6308950dbb9c2.asciidoc +++ b/docs/examples/9beb260834f8cfb240f6308950dbb9c2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/sort-search-results.asciidoc:521 +// search/search-your-data/sort-search-results.asciidoc:523 [source, python] ---- diff --git a/docs/examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc b/docs/examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc index 227cb5a9e..425a0e6c8 100644 --- a/docs/examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc +++ b/docs/examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc @@ -3,16 +3,11 @@ [source, python] ---- -resp = client.perform_request( - "POST", - "/_security/oidc/authenticate", - headers={"Content-Type": "application/json"}, - body={ - "redirect_uri": "https://oidc-kibana.elastic.co:5603/api/security/oidc/callback?code=jtI3Ntt8v3_XvcLzCFGq&state=4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I", - "state": "4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I", - "nonce": "WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM", - "realm": "oidc1" - }, +resp = client.security.oidc_authenticate( + redirect_uri="https://oidc-kibana.elastic.co:5603/api/security/oidc/callback?code=jtI3Ntt8v3_XvcLzCFGq&state=4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I", + state="4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I", + nonce="WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM", + realm="oidc1", ) print(resp) ---- diff --git a/docs/examples/9de4edafd22a8b9cb557632b2c8779cd.asciidoc b/docs/examples/9de4edafd22a8b9cb557632b2c8779cd.asciidoc index f0d7cb7d0..435e9f608 100644 --- a/docs/examples/9de4edafd22a8b9cb557632b2c8779cd.asciidoc +++ b/docs/examples/9de4edafd22a8b9cb557632b2c8779cd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-rest.asciidoc:308 +// esql/esql-rest.asciidoc:309 [source, python] ---- diff --git a/docs/examples/a00311843b5f8f3e9f7d511334a828b1.asciidoc b/docs/examples/a00311843b5f8f3e9f7d511334a828b1.asciidoc index 5802d6251..9d5f860ed 100644 --- a/docs/examples/a00311843b5f8f3e9f7d511334a828b1.asciidoc +++ b/docs/examples/a00311843b5f8f3e9f7d511334a828b1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/rollup-caps.asciidoc:97 +// rollup/apis/rollup-caps.asciidoc:98 [source, python] ---- diff --git a/docs/examples/a162eb50853331c80596f5994e9d1c38.asciidoc b/docs/examples/a162eb50853331c80596f5994e9d1c38.asciidoc index f8772be64..407bfdda7 100644 --- a/docs/examples/a162eb50853331c80596f5994e9d1c38.asciidoc +++ b/docs/examples/a162eb50853331c80596f5994e9d1c38.asciidoc @@ -3,14 +3,10 @@ [source, python] ---- -resp = client.perform_request( - "POST", - "/_application/search_application/my_search_application/_render_query", - headers={"Content-Type": "application/json"}, - body={ - "params": { - "query_string": "rock climbing" - } +resp = client.search_application.render_query( + name="my_search_application", + params={ + "query_string": "rock climbing" }, ) print(resp) diff 
--git a/docs/examples/a1dda7e7c01be96a4acf7b725d70385f.asciidoc b/docs/examples/a1dda7e7c01be96a4acf7b725d70385f.asciidoc index 743681aa0..304ea62c6 100644 --- a/docs/examples/a1dda7e7c01be96a4acf7b725d70385f.asciidoc +++ b/docs/examples/a1dda7e7c01be96a4acf7b725d70385f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:639 +// search/retriever.asciidoc:684 [source, python] ---- diff --git a/docs/examples/a34e758e019f563d323ca90ad9fd6e3e.asciidoc b/docs/examples/a34e758e019f563d323ca90ad9fd6e3e.asciidoc index 366706ece..7571d51c7 100644 --- a/docs/examples/a34e758e019f563d323ca90ad9fd6e3e.asciidoc +++ b/docs/examples/a34e758e019f563d323ca90ad9fd6e3e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// alias.asciidoc:266 +// alias.asciidoc:268 [source, python] ---- diff --git a/docs/examples/a3646b59da66b9ab68bdbc8dc2e6a9be.asciidoc b/docs/examples/a3646b59da66b9ab68bdbc8dc2e6a9be.asciidoc index 7d18ab764..a1a4feb8b 100644 --- a/docs/examples/a3646b59da66b9ab68bdbc8dc2e6a9be.asciidoc +++ b/docs/examples/a3646b59da66b9ab68bdbc8dc2e6a9be.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:154 +// search/retriever.asciidoc:159 [source, python] ---- diff --git a/docs/examples/a46f566ca031375658c22f89b87dc6d2.asciidoc b/docs/examples/a46f566ca031375658c22f89b87dc6d2.asciidoc new file mode 100644 index 000000000..b046f9db2 --- /dev/null +++ b/docs/examples/a46f566ca031375658c22f89b87dc6d2.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// migration/migrate_9_0.asciidoc:379 + +[source, python] +---- +resp = client.cat.indices( + index=".ml-anomalies-custom-example", + v=True, + h="index,store.size", +) +print(resp) +---- diff --git a/docs/examples/a60aaed30d7d26eaacbb2c0ed4ddc66d.asciidoc b/docs/examples/a60aaed30d7d26eaacbb2c0ed4ddc66d.asciidoc new file mode 100644 index 000000000..9c66465db --- /dev/null +++ b/docs/examples/a60aaed30d7d26eaacbb2c0ed4ddc66d.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// migration/apis/data-stream-reindex-cancel.asciidoc:41 + +[source, python] +---- +resp = client.indices.cancel_migrate_reindex( + index="my-data-stream", +) +print(resp) +---- diff --git a/docs/examples/a675fafa7c688cb3ea1be09bf887ebf0.asciidoc b/docs/examples/a675fafa7c688cb3ea1be09bf887ebf0.asciidoc new file mode 100644 index 000000000..ec9d585b8 --- /dev/null +++ b/docs/examples/a675fafa7c688cb3ea1be09bf887ebf0.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// migration/apis/data-stream-reindex.asciidoc:310 + +[source, python] +---- +resp = client.indices.get( + index=".migrated-ds-my-data-stream-2025.01.23-000001", + human=True, + filter_path="*.settings.index.version.created_string", +) +print(resp) +---- diff --git a/docs/examples/a9554396506888e392a1aee0ca28e6fc.asciidoc b/docs/examples/a9554396506888e392a1aee0ca28e6fc.asciidoc index 4b8648d66..6b99effea 100644 --- a/docs/examples/a9554396506888e392a1aee0ca28e6fc.asciidoc +++ b/docs/examples/a9554396506888e392a1aee0ca28e6fc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// alias.asciidoc:327 +// alias.asciidoc:329 [source, python] ---- diff --git a/docs/examples/a95a123b9f862e52ab1e8f875961c852.asciidoc b/docs/examples/a95a123b9f862e52ab1e8f875961c852.asciidoc index 37a2b6565..e0f8c052a 100644 --- a/docs/examples/a95a123b9f862e52ab1e8f875961c852.asciidoc +++ b/docs/examples/a95a123b9f862e52ab1e8f875961c852.asciidoc @@ -1,5 +1,5 @@ // 
This file is autogenerated, DO NOT EDIT -// search/search-your-data/search-multiple-indices.asciidoc:90 +// search/search-your-data/search-multiple-indices.asciidoc:124 [source, python] ---- diff --git a/docs/examples/a9f14efc26fdd3c37a71f06c310163d9.asciidoc b/docs/examples/a9f14efc26fdd3c37a71f06c310163d9.asciidoc index e3f0a60c7..79fb403e2 100644 --- a/docs/examples/a9f14efc26fdd3c37a71f06c310163d9.asciidoc +++ b/docs/examples/a9f14efc26fdd3c37a71f06c310163d9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:605 +// search/retriever.asciidoc:650 [source, python] ---- diff --git a/docs/examples/ac22cc2b0f4ad659055feed2852a2d59.asciidoc b/docs/examples/ac22cc2b0f4ad659055feed2852a2d59.asciidoc index 2023f2b91..b722f9c00 100644 --- a/docs/examples/ac22cc2b0f4ad659055feed2852a2d59.asciidoc +++ b/docs/examples/ac22cc2b0f4ad659055feed2852a2d59.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/retrievers-examples.asciidoc:1235 +// search/search-your-data/retrievers-examples.asciidoc:1485 [source, python] ---- diff --git a/docs/examples/acb10091ad335ddd15d71021aaf23c62.asciidoc b/docs/examples/acb10091ad335ddd15d71021aaf23c62.asciidoc index 376de04ba..ba72e08ea 100644 --- a/docs/examples/acb10091ad335ddd15d71021aaf23c62.asciidoc +++ b/docs/examples/acb10091ad335ddd15d71021aaf23c62.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/sort-search-results.asciidoc:629 +// search/search-your-data/sort-search-results.asciidoc:631 [source, python] ---- diff --git a/docs/examples/adced6e22ef03c2ae3b14aa5bdd24fd9.asciidoc b/docs/examples/adced6e22ef03c2ae3b14aa5bdd24fd9.asciidoc new file mode 100644 index 000000000..d3a25bc3d --- /dev/null +++ b/docs/examples/adced6e22ef03c2ae3b14aa5bdd24fd9.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// migration/apis/data-stream-reindex-status.asciidoc:130 + +[source, python] +---- +resp = client.indices.get_migrate_reindex_status( + index="my-data-stream", +) +print(resp) +---- diff --git a/docs/examples/aee4734ee63dbbbd12a21ee886f7a829.asciidoc b/docs/examples/aee4734ee63dbbbd12a21ee886f7a829.asciidoc index cb793773a..e9097b421 100644 --- a/docs/examples/aee4734ee63dbbbd12a21ee886f7a829.asciidoc +++ b/docs/examples/aee4734ee63dbbbd12a21ee886f7a829.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/sort-search-results.asciidoc:546 +// search/search-your-data/sort-search-results.asciidoc:548 [source, python] ---- diff --git a/docs/examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc b/docs/examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc index 671165a4a..d3c2b9d84 100644 --- a/docs/examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc +++ b/docs/examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc @@ -3,9 +3,8 @@ [source, python] ---- -resp = client.perform_request( - "POST", - "/_application/search_application/my_search_application/_render_query", +resp = client.search_application.render_query( + name="my_search_application", ) print(resp) ---- diff --git a/docs/examples/b1e81b70b874a1f0cf75a0ec6e430ddc.asciidoc b/docs/examples/b1e81b70b874a1f0cf75a0ec6e430ddc.asciidoc new file mode 100644 index 000000000..bec033a8c --- /dev/null +++ b/docs/examples/b1e81b70b874a1f0cf75a0ec6e430ddc.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// esql/esql-async-query-stop-api.asciidoc:25 + +[source, python] +---- +resp = client.esql.async_query_stop( + 
id="FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=", +) +print(resp) +---- diff --git a/docs/examples/b583bf8d3a2f49d633aa2cfed5606418.asciidoc b/docs/examples/b583bf8d3a2f49d633aa2cfed5606418.asciidoc index 61fa4718a..797108412 100644 --- a/docs/examples/b583bf8d3a2f49d633aa2cfed5606418.asciidoc +++ b/docs/examples/b583bf8d3a2f49d633aa2cfed5606418.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-component-template.asciidoc:195 +// indices/put-component-template.asciidoc:196 [source, python] ---- diff --git a/docs/examples/b5bc1bb7278f2f95bc54790c78c928e0.asciidoc b/docs/examples/b5bc1bb7278f2f95bc54790c78c928e0.asciidoc index 30ad58e2e..0e8d41aca 100644 --- a/docs/examples/b5bc1bb7278f2f95bc54790c78c928e0.asciidoc +++ b/docs/examples/b5bc1bb7278f2f95bc54790c78c928e0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/get-job.asciidoc:169 +// rollup/apis/get-job.asciidoc:170 [source, python] ---- diff --git a/docs/examples/b607eea422295a3e9acd75f9ed1c8cb7.asciidoc b/docs/examples/b607eea422295a3e9acd75f9ed1c8cb7.asciidoc index c1251db36..799a9e6fc 100644 --- a/docs/examples/b607eea422295a3e9acd75f9ed1c8cb7.asciidoc +++ b/docs/examples/b607eea422295a3e9acd75f9ed1c8cb7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/sort-search-results.asciidoc:370 +// search/search-your-data/sort-search-results.asciidoc:372 [source, python] ---- diff --git a/docs/examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc b/docs/examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc index 756defefe..5e79af3af 100644 --- a/docs/examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc +++ b/docs/examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc @@ -186,13 +186,9 @@ resp = client.bulk( ) print(resp) -resp1 = client.perform_request( - "GET", - "/_text_structure/find_field_structure", - params={ - "index": "test-logs", - "field": "message" - }, +resp1 = client.text_structure.find_field_structure( + index="test-logs", + field="message", ) print(resp1) ---- diff --git a/docs/examples/357edc9d10e98ed776401c7a439a1a55.asciidoc b/docs/examples/ba0e7e0b18fc9ec6c623d40186d1f61b.asciidoc similarity index 78% rename from docs/examples/357edc9d10e98ed776401c7a439a1a55.asciidoc rename to docs/examples/ba0e7e0b18fc9ec6c623d40186d1f61b.asciidoc index 90b277dc4..c25ea798e 100644 --- a/docs/examples/357edc9d10e98ed776401c7a439a1a55.asciidoc +++ b/docs/examples/ba0e7e0b18fc9ec6c623d40186d1f61b.asciidoc @@ -1,11 +1,12 @@ // This file is autogenerated, DO NOT EDIT -// indices/resolve-cluster.asciidoc:244 +// indices/resolve-cluster.asciidoc:271 [source, python] ---- resp = client.indices.resolve_cluster( name="not-present,clust*:my-index*,oldcluster:*", ignore_unavailable=False, + timeout="5s", ) print(resp) ---- diff --git a/docs/examples/bb2ba5d1885f87506f90dbb002e518f4.asciidoc b/docs/examples/bb2ba5d1885f87506f90dbb002e518f4.asciidoc index 42efc866a..f1cecf299 100644 --- a/docs/examples/bb2ba5d1885f87506f90dbb002e518f4.asciidoc +++ b/docs/examples/bb2ba5d1885f87506f90dbb002e518f4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/retrievers-examples.asciidoc:354 +// search/search-your-data/retrievers-examples.asciidoc:604 [source, python] ---- diff --git a/docs/examples/436d50b85fc8f0977d02059eec00719b.asciidoc b/docs/examples/bcd1afb793240b1dddd9fa5d3f21192b.asciidoc similarity index 52% rename from docs/examples/436d50b85fc8f0977d02059eec00719b.asciidoc rename to 
docs/examples/bcd1afb793240b1dddd9fa5d3f21192b.asciidoc index bd4578790..bc2e08cf0 100644 --- a/docs/examples/436d50b85fc8f0977d02059eec00719b.asciidoc +++ b/docs/examples/bcd1afb793240b1dddd9fa5d3f21192b.asciidoc @@ -6,15 +6,11 @@ resp = client.update( index="test", id="1", - script={ - "source": "ctx._source.counter += params.count", - "lang": "painless", - "params": { - "count": 4 - } + doc={ + "product_price": 100 }, upsert={ - "counter": 1 + "product_price": 50 }, ) print(resp) diff --git a/docs/examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc b/docs/examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc index f3b87e892..05c952c57 100644 --- a/docs/examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc +++ b/docs/examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc @@ -3,38 +3,33 @@ [source, python] ---- -resp = client.perform_request( - "POST", - "/_ingest/_simulate", - headers={"Content-Type": "application/json"}, - body={ - "docs": [ - { - "_index": "my-index", - "_id": "123", - "_source": { - "foo": "bar" - } - }, - { - "_index": "my-index", - "_id": "456", - "_source": { - "foo": "rab" - } +resp = client.simulate.ingest( + docs=[ + { + "_index": "my-index", + "_id": "123", + "_source": { + "foo": "bar" } - ], - "pipeline_substitutions": { - "my-pipeline": { - "processors": [ - { - "uppercase": { - "field": "foo" - } - } - ] + }, + { + "_index": "my-index", + "_id": "456", + "_source": { + "foo": "rab" } } + ], + pipeline_substitutions={ + "my-pipeline": { + "processors": [ + { + "uppercase": { + "field": "foo" + } + } + ] + } }, ) print(resp) diff --git a/docs/examples/be5fef0640c3a650ee96f84e3376a1be.asciidoc b/docs/examples/be5fef0640c3a650ee96f84e3376a1be.asciidoc index 416fc9bf1..2c5b4ba3a 100644 --- a/docs/examples/be5fef0640c3a650ee96f84e3376a1be.asciidoc +++ b/docs/examples/be5fef0640c3a650ee96f84e3376a1be.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/update.asciidoc:339 +// docs/update.asciidoc:335 [source, python] ---- diff --git a/docs/examples/bf1de9fa1b825fa875d27fa08821a6d1.asciidoc b/docs/examples/bf1de9fa1b825fa875d27fa08821a6d1.asciidoc index 23c293ac8..7e2332729 100644 --- a/docs/examples/bf1de9fa1b825fa875d27fa08821a6d1.asciidoc +++ b/docs/examples/bf1de9fa1b825fa875d27fa08821a6d1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-across-clusters.asciidoc:117 +// esql/esql-across-clusters.asciidoc:119 [source, python] ---- diff --git a/docs/examples/bf3f520b47581d861e802730aaf2a519.asciidoc b/docs/examples/bf3f520b47581d861e802730aaf2a519.asciidoc index dfc5bc6f9..87ce2230e 100644 --- a/docs/examples/bf3f520b47581d861e802730aaf2a519.asciidoc +++ b/docs/examples/bf3f520b47581d861e802730aaf2a519.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// alias.asciidoc:33 +// alias.asciidoc:35 [source, python] ---- diff --git a/docs/examples/c147de68fd6da032ad4a3c1bf626f5d6.asciidoc b/docs/examples/c147de68fd6da032ad4a3c1bf626f5d6.asciidoc index 0c6394b4a..e2f4cc12b 100644 --- a/docs/examples/c147de68fd6da032ad4a3c1bf626f5d6.asciidoc +++ b/docs/examples/c147de68fd6da032ad4a3c1bf626f5d6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/highlighting.asciidoc:410 +// search/search-your-data/highlighting.asciidoc:422 [source, python] ---- diff --git a/docs/examples/c3b77e11b16e37e9e37e28dec922432e.asciidoc b/docs/examples/c3b77e11b16e37e9e37e28dec922432e.asciidoc new file mode 100644 index 000000000..09b688408 --- /dev/null +++ 
b/docs/examples/c3b77e11b16e37e9e37e28dec922432e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// esql/esql-syntax.asciidoc:187 + +[source, python] +---- +resp = client.esql.query( + query="\nFROM library\n| WHERE match(author, \"Frank Herbert\", {\"minimum_should_match\": 2, \"operator\": \"AND\"})\n| LIMIT 5\n", +) +print(resp) +---- diff --git a/docs/examples/c4607ca79b2bcde39305d6f4f21cad37.asciidoc b/docs/examples/c4607ca79b2bcde39305d6f4f21cad37.asciidoc index 3bec7673e..fd2d55854 100644 --- a/docs/examples/c4607ca79b2bcde39305d6f4f21cad37.asciidoc +++ b/docs/examples/c4607ca79b2bcde39305d6f4f21cad37.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-rest.asciidoc:225 +// esql/esql-rest.asciidoc:226 [source, python] ---- diff --git a/docs/examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc b/docs/examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc index 20dca25b2..267309f1b 100644 --- a/docs/examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc +++ b/docs/examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc @@ -3,17 +3,12 @@ [source, python] ---- -resp = client.perform_request( - "POST", - "/_security/api_key/_bulk_update", - headers={"Content-Type": "application/json"}, - body={ - "ids": [ - "VuaCfGcBCdbkQm-e5aOx", - "H3_AhoIBA9hmeQJdg7ij" - ], - "role_descriptors": {} - }, +resp = client.security.bulk_update_api_keys( + ids=[ + "VuaCfGcBCdbkQm-e5aOx", + "H3_AhoIBA9hmeQJdg7ij" + ], + role_descriptors={}, ) print(resp) ---- diff --git a/docs/examples/c6339d09f85000a6432304b0ec63b8f6.asciidoc b/docs/examples/c6339d09f85000a6432304b0ec63b8f6.asciidoc index 448adcb42..514f0f8aa 100644 --- a/docs/examples/c6339d09f85000a6432304b0ec63b8f6.asciidoc +++ b/docs/examples/c6339d09f85000a6432304b0ec63b8f6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-component-template.asciidoc:235 +// indices/put-component-template.asciidoc:236 [source, python] ---- diff --git a/docs/examples/ca5dda98e977125d40a7fe1e178e213f.asciidoc b/docs/examples/ca5dda98e977125d40a7fe1e178e213f.asciidoc index 1ae157bfb..a6c8c5cb1 100644 --- a/docs/examples/ca5dda98e977125d40a7fe1e178e213f.asciidoc +++ b/docs/examples/ca5dda98e977125d40a7fe1e178e213f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/sparse-vector-query.asciidoc:131 +// query-dsl/sparse-vector-query.asciidoc:134 [source, python] ---- diff --git a/docs/examples/cc5eefcc2102aae7e87b0c87b4af10b8.asciidoc b/docs/examples/cc5eefcc2102aae7e87b0c87b4af10b8.asciidoc index c403a96b0..b994f1ee9 100644 --- a/docs/examples/cc5eefcc2102aae7e87b0c87b4af10b8.asciidoc +++ b/docs/examples/cc5eefcc2102aae7e87b0c87b4af10b8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/multivalued-fields.asciidoc:53 +// esql/multivalued-fields.asciidoc:54 [source, python] ---- diff --git a/docs/examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc b/docs/examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc index 38ea94f46..8de3e1f3e 100644 --- a/docs/examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc +++ b/docs/examples/ccc613951c61f0b17e1ed8a2d3ae54a2.asciidoc @@ -3,75 +3,70 @@ [source, python] ---- -resp = client.perform_request( - "POST", - "/_ingest/_simulate", - headers={"Content-Type": "application/json"}, - body={ - "docs": [ - { - "_index": "my-index", - "_id": "id", - "_source": { - "foo": "bar" - } - }, - { - "_index": "my-index", - "_id": "id", - "_source": { - "foo": "rab" - } - } - ], - "pipeline_substitutions": { - "my-pipeline": { - 
"processors": [ - { - "set": { - "field": "field3", - "value": "value3" - } - } - ] +resp = client.simulate.ingest( + docs=[ + { + "_index": "my-index", + "_id": "id", + "_source": { + "foo": "bar" } }, - "component_template_substitutions": { - "my-component-template": { - "template": { - "mappings": { - "dynamic": "true", - "properties": { - "field3": { - "type": "keyword" - } - } - }, - "settings": { - "index": { - "default_pipeline": "my-pipeline" + { + "_index": "my-index", + "_id": "id", + "_source": { + "foo": "rab" + } + } + ], + pipeline_substitutions={ + "my-pipeline": { + "processors": [ + { + "set": { + "field": "field3", + "value": "value3" + } + } + ] + } + }, + component_template_substitutions={ + "my-component-template": { + "template": { + "mappings": { + "dynamic": "true", + "properties": { + "field3": { + "type": "keyword" } } + }, + "settings": { + "index": { + "default_pipeline": "my-pipeline" + } } } - }, - "index_template_substitutions": { - "my-index-template": { - "index_patterns": [ - "my-index-*" - ], - "composed_of": [ - "component_template_1", - "component_template_2" - ] - } - }, - "mapping_addition": { - "dynamic": "strict", - "properties": { - "foo": { - "type": "keyword" - } + } + }, + index_template_substitutions={ + "my-index-template": { + "index_patterns": [ + "my-index-*" + ], + "composed_of": [ + "component_template_1", + "component_template_2" + ] + } + }, + mapping_addition={ + "dynamic": "strict", + "properties": { + "foo": { + "type": "keyword" } } }, diff --git a/docs/examples/cd7da0c3769682f546cc1888e569382e.asciidoc b/docs/examples/cd7da0c3769682f546cc1888e569382e.asciidoc index b117cb26f..ded893b21 100644 --- a/docs/examples/cd7da0c3769682f546cc1888e569382e.asciidoc +++ b/docs/examples/cd7da0c3769682f546cc1888e569382e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/highlighting.asciidoc:764 +// search/search-your-data/highlighting.asciidoc:776 [source, python] ---- diff --git a/docs/examples/d1d8b6e642db1a7c70dbbf0fe6d8e92d.asciidoc b/docs/examples/d1d8b6e642db1a7c70dbbf0fe6d8e92d.asciidoc index fb9b3dc8c..45bd12593 100644 --- a/docs/examples/d1d8b6e642db1a7c70dbbf0fe6d8e92d.asciidoc +++ b/docs/examples/d1d8b6e642db1a7c70dbbf0fe6d8e92d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/sparse-vector-query.asciidoc:192 +// query-dsl/sparse-vector-query.asciidoc:195 [source, python] ---- diff --git a/docs/examples/d2e7dead222cfbebbd2c21a7cc1893b4.asciidoc b/docs/examples/d2e7dead222cfbebbd2c21a7cc1893b4.asciidoc new file mode 100644 index 000000000..713900e56 --- /dev/null +++ b/docs/examples/d2e7dead222cfbebbd2c21a7cc1893b4.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// api-conventions.asciidoc:260 + +[source, python] +---- +resp = client.cluster.state( + metric="metadata", + filter_path="metadata.indices.*.system", +) +print(resp) +---- diff --git a/docs/examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc b/docs/examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc index bda513c8a..210dfef8d 100644 --- a/docs/examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc +++ b/docs/examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc @@ -3,14 +3,9 @@ [source, python] ---- -resp = client.perform_request( - "POST", - "/_security/oidc/prepare", - headers={"Content-Type": "application/json"}, - body={ - "iss": "http://127.0.0.1:8080", - "login_hint": "this_is_an_opaque_string" - }, +resp = client.security.oidc_prepare_authentication( + 
iss="http://127.0.0.1:8080", + login_hint="this_is_an_opaque_string", ) print(resp) ---- diff --git a/docs/examples/d3a0f648d0fd50b54a4e9ebe363c5047.asciidoc b/docs/examples/d3a0f648d0fd50b54a4e9ebe363c5047.asciidoc new file mode 100644 index 000000000..fdf783f2b --- /dev/null +++ b/docs/examples/d3a0f648d0fd50b54a4e9ebe363c5047.asciidoc @@ -0,0 +1,48 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/retrievers-examples.asciidoc:221 + +[source, python] +---- +resp = client.search( + index="retrievers_example", + retriever={ + "linear": { + "retrievers": [ + { + "retriever": { + "standard": { + "query": { + "query_string": { + "query": "(information retrieval) OR (artificial intelligence)", + "default_field": "text" + } + } + } + }, + "weight": 2, + "normalizer": "minmax" + }, + { + "retriever": { + "knn": { + "field": "vector", + "query_vector": [ + 0.23, + 0.67, + 0.89 + ], + "k": 3, + "num_candidates": 5 + } + }, + "weight": 1.5, + "normalizer": "minmax" + } + ], + "rank_window_size": 10 + } + }, + source=False, +) +print(resp) +---- diff --git a/docs/examples/d4df39f72d3a3b80cd4042f6a21c3f19.asciidoc b/docs/examples/d4df39f72d3a3b80cd4042f6a21c3f19.asciidoc index 8726658f3..d277e6a15 100644 --- a/docs/examples/d4df39f72d3a3b80cd4042f6a21c3f19.asciidoc +++ b/docs/examples/d4df39f72d3a3b80cd4042f6a21c3f19.asciidoc @@ -3,11 +3,9 @@ [source, python] ---- -resp = client.perform_request( - "PUT", - "/_ingest/ip_location/database/my-database-2", - headers={"Content-Type": "application/json"}, - body={ +resp = client.ingest.put_ip_location_database( + id="my-database-2", + configuration={ "name": "standard_location", "ipinfo": {} }, diff --git a/docs/examples/d6a4548b29e939fb197189c20c7c016f.asciidoc b/docs/examples/d6a4548b29e939fb197189c20c7c016f.asciidoc new file mode 100644 index 000000000..55232f34a --- /dev/null +++ b/docs/examples/d6a4548b29e939fb197189c20c7c016f.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// inference/elastic-infer-service.asciidoc:115 + +[source, python] +---- +resp = client.inference.put( + task_type="chat_completion", + inference_id="chat-completion-endpoint", + inference_config={ + "service": "elastic", + "service_settings": { + "model_id": "model-1" + } + }, +) +print(resp) +---- diff --git a/docs/examples/d8c053ee26c1533ce936ec81101d8e1b.asciidoc b/docs/examples/d8c053ee26c1533ce936ec81101d8e1b.asciidoc index 78590c949..3be18be28 100644 --- a/docs/examples/d8c053ee26c1533ce936ec81101d8e1b.asciidoc +++ b/docs/examples/d8c053ee26c1533ce936ec81101d8e1b.asciidoc @@ -3,9 +3,8 @@ [source, python] ---- -resp = client.perform_request( - "GET", - "/_ingest/ip_location/database/my-database-id", +resp = client.ingest.get_ip_location_database( + id="my-database-id", ) print(resp) ---- diff --git a/docs/examples/d93d52b6057a7aff3d0766ca44c505e0.asciidoc b/docs/examples/d93d52b6057a7aff3d0766ca44c505e0.asciidoc index d6d263200..ccde60cca 100644 --- a/docs/examples/d93d52b6057a7aff3d0766ca44c505e0.asciidoc +++ b/docs/examples/d93d52b6057a7aff3d0766ca44c505e0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// alias.asciidoc:204 +// alias.asciidoc:206 [source, python] ---- diff --git a/docs/examples/dd16c9c981551c9da47ebb5ef5105fa0.asciidoc b/docs/examples/dd16c9c981551c9da47ebb5ef5105fa0.asciidoc new file mode 100644 index 000000000..8ffce9166 --- /dev/null +++ b/docs/examples/dd16c9c981551c9da47ebb5ef5105fa0.asciidoc @@ -0,0 +1,57 @@ +// This file is autogenerated, DO NOT EDIT +// 
migration/migrate_9_0.asciidoc:535 + +[source, python] +---- +resp = client.indices.update_aliases( + actions=[ + { + "add": { + "index": ".reindexed-v9-ml-anomalies-custom-example", + "alias": ".ml-anomalies-example1", + "filter": { + "term": { + "job_id": { + "value": "example1" + } + } + }, + "is_hidden": True + } + }, + { + "add": { + "index": ".reindexed-v9-ml-anomalies-custom-example", + "alias": ".ml-anomalies-example2", + "filter": { + "term": { + "job_id": { + "value": "example2" + } + } + }, + "is_hidden": True + } + }, + { + "remove": { + "index": ".ml-anomalies-custom-example", + "aliases": ".ml-anomalies-*" + } + }, + { + "remove_index": { + "index": ".ml-anomalies-custom-example" + } + }, + { + "add": { + "index": ".reindexed-v9-ml-anomalies-custom-example", + "alias": ".ml-anomalies-custom-example", + "is_hidden": True + } + } + ], +) +print(resp) +---- diff --git a/docs/examples/dd71b0c9f9197684ff29c61062c55660.asciidoc b/docs/examples/dd71b0c9f9197684ff29c61062c55660.asciidoc index 3f2fdfb21..6acf42c3f 100644 --- a/docs/examples/dd71b0c9f9197684ff29c61062c55660.asciidoc +++ b/docs/examples/dd71b0c9f9197684ff29c61062c55660.asciidoc @@ -3,9 +3,6 @@ [source, python] ---- -resp = client.perform_request( - "GET", - "/_security/settings", -) +resp = client.security.get_settings() print(resp) ---- diff --git a/docs/examples/dddb6a6ebd145f8411c5b4910d332f87.asciidoc b/docs/examples/dddb6a6ebd145f8411c5b4910d332f87.asciidoc index fbbeb3a8f..79eadab36 100644 --- a/docs/examples/dddb6a6ebd145f8411c5b4910d332f87.asciidoc +++ b/docs/examples/dddb6a6ebd145f8411c5b4910d332f87.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/multivalued-fields.asciidoc:228 +// esql/multivalued-fields.asciidoc:233 [source, python] ---- diff --git a/docs/examples/dde92fdf3469349ffe2c81764333543a.asciidoc b/docs/examples/dde92fdf3469349ffe2c81764333543a.asciidoc new file mode 100644 index 000000000..2df1ba4b3 --- /dev/null +++ b/docs/examples/dde92fdf3469349ffe2c81764333543a.asciidoc @@ -0,0 +1,14 @@ +// This file is autogenerated, DO NOT EDIT +// migration/apis/create-index-from-source.asciidoc:137 + +[source, python] +---- +resp = client.indices.create_from( + source="my-index", + dest="my-new-index", + create_from={ + "remove_index_blocks": False + }, +) +print(resp) +---- diff --git a/docs/examples/de139866a220124360e5e27d1a736ea4.asciidoc b/docs/examples/de139866a220124360e5e27d1a736ea4.asciidoc index a65c0590d..122457dad 100644 --- a/docs/examples/de139866a220124360e5e27d1a736ea4.asciidoc +++ b/docs/examples/de139866a220124360e5e27d1a736ea4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/sort-search-results.asciidoc:286 +// search/search-your-data/sort-search-results.asciidoc:288 [source, python] ---- diff --git a/docs/examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc b/docs/examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc index 0c74e3bf6..17fc821e9 100644 --- a/docs/examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc +++ b/docs/examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc @@ -3,13 +3,8 @@ [source, python] ---- -resp = client.perform_request( - "POST", - "/_security/oidc/prepare", - headers={"Content-Type": "application/json"}, - body={ - "realm": "oidc1" - }, +resp = client.security.oidc_prepare_authentication( + realm="oidc1", ) print(resp) ---- diff --git a/docs/examples/e4b38973c74037335378d8480f1ce894.asciidoc b/docs/examples/e4b38973c74037335378d8480f1ce894.asciidoc index a9514e8d7..bdf7f710d 100644 --- 
a/docs/examples/e4b38973c74037335378d8480f1ce894.asciidoc +++ b/docs/examples/e4b38973c74037335378d8480f1ce894.asciidoc @@ -3,39 +3,34 @@ [source, python] ---- -resp = client.perform_request( - "POST", - "/_ingest/_simulate", - headers={"Content-Type": "application/json"}, - body={ - "docs": [ - { - "_index": "my-index", - "_id": "123", - "_source": { - "foo": "foo" - } - }, - { - "_index": "my-index", - "_id": "456", - "_source": { - "bar": "rab" - } +resp = client.simulate.ingest( + docs=[ + { + "_index": "my-index", + "_id": "123", + "_source": { + "foo": "foo" } - ], - "component_template_substitutions": { - "my-mappings_template": { - "template": { - "mappings": { - "dynamic": "strict", - "properties": { - "foo": { - "type": "keyword" - }, - "bar": { - "type": "keyword" - } + }, + { + "_index": "my-index", + "_id": "456", + "_source": { + "bar": "rab" + } + } + ], + component_template_substitutions={ + "my-mappings_template": { + "template": { + "mappings": { + "dynamic": "strict", + "properties": { + "foo": { + "type": "keyword" + }, + "bar": { + "type": "keyword" } } } diff --git a/docs/examples/e6ccd979c34ba03007e625c6ec3e71a9.asciidoc b/docs/examples/e6ccd979c34ba03007e625c6ec3e71a9.asciidoc index 4e2a2d9e7..d7d7853cb 100644 --- a/docs/examples/e6ccd979c34ba03007e625c6ec3e71a9.asciidoc +++ b/docs/examples/e6ccd979c34ba03007e625c6ec3e71a9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// alias.asciidoc:258 +// alias.asciidoc:260 [source, python] ---- diff --git a/docs/examples/e6f6d3aeea7ecea47cfd5c3d727f7004.asciidoc b/docs/examples/e6f6d3aeea7ecea47cfd5c3d727f7004.asciidoc index 15cb77d28..28e0f8426 100644 --- a/docs/examples/e6f6d3aeea7ecea47cfd5c3d727f7004.asciidoc +++ b/docs/examples/e6f6d3aeea7ecea47cfd5c3d727f7004.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/retrievers-examples.asciidoc:198 +// search/search-your-data/retrievers-examples.asciidoc:448 [source, python] ---- diff --git a/docs/examples/e6faae2e272ee57727f38e55a3de5bb2.asciidoc b/docs/examples/e6faae2e272ee57727f38e55a3de5bb2.asciidoc index 5efa745d0..a3e95dd72 100644 --- a/docs/examples/e6faae2e272ee57727f38e55a3de5bb2.asciidoc +++ b/docs/examples/e6faae2e272ee57727f38e55a3de5bb2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/highlighting.asciidoc:545 +// search/search-your-data/highlighting.asciidoc:557 [source, python] ---- diff --git a/docs/examples/e715fb8c792bf09ac98f0ceca99beb84.asciidoc b/docs/examples/e715fb8c792bf09ac98f0ceca99beb84.asciidoc new file mode 100644 index 000000000..e799bf64f --- /dev/null +++ b/docs/examples/e715fb8c792bf09ac98f0ceca99beb84.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// migration/migrate_9_0.asciidoc:345 + +[source, python] +---- +resp = client.migration.deprecations( + index=".ml-anomalies-*", +) +print(resp) +---- diff --git a/docs/examples/e93ff228ab3e63738e1c83fdfb7424b9.asciidoc b/docs/examples/e93ff228ab3e63738e1c83fdfb7424b9.asciidoc index daca718ee..92bdbacaf 100644 --- a/docs/examples/e93ff228ab3e63738e1c83fdfb7424b9.asciidoc +++ b/docs/examples/e93ff228ab3e63738e1c83fdfb7424b9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/highlighting.asciidoc:434 +// search/search-your-data/highlighting.asciidoc:446 [source, python] ---- diff --git a/docs/examples/eac3bc428d03eb4926fa51f74b9bc4d5.asciidoc b/docs/examples/eac3bc428d03eb4926fa51f74b9bc4d5.asciidoc index 
1e1e3bad5..d9273b092 100644 --- a/docs/examples/eac3bc428d03eb4926fa51f74b9bc4d5.asciidoc +++ b/docs/examples/eac3bc428d03eb4926fa51f74b9bc4d5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/highlighting.asciidoc:342 +// search/search-your-data/highlighting.asciidoc:354 [source, python] ---- diff --git a/docs/examples/ec135f0cc0d3f526df68000b2a95c65b.asciidoc b/docs/examples/ec135f0cc0d3f526df68000b2a95c65b.asciidoc new file mode 100644 index 000000000..10f0be019 --- /dev/null +++ b/docs/examples/ec135f0cc0d3f526df68000b2a95c65b.asciidoc @@ -0,0 +1,12 @@ +// This file is autogenerated, DO NOT EDIT +// migration/migrate_9_0.asciidoc:403 + +[source, python] +---- +resp = client.indices.create_from( + source=".ml-anomalies-custom-example", + dest=".reindexed-v9-ml-anomalies-custom-example", + create_from=None, +) +print(resp) +---- diff --git a/docs/examples/ed5c3b45e8de912faba44507d827eb93.asciidoc b/docs/examples/ed5c3b45e8de912faba44507d827eb93.asciidoc index a099a031c..e6ca4ba6b 100644 --- a/docs/examples/ed5c3b45e8de912faba44507d827eb93.asciidoc +++ b/docs/examples/ed5c3b45e8de912faba44507d827eb93.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/sort-search-results.asciidoc:499 +// search/search-your-data/sort-search-results.asciidoc:501 [source, python] ---- diff --git a/docs/examples/edae616e1244babf6032aecc6aaaf836.asciidoc b/docs/examples/edae616e1244babf6032aecc6aaaf836.asciidoc index 342dde9e2..d47895fb1 100644 --- a/docs/examples/edae616e1244babf6032aecc6aaaf836.asciidoc +++ b/docs/examples/edae616e1244babf6032aecc6aaaf836.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/sort-search-results.asciidoc:472 +// search/search-your-data/sort-search-results.asciidoc:474 [source, python] ---- diff --git a/docs/examples/edb25dc0162b039d477cb06aed2d6275.asciidoc b/docs/examples/edb25dc0162b039d477cb06aed2d6275.asciidoc index 90f2ebf6c..76902ca50 100644 --- a/docs/examples/edb25dc0162b039d477cb06aed2d6275.asciidoc +++ b/docs/examples/edb25dc0162b039d477cb06aed2d6275.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/sparse-vector-query.asciidoc:149 +// query-dsl/sparse-vector-query.asciidoc:152 [source, python] ---- diff --git a/docs/examples/ee08328cd157d547de19b4abe867b23e.asciidoc b/docs/examples/ee08328cd157d547de19b4abe867b23e.asciidoc index b149f81c4..b50d454fe 100644 --- a/docs/examples/ee08328cd157d547de19b4abe867b23e.asciidoc +++ b/docs/examples/ee08328cd157d547de19b4abe867b23e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// alias.asciidoc:275 +// alias.asciidoc:277 [source, python] ---- diff --git a/docs/examples/f29b2674299ddf51a25ed87619025ede.asciidoc b/docs/examples/f29b2674299ddf51a25ed87619025ede.asciidoc index ce3e50526..dfae20141 100644 --- a/docs/examples/f29b2674299ddf51a25ed87619025ede.asciidoc +++ b/docs/examples/f29b2674299ddf51a25ed87619025ede.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/rollup-search.asciidoc:121 +// rollup/apis/rollup-search.asciidoc:122 [source, python] ---- diff --git a/docs/examples/f39512478cae2db8f4566a1e4af9e8f5.asciidoc b/docs/examples/f39512478cae2db8f4566a1e4af9e8f5.asciidoc index 271a51425..569f9d2ad 100644 --- a/docs/examples/f39512478cae2db8f4566a1e4af9e8f5.asciidoc +++ b/docs/examples/f39512478cae2db8f4566a1e4af9e8f5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// 
rollup/rollup-getting-started.asciidoc:213 +// rollup/rollup-getting-started.asciidoc:217 [source, python] ---- diff --git a/docs/examples/f8cb1a04c2e487ff006b5ae0e1a7afbd.asciidoc b/docs/examples/f8cb1a04c2e487ff006b5ae0e1a7afbd.asciidoc index d66ed0a8c..f851ff1bc 100644 --- a/docs/examples/f8cb1a04c2e487ff006b5ae0e1a7afbd.asciidoc +++ b/docs/examples/f8cb1a04c2e487ff006b5ae0e1a7afbd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/rollup-caps.asciidoc:178 +// rollup/apis/rollup-caps.asciidoc:181 [source, python] ---- diff --git a/docs/examples/9ad0864bcd665b63551e944653d32423.asciidoc b/docs/examples/f994498dd6576be657dedce2822d2b9e.asciidoc similarity index 82% rename from docs/examples/9ad0864bcd665b63551e944653d32423.asciidoc rename to docs/examples/f994498dd6576be657dedce2822d2b9e.asciidoc index 9615a7520..b29501e73 100644 --- a/docs/examples/9ad0864bcd665b63551e944653d32423.asciidoc +++ b/docs/examples/f994498dd6576be657dedce2822d2b9e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/semantic-text-hybrid-search:118 +// search/search-your-data/semantic-text-hybrid-search:119 [source, python] ---- @@ -30,6 +30,13 @@ resp = client.search( ] } }, + highlight={ + "fields": { + "semantic_text": { + "number_of_fragments": 2 + } + } + }, ) print(resp) ---- diff --git a/docs/examples/fbb38243221c8fb311660616e3add9ce.asciidoc b/docs/examples/fbb38243221c8fb311660616e3add9ce.asciidoc index f3511fb9a..25a5cb185 100644 --- a/docs/examples/fbb38243221c8fb311660616e3add9ce.asciidoc +++ b/docs/examples/fbb38243221c8fb311660616e3add9ce.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/sort-search-results.asciidoc:418 +// search/search-your-data/sort-search-results.asciidoc:420 [source, python] ---- diff --git a/docs/examples/fe208d94ec93eabf3bd06139fa70701e.asciidoc b/docs/examples/fe208d94ec93eabf3bd06139fa70701e.asciidoc index 21882266f..f38b22cb3 100644 --- a/docs/examples/fe208d94ec93eabf3bd06139fa70701e.asciidoc +++ b/docs/examples/fe208d94ec93eabf3bd06139fa70701e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/migrating-to-downsampling.asciidoc:58 +// rollup/migrating-to-downsampling.asciidoc:59 [source, python] ---- diff --git a/docs/examples/fe3a927d868cbc530e08e05964d5174a.asciidoc b/docs/examples/fe3a927d868cbc530e08e05964d5174a.asciidoc index adad0a7f7..17408d56f 100644 --- a/docs/examples/fe3a927d868cbc530e08e05964d5174a.asciidoc +++ b/docs/examples/fe3a927d868cbc530e08e05964d5174a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/sort-search-results.asciidoc:115 +// search/search-your-data/sort-search-results.asciidoc:117 [source, python] ---- diff --git a/docs/examples/681d24c2633f598fc43d6afff8996dbb.asciidoc b/docs/examples/ffda10edaa7ce087703193c3cb95a426.asciidoc similarity index 90% rename from docs/examples/681d24c2633f598fc43d6afff8996dbb.asciidoc rename to docs/examples/ffda10edaa7ce087703193c3cb95a426.asciidoc index 79fe95816..d88901a8f 100644 --- a/docs/examples/681d24c2633f598fc43d6afff8996dbb.asciidoc +++ b/docs/examples/ffda10edaa7ce087703193c3cb95a426.asciidoc @@ -27,6 +27,9 @@ resp = client.indices.create( }, "topic": { "type": "keyword" + }, + "timestamp": { + "type": "date" } } }, @@ -48,7 +51,8 @@ resp1 = client.index( "llm", "ai", "information_retrieval" - ] + ], + "timestamp": "2021-01-01T12:10:30" }, ) print(resp1) @@ -67,7 +71,8 @@ resp2 = client.index( "topic": [ 
"ai", "medicine" - ] + ], + "timestamp": "2022-01-01T12:10:30" }, ) print(resp2) @@ -86,7 +91,8 @@ resp3 = client.index( "topic": [ "ai", "security" - ] + ], + "timestamp": "2023-01-01T12:10:30" }, ) print(resp3) @@ -106,7 +112,8 @@ resp4 = client.index( "ai", "elastic", "assistant" - ] + ], + "timestamp": "2024-01-01T12:10:30" }, ) print(resp4) @@ -126,7 +133,8 @@ resp5 = client.index( "documentation", "observability", "elastic" - ] + ], + "timestamp": "2025-01-01T12:10:30" }, ) print(resp5) diff --git a/docs/guide/release-notes.asciidoc b/docs/guide/release-notes.asciidoc index 1b8d3957f..069cedc39 100644 --- a/docs/guide/release-notes.asciidoc +++ b/docs/guide/release-notes.asciidoc @@ -1,6 +1,7 @@ [[release-notes]] == Release notes +* <> * <> * <> * <> @@ -44,6 +45,18 @@ * <> * <> +[discrete] +[[rn-8-17-2]] +=== 8.17.2 (2025-03-04) + +- Explain how to use sub clients in API docs (https://github.com/elastic/elasticsearch-py/pull/2798[#2798]) +- Render descriptions as Markdown in API docs +- Update APIs + * Add `keep_alive` to Submit async search API + * Add `Run and Delete` an async ES|QL query APIs + * Add Get async ES|QL query results API + * Add `include_ccs_metadata` to ES|QL query API + [discrete] [[rn-8-17-1]] === 8.17.1 (2025-01-23) diff --git a/elasticsearch/_version.py b/elasticsearch/_version.py index d1cc6c87a..00e2789aa 100644 --- a/elasticsearch/_version.py +++ b/elasticsearch/_version.py @@ -15,4 +15,4 @@ # specific language governing permissions and limitations # under the License. -__versionstr__ = "8.17.1" +__versionstr__ = "8.17.2" diff --git a/utils/generate-docs-examples/package-lock.json b/utils/generate-docs-examples/package-lock.json index 6c4f1bc41..f00b0308c 100644 --- a/utils/generate-docs-examples/package-lock.json +++ b/utils/generate-docs-examples/package-lock.json @@ -17,12 +17,12 @@ } }, "node_modules/@elastic/request-converter": { - "version": "8.16.1", - "resolved": "https://registry.npmjs.org/@elastic/request-converter/-/request-converter-8.16.1.tgz", - "integrity": "sha512-lg2qCJ4kyxsP/0NpZo0+NsJfaY4JwyxGIVqD2l2Vmx9tv7ZNaZMn/TjHKBo2+jN0laJBInpxpnkPUgVWo5kw1g==", + "version": "8.18.0", + "resolved": "https://registry.npmjs.org/@elastic/request-converter/-/request-converter-8.18.0.tgz", + "integrity": "sha512-xEIB17voGulAfBThFqqtk8Osc+dNHiCqN9GW0Nf6PunNdvmAT5YvMb6u4NNI+NPAxNu90ak396g+ThjH9VRGIw==", "license": "Apache-2.0", "dependencies": { - "child-process-promise": "^2.2.1", + "base64url": "^3.0.1", "commander": "^12.1.0", "find-my-way-ts": "^0.1.2", "handlebars": "^4.7.8", @@ -82,6 +82,15 @@ ], "license": "MIT" }, + "node_modules/base64url": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/base64url/-/base64url-3.0.1.tgz", + "integrity": "sha512-ir1UPr3dkwexU7FdV8qBBbNDRUhMmIekYMFZfi+C/sLNnRESKPl23nB9b2pltqfOQNnGzsDdId90AEtG5tCx4A==", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, "node_modules/bl": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", From b011e84414c4ea5fd745b2a788713a33b9e0503c Mon Sep 17 00:00:00 2001 From: Quentin Pradet Date: Fri, 7 Mar 2025 11:22:44 +0400 Subject: [PATCH 39/65] Fix logo URL (#2825) (#2831) (cherry picked from commit 922020165c3f3e57571682cce15404ddf22c88eb) # Conflicts: # docs/guide/images/logo-elastic-glyph-color.svg --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a86da2a24..3782448c0 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@

        - Elastic logo + Elastic logo

        # Elasticsearch Python Client From 607f3f619ab5cb8f25829f12f0d423d6a0cd9287 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 10 Mar 2025 14:46:24 +0100 Subject: [PATCH 40/65] Auto-generated API code (#2836) --- elasticsearch/_async/client/indices.py | 14 +- elasticsearch/_async/client/inference.py | 440 +++++++++++++++++++---- elasticsearch/_async/client/simulate.py | 10 +- elasticsearch/_sync/client/indices.py | 14 +- elasticsearch/_sync/client/inference.py | 440 +++++++++++++++++++---- elasticsearch/_sync/client/simulate.py | 10 +- elasticsearch/dsl/query.py | 22 ++ elasticsearch/dsl/types.py | 42 +++ 8 files changed, 816 insertions(+), 176 deletions(-) diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py index 890f6903e..4082a68f7 100644 --- a/elasticsearch/_async/client/indices.py +++ b/elasticsearch/_async/client/indices.py @@ -3009,7 +3009,7 @@ async def migrate_to_data_stream( The write index for the alias becomes the write index for the stream.

        - ``_ + ``_ :param name: Name of the index alias to convert to a data stream. :param master_timeout: Period to wait for a connection to the master node. If @@ -3065,7 +3065,7 @@ async def modify_data_stream( Performs one or more data stream modification actions in a single atomic operation.

        - ``_ + ``_ :param actions: Actions to perform. """ @@ -3230,7 +3230,7 @@ async def promote_data_stream( This will affect the lifecycle management of the data stream and interfere with the data stream size and retention.

        - ``_ + ``_ :param name: The name of the data stream :param master_timeout: Period to wait for a connection to the master node. If @@ -3296,7 +3296,7 @@ async def put_alias( Adds a data stream or index to an alias.

        - ``_ + ``_ :param index: Comma-separated list of data streams or indices to add. Supports wildcards (`*`). Wildcard patterns that match both data streams and indices @@ -3403,7 +3403,7 @@ async def put_data_lifecycle( Update the data stream lifecycle of the specified data streams.

        - ``_ + ``_ :param name: Comma-separated list of data streams used to limit the request. Supports wildcards (`*`). To target all data streams use `*` or `_all`. @@ -3531,7 +3531,7 @@ async def put_index_template( If an entry already exists with the same key, then it is overwritten by the new definition.

        - ``_ + ``_ :param name: Index or template name :param allow_auto_create: This setting overrides the value of the `action.auto_create_index` @@ -5465,7 +5465,7 @@ async def update_aliases( Adds a data stream or index to an alias.

        - ``_ + ``_ :param actions: Actions to perform. :param master_timeout: Period to wait for a connection to the master node. If diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py index 804e920b6..7c53afdfa 100644 --- a/elasticsearch/_async/client/inference.py +++ b/elasticsearch/_async/client/inference.py @@ -25,6 +25,74 @@ class InferenceClient(NamespacedClient): + @_rewrite_parameters( + body_fields=("input", "task_settings"), + ) + async def completion( + self, + *, + inference_id: str, + input: t.Optional[t.Union[str, t.Sequence[str]]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Any] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Perform completion inference on the service
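A minimal usage sketch for the new completion helper, assuming an inference endpoint named `my-completion-endpoint` already exists (the endpoint ID and input text are placeholders):

[source, python]
----
# Hypothetical endpoint ID; create the endpoint first with client.inference.put(...)
resp = client.inference.completion(
    inference_id="my-completion-endpoint",
    input="Summarize the Elasticsearch inference API in one sentence.",
    timeout="30s",
)
print(resp)
----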

        + + + ``_ + + :param inference_id: The inference Id + :param input: Inference input. Either a string or an array of strings. + :param task_settings: Optional task settings + :param timeout: Specifies the amount of time to wait for the inference request + to complete. + """ + if inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'inference_id'") + if input is None and body is None: + raise ValueError("Empty value passed for parameter 'input'") + __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)} + __path = f'/_inference/completion/{__path_parts["inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + if not __body: + if input is not None: + __body["input"] = input + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.completion", + path_parts=__path_parts, + ) + @_rewrite_parameters() async def delete( self, @@ -33,7 +101,13 @@ async def delete( task_type: t.Optional[ t.Union[ str, - t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"], + t.Literal[ + "chat_completion", + "completion", + "rerank", + "sparse_embedding", + "text_embedding", + ], ] ] = None, dry_run: t.Optional[bool] = None, @@ -102,7 +176,13 @@ async def get( task_type: t.Optional[ t.Union[ str, - t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"], + t.Literal[ + "chat_completion", + "completion", + "rerank", + "sparse_embedding", + "text_embedding", + ], ] ] = None, inference_id: t.Optional[str] = None, @@ -155,24 +235,188 @@ async def get( ) @_rewrite_parameters( - body_fields=("input", "query", "task_settings"), + body_name="inference_config", ) - async def inference( + async def put( self, *, inference_id: str, - input: t.Optional[t.Union[str, t.Sequence[str]]] = None, + inference_config: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Mapping[str, t.Any]] = None, task_type: t.Optional[ t.Union[ str, - t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"], + t.Literal[ + "chat_completion", + "completion", + "rerank", + "sparse_embedding", + "text_embedding", + ], ] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create an inference endpoint. + When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. + After creating the endpoint, wait for the model deployment to complete before using it. + To verify the deployment status, use the get trained model statistics API. + Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". + Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

        +

        IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. + For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. + However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
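As an illustrative sketch, a generic endpoint can be created with the put helper; the inference ID, service name, and service settings below are placeholders and should be adapted to the service actually in use:

[source, python]
----
# Placeholder endpoint ID and service settings
resp = client.inference.put(
    task_type="sparse_embedding",
    inference_id="my-elser-endpoint",
    inference_config={
        "service": "elser",
        "service_settings": {
            "num_allocations": 1,
            "num_threads": 1
        }
    },
)
print(resp)
----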

        + + + ``_ + + :param inference_id: The inference Id + :param inference_config: + :param task_type: The task type + """ + if inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'inference_id'") + if inference_config is None and body is None: + raise ValueError( + "Empty value passed for parameters 'inference_config' and 'body', one of them should be set." + ) + elif inference_config is not None and body is not None: + raise ValueError("Cannot set both 'inference_config' and 'body'") + __path_parts: t.Dict[str, str] + if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: + __path_parts = { + "task_type": _quote(task_type), + "inference_id": _quote(inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}' + elif inference_id not in SKIP_IN_PATH: + __path_parts = {"inference_id": _quote(inference_id)} + __path = f'/_inference/{__path_parts["inference_id"]}' + else: + raise ValueError("Couldn't find a path for the given parameters") + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __body = inference_config if inference_config is not None else body + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("service", "service_settings"), + ) + async def put_watsonx( + self, + *, + task_type: t.Union[str, t.Literal["text_embedding"]], + watsonx_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["watsonxai"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create a Watsonx inference endpoint.

        +

        Creates an inference endpoint to perform an inference task with the watsonxai service. + You need an IBM Cloud Databases for Elasticsearch deployment to use the watsonxai inference service. + You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.

        +

        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. + After creating the endpoint, wait for the model deployment to complete before using it. + To verify the deployment status, use the get trained model statistics API. + Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". + Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
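A minimal sketch of the new helper, assuming placeholder IBM watsonx.ai credentials; the exact `service_settings` keys shown are assumptions and should be checked against the Watsonx service documentation:

[source, python]
----
resp = client.inference.put_watsonx(
    task_type="text_embedding",
    watsonx_inference_id="watsonx-embeddings",
    service="watsonxai",
    service_settings={
        # Illustrative placeholders; supply your own watsonx.ai details
        "api_key": "<api-key>",
        "url": "<watsonx-endpoint-url>",
        "model_id": "<model-id>",
        "project_id": "<project-id>",
        "api_version": "2024-05-02"
    },
)
print(resp)
----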

        + + + ``_ + + :param task_type: The task type. The only valid task type for the model to perform + is `text_embedding`. + :param watsonx_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `watsonxai`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `watsonxai` service. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if watsonx_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'watsonx_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "watsonx_inference_id": _quote(watsonx_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["watsonx_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_watsonx", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("input", "query", "task_settings"), + ) + async def rerank( + self, + *, + inference_id: str, + input: t.Optional[t.Union[str, t.Sequence[str]]] = None, query: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Any] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, @@ -180,14 +424,7 @@ async def inference( """ .. raw:: html -

        Perform inference on the service.

        -

        This API enables you to use machine learning models to perform specific tasks on data that you provide as an input. - It returns a response with the results of the tasks. - The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.

        -
        -

        info - The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.

        -
        +

        Perform reranking inference on the service
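A usage sketch for the dedicated rerank helper, with a hypothetical endpoint ID; note that `query` is required for this task type:

[source, python]
----
# Hypothetical rerank endpoint ID
resp = client.inference.rerank(
    inference_id="my-rerank-endpoint",
    query="how to store vectors in Elasticsearch",
    input=[
        "Elasticsearch supports dense_vector fields.",
        "The weather is sunny today."
    ],
)
print(resp)
----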

        ``_ @@ -196,9 +433,7 @@ async def inference( :param input: The text on which you want to perform the inference task. It can be a single string or an array. > info > Inference endpoints for the `completion` task type currently only support a single string as input. - :param task_type: The type of inference task that the model performs. - :param query: The query input, which is required only for the `rerank` task. - It is not required for other tasks. + :param query: Query input. :param task_settings: Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service. @@ -208,18 +443,10 @@ async def inference( raise ValueError("Empty value passed for parameter 'inference_id'") if input is None and body is None: raise ValueError("Empty value passed for parameter 'input'") - __path_parts: t.Dict[str, str] - if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: - __path_parts = { - "task_type": _quote(task_type), - "inference_id": _quote(inference_id), - } - __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}' - elif inference_id not in SKIP_IN_PATH: - __path_parts = {"inference_id": _quote(inference_id)} - __path = f'/_inference/{__path_parts["inference_id"]}' - else: - raise ValueError("Couldn't find a path for the given parameters") + if query is None and body is None: + raise ValueError("Empty value passed for parameter 'query'") + __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)} + __path = f'/_inference/rerank/{__path_parts["inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: @@ -250,71 +477,48 @@ async def inference( params=__query, headers=__headers, body=__body, - endpoint_id="inference.inference", + endpoint_id="inference.rerank", path_parts=__path_parts, ) @_rewrite_parameters( - body_name="inference_config", + body_fields=("input", "task_settings"), ) - async def put( + async def sparse_embedding( self, *, inference_id: str, - inference_config: t.Optional[t.Mapping[str, t.Any]] = None, - body: t.Optional[t.Mapping[str, t.Any]] = None, - task_type: t.Optional[ - t.Union[ - str, - t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"], - ] - ] = None, + input: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Any] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html -

        Create an inference endpoint. - When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

        -

        IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. - For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. - However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.

        +

        Perform sparse embedding inference on the service
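A short illustrative call to the sparse embedding helper; the endpoint ID is a placeholder, for example an ELSER endpoint created earlier:

[source, python]
----
resp = client.inference.sparse_embedding(
    inference_id="my-elser-endpoint",  # placeholder endpoint ID
    input="What is a sparse vector?",
)
print(resp)
----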

        - ``_ + ``_ :param inference_id: The inference Id - :param inference_config: - :param task_type: The task type + :param input: Inference input. Either a string or an array of strings. + :param task_settings: Optional task settings + :param timeout: Specifies the amount of time to wait for the inference request + to complete. """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") - if inference_config is None and body is None: - raise ValueError( - "Empty value passed for parameters 'inference_config' and 'body', one of them should be set." - ) - elif inference_config is not None and body is not None: - raise ValueError("Cannot set both 'inference_config' and 'body'") - __path_parts: t.Dict[str, str] - if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: - __path_parts = { - "task_type": _quote(task_type), - "inference_id": _quote(inference_id), - } - __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}' - elif inference_id not in SKIP_IN_PATH: - __path_parts = {"inference_id": _quote(inference_id)} - __path = f'/_inference/{__path_parts["inference_id"]}' - else: - raise ValueError("Couldn't find a path for the given parameters") + if input is None and body is None: + raise ValueError("Empty value passed for parameter 'input'") + __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)} + __path = f'/_inference/sparse_embedding/{__path_parts["inference_id"]}' __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: @@ -323,15 +527,93 @@ async def put( __query["human"] = human if pretty is not None: __query["pretty"] = pretty - __body = inference_config if inference_config is not None else body - __headers = {"accept": "application/json", "content-type": "application/json"} + if timeout is not None: + __query["timeout"] = timeout + if not __body: + if input is not None: + __body["input"] = input + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] - "PUT", + "POST", __path, params=__query, headers=__headers, body=__body, - endpoint_id="inference.put", + endpoint_id="inference.sparse_embedding", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("input", "task_settings"), + ) + async def text_embedding( + self, + *, + inference_id: str, + input: t.Optional[t.Union[str, t.Sequence[str]]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Any] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Perform text embedding inference on the service
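An equivalent sketch for dense text embeddings, again with placeholder names; `task_settings` and `timeout` remain optional:

[source, python]
----
resp = client.inference.text_embedding(
    inference_id="my-e5-endpoint",  # placeholder endpoint ID
    input=["first passage to embed", "second passage to embed"],
    timeout="30s",
)
print(resp)
----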

        + + + ``_ + + :param inference_id: The inference Id + :param input: Inference input. Either a string or an array of strings. + :param task_settings: Optional task settings + :param timeout: Specifies the amount of time to wait for the inference request + to complete. + """ + if inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'inference_id'") + if input is None and body is None: + raise ValueError("Empty value passed for parameter 'input'") + __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)} + __path = f'/_inference/text_embedding/{__path_parts["inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + if not __body: + if input is not None: + __body["input"] = input + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.text_embedding", path_parts=__path_parts, ) @@ -347,7 +629,13 @@ async def update( task_type: t.Optional[ t.Union[ str, - t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"], + t.Literal[ + "chat_completion", + "completion", + "rerank", + "sparse_embedding", + "text_embedding", + ], ] ] = None, error_trace: t.Optional[bool] = None, @@ -403,7 +691,7 @@ async def update( __body = inference_config if inference_config is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] - "POST", + "PUT", __path, params=__query, headers=__headers, diff --git a/elasticsearch/_async/client/simulate.py b/elasticsearch/_async/client/simulate.py index 73f71429f..5b2f11b2e 100644 --- a/elasticsearch/_async/client/simulate.py +++ b/elasticsearch/_async/client/simulate.py @@ -35,7 +35,7 @@ class SimulateClient(NamespacedClient): body_fields=( "docs", "component_template_substitutions", - "index_template_subtitutions", + "index_template_substitutions", "mapping_addition", "pipeline_substitutions", ), @@ -52,7 +52,7 @@ async def ingest( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, - index_template_subtitutions: t.Optional[ + index_template_substitutions: t.Optional[ t.Mapping[str, t.Mapping[str, t.Any]] ] = None, mapping_addition: t.Optional[t.Mapping[str, t.Any]] = None, @@ -90,7 +90,7 @@ async def ingest( an index argument. :param component_template_substitutions: A map of component template names to substitute component template definition objects. - :param index_template_subtitutions: A map of index template names to substitute + :param index_template_substitutions: A map of index template names to substitute index template definition objects. :param mapping_addition: :param pipeline: The pipeline to use as the default pipeline. 
This value can @@ -127,8 +127,8 @@ async def ingest( __body["component_template_substitutions"] = ( component_template_substitutions ) - if index_template_subtitutions is not None: - __body["index_template_subtitutions"] = index_template_subtitutions + if index_template_substitutions is not None: + __body["index_template_substitutions"] = index_template_substitutions if mapping_addition is not None: __body["mapping_addition"] = mapping_addition if pipeline_substitutions is not None: diff --git a/elasticsearch/_sync/client/indices.py b/elasticsearch/_sync/client/indices.py index ddcd59823..314e952b3 100644 --- a/elasticsearch/_sync/client/indices.py +++ b/elasticsearch/_sync/client/indices.py @@ -3009,7 +3009,7 @@ def migrate_to_data_stream( The write index for the alias becomes the write index for the stream.

        - ``_ + ``_ :param name: Name of the index alias to convert to a data stream. :param master_timeout: Period to wait for a connection to the master node. If @@ -3065,7 +3065,7 @@ def modify_data_stream( Performs one or more data stream modification actions in a single atomic operation.

        - ``_ + ``_ :param actions: Actions to perform. """ @@ -3230,7 +3230,7 @@ def promote_data_stream( This will affect the lifecycle management of the data stream and interfere with the data stream size and retention.

        - ``_ + ``_ :param name: The name of the data stream :param master_timeout: Period to wait for a connection to the master node. If @@ -3296,7 +3296,7 @@ def put_alias( Adds a data stream or index to an alias.

        - ``_ + ``_ :param index: Comma-separated list of data streams or indices to add. Supports wildcards (`*`). Wildcard patterns that match both data streams and indices @@ -3403,7 +3403,7 @@ def put_data_lifecycle( Update the data stream lifecycle of the specified data streams.

        - ``_ + ``_ :param name: Comma-separated list of data streams used to limit the request. Supports wildcards (`*`). To target all data streams use `*` or `_all`. @@ -3531,7 +3531,7 @@ def put_index_template( If an entry already exists with the same key, then it is overwritten by the new definition.

        - ``_ + ``_ :param name: Index or template name :param allow_auto_create: This setting overrides the value of the `action.auto_create_index` @@ -5465,7 +5465,7 @@ def update_aliases( Adds a data stream or index to an alias.

        - ``_ + ``_ :param actions: Actions to perform. :param master_timeout: Period to wait for a connection to the master node. If diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py index 7dc4a8cc2..0c8caa0de 100644 --- a/elasticsearch/_sync/client/inference.py +++ b/elasticsearch/_sync/client/inference.py @@ -25,6 +25,74 @@ class InferenceClient(NamespacedClient): + @_rewrite_parameters( + body_fields=("input", "task_settings"), + ) + def completion( + self, + *, + inference_id: str, + input: t.Optional[t.Union[str, t.Sequence[str]]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Any] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Perform completion inference on the service
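For reviewers, a minimal usage sketch of the new task-specific completion helper; the cluster address and endpoint ID are hypothetical and the endpoint is assumed to already exist:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumes a reachable, unsecured cluster
    resp = client.inference.completion(
        inference_id="my-completion-endpoint",  # hypothetical endpoint ID
        input="Summarize the release notes in one sentence.",
    )
    print(resp)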

        + + + ``_ + + :param inference_id: The inference Id + :param input: Inference input. Either a string or an array of strings. + :param task_settings: Optional task settings + :param timeout: Specifies the amount of time to wait for the inference request + to complete. + """ + if inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'inference_id'") + if input is None and body is None: + raise ValueError("Empty value passed for parameter 'input'") + __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)} + __path = f'/_inference/completion/{__path_parts["inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + if not __body: + if input is not None: + __body["input"] = input + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.completion", + path_parts=__path_parts, + ) + @_rewrite_parameters() def delete( self, @@ -33,7 +101,13 @@ def delete( task_type: t.Optional[ t.Union[ str, - t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"], + t.Literal[ + "chat_completion", + "completion", + "rerank", + "sparse_embedding", + "text_embedding", + ], ] ] = None, dry_run: t.Optional[bool] = None, @@ -102,7 +176,13 @@ def get( task_type: t.Optional[ t.Union[ str, - t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"], + t.Literal[ + "chat_completion", + "completion", + "rerank", + "sparse_embedding", + "text_embedding", + ], ] ] = None, inference_id: t.Optional[str] = None, @@ -155,24 +235,188 @@ def get( ) @_rewrite_parameters( - body_fields=("input", "query", "task_settings"), + body_name="inference_config", ) - def inference( + def put( self, *, inference_id: str, - input: t.Optional[t.Union[str, t.Sequence[str]]] = None, + inference_config: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Mapping[str, t.Any]] = None, task_type: t.Optional[ t.Union[ str, - t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"], + t.Literal[ + "chat_completion", + "completion", + "rerank", + "sparse_embedding", + "text_embedding", + ], ] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create an inference endpoint.
+        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+        After creating the endpoint, wait for the model deployment to complete before using it.
+        To verify the deployment status, use the get trained model statistics API.
+        Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
+        Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

        +

        IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.
+        For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.
+        However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
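A hedged sketch of creating an endpoint through the generic put helper; the service name and settings are illustrative placeholders rather than values defined by this patch:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumes a reachable cluster
    client.inference.put(
        task_type="sparse_embedding",
        inference_id="my-endpoint",  # hypothetical endpoint ID
        inference_config={
            "service": "my-service",  # placeholder service name
            "service_settings": {"model_id": "my-model"},  # placeholder settings
        },
    )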

        + + + ``_ + + :param inference_id: The inference Id + :param inference_config: + :param task_type: The task type + """ + if inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'inference_id'") + if inference_config is None and body is None: + raise ValueError( + "Empty value passed for parameters 'inference_config' and 'body', one of them should be set." + ) + elif inference_config is not None and body is not None: + raise ValueError("Cannot set both 'inference_config' and 'body'") + __path_parts: t.Dict[str, str] + if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: + __path_parts = { + "task_type": _quote(task_type), + "inference_id": _quote(inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}' + elif inference_id not in SKIP_IN_PATH: + __path_parts = {"inference_id": _quote(inference_id)} + __path = f'/_inference/{__path_parts["inference_id"]}' + else: + raise ValueError("Couldn't find a path for the given parameters") + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __body = inference_config if inference_config is not None else body + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("service", "service_settings"), + ) + def put_watsonx( + self, + *, + task_type: t.Union[str, t.Literal["text_embedding"]], + watsonx_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["watsonxai"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create a Watsonx inference endpoint.

        +

        Creates an inference endpoint to perform an inference task with the watsonxai service.
+        You need an IBM Cloud Databases for Elasticsearch deployment to use the watsonxai inference service.
+        You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.

        +

        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+        After creating the endpoint, wait for the model deployment to complete before using it.
+        To verify the deployment status, use the get trained model statistics API.
+        Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
+        Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
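A minimal sketch of the new Watsonx helper; the service_settings keys are placeholders, since the exact schema comes from the watsonxai service documentation rather than from this patch:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumes a reachable cluster
    client.inference.put_watsonx(
        task_type="text_embedding",
        watsonx_inference_id="watsonx-embeddings",  # hypothetical endpoint ID
        service="watsonxai",
        service_settings={  # placeholder keys; consult the watsonxai docs for the real schema
            "api_key": "<ibm-cloud-api-key>",
            "url": "<deployment-url>",
            "model_id": "<model-id>",
        },
    )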

        + + + ``_ + + :param task_type: The task type. The only valid task type for the model to perform + is `text_embedding`. + :param watsonx_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `watsonxai`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `watsonxai` service. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if watsonx_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'watsonx_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "watsonx_inference_id": _quote(watsonx_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["watsonx_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_watsonx", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("input", "query", "task_settings"), + ) + def rerank( + self, + *, + inference_id: str, + input: t.Optional[t.Union[str, t.Sequence[str]]] = None, query: t.Optional[str] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Any] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, @@ -180,14 +424,7 @@ def inference( """ .. raw:: html -

        Perform inference on the service.

        -

        This API enables you to use machine learning models to perform specific tasks on data that you provide as an input.
-        It returns a response with the results of the tasks.
-        The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.

        -
        -

        info
-        The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.

        -
        +

        Perform reranking inference on the service
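A usage sketch of the dedicated rerank helper; note that query is now validated as required alongside input (the endpoint ID is hypothetical):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumes a reachable cluster
    resp = client.inference.rerank(
        inference_id="my-rerank-endpoint",  # hypothetical endpoint ID
        query="best pizza in town",
        input=["Napoli has great pizza", "It is sunny today"],
    )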

        ``_ @@ -196,9 +433,7 @@ def inference( :param input: The text on which you want to perform the inference task. It can be a single string or an array. > info > Inference endpoints for the `completion` task type currently only support a single string as input. - :param task_type: The type of inference task that the model performs. - :param query: The query input, which is required only for the `rerank` task. - It is not required for other tasks. + :param query: Query input. :param task_settings: Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service. @@ -208,18 +443,10 @@ def inference( raise ValueError("Empty value passed for parameter 'inference_id'") if input is None and body is None: raise ValueError("Empty value passed for parameter 'input'") - __path_parts: t.Dict[str, str] - if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: - __path_parts = { - "task_type": _quote(task_type), - "inference_id": _quote(inference_id), - } - __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}' - elif inference_id not in SKIP_IN_PATH: - __path_parts = {"inference_id": _quote(inference_id)} - __path = f'/_inference/{__path_parts["inference_id"]}' - else: - raise ValueError("Couldn't find a path for the given parameters") + if query is None and body is None: + raise ValueError("Empty value passed for parameter 'query'") + __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)} + __path = f'/_inference/rerank/{__path_parts["inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: @@ -250,71 +477,48 @@ def inference( params=__query, headers=__headers, body=__body, - endpoint_id="inference.inference", + endpoint_id="inference.rerank", path_parts=__path_parts, ) @_rewrite_parameters( - body_name="inference_config", + body_fields=("input", "task_settings"), ) - def put( + def sparse_embedding( self, *, inference_id: str, - inference_config: t.Optional[t.Mapping[str, t.Any]] = None, - body: t.Optional[t.Mapping[str, t.Any]] = None, - task_type: t.Optional[ - t.Union[ - str, - t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"], - ] - ] = None, + input: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Any] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html -

        Create an inference endpoint.
-        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
-        After creating the endpoint, wait for the model deployment to complete before using it.
-        To verify the deployment status, use the get trained model statistics API.
-        Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
-        Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

        -

        IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face.
-        For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models.
-        However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.

        +

        Perform sparse embedding inference on the service
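A minimal sketch of the sparse embedding helper, assuming an existing sparse-embedding endpoint (the ID is hypothetical):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumes a reachable cluster
    resp = client.inference.sparse_embedding(
        inference_id="my-sparse-endpoint",  # hypothetical endpoint ID
        input="These are not the droids you are looking for.",
    )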

        - ``_ + ``_ :param inference_id: The inference Id - :param inference_config: - :param task_type: The task type + :param input: Inference input. Either a string or an array of strings. + :param task_settings: Optional task settings + :param timeout: Specifies the amount of time to wait for the inference request + to complete. """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") - if inference_config is None and body is None: - raise ValueError( - "Empty value passed for parameters 'inference_config' and 'body', one of them should be set." - ) - elif inference_config is not None and body is not None: - raise ValueError("Cannot set both 'inference_config' and 'body'") - __path_parts: t.Dict[str, str] - if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: - __path_parts = { - "task_type": _quote(task_type), - "inference_id": _quote(inference_id), - } - __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}' - elif inference_id not in SKIP_IN_PATH: - __path_parts = {"inference_id": _quote(inference_id)} - __path = f'/_inference/{__path_parts["inference_id"]}' - else: - raise ValueError("Couldn't find a path for the given parameters") + if input is None and body is None: + raise ValueError("Empty value passed for parameter 'input'") + __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)} + __path = f'/_inference/sparse_embedding/{__path_parts["inference_id"]}' __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: @@ -323,15 +527,93 @@ def put( __query["human"] = human if pretty is not None: __query["pretty"] = pretty - __body = inference_config if inference_config is not None else body - __headers = {"accept": "application/json", "content-type": "application/json"} + if timeout is not None: + __query["timeout"] = timeout + if not __body: + if input is not None: + __body["input"] = input + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] - "PUT", + "POST", __path, params=__query, headers=__headers, body=__body, - endpoint_id="inference.put", + endpoint_id="inference.sparse_embedding", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("input", "task_settings"), + ) + def text_embedding( + self, + *, + inference_id: str, + input: t.Optional[t.Union[str, t.Sequence[str]]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Any] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Perform text embedding inference on the service
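A minimal sketch of the text embedding helper; a list input illustrates that both a single string and an array of strings are accepted (the endpoint ID is hypothetical):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumes a reachable cluster
    resp = client.inference.text_embedding(
        inference_id="my-embedding-endpoint",  # hypothetical endpoint ID
        input=["first passage", "second passage"],
    )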

        + + + ``_ + + :param inference_id: The inference Id + :param input: Inference input. Either a string or an array of strings. + :param task_settings: Optional task settings + :param timeout: Specifies the amount of time to wait for the inference request + to complete. + """ + if inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'inference_id'") + if input is None and body is None: + raise ValueError("Empty value passed for parameter 'input'") + __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)} + __path = f'/_inference/text_embedding/{__path_parts["inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + if not __body: + if input is not None: + __body["input"] = input + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.text_embedding", path_parts=__path_parts, ) @@ -347,7 +629,13 @@ def update( task_type: t.Optional[ t.Union[ str, - t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"], + t.Literal[ + "chat_completion", + "completion", + "rerank", + "sparse_embedding", + "text_embedding", + ], ] ] = None, error_trace: t.Optional[bool] = None, @@ -403,7 +691,7 @@ def update( __body = inference_config if inference_config is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] - "POST", + "PUT", __path, params=__query, headers=__headers, diff --git a/elasticsearch/_sync/client/simulate.py b/elasticsearch/_sync/client/simulate.py index 36339f412..939754394 100644 --- a/elasticsearch/_sync/client/simulate.py +++ b/elasticsearch/_sync/client/simulate.py @@ -35,7 +35,7 @@ class SimulateClient(NamespacedClient): body_fields=( "docs", "component_template_substitutions", - "index_template_subtitutions", + "index_template_substitutions", "mapping_addition", "pipeline_substitutions", ), @@ -52,7 +52,7 @@ def ingest( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, - index_template_subtitutions: t.Optional[ + index_template_substitutions: t.Optional[ t.Mapping[str, t.Mapping[str, t.Any]] ] = None, mapping_addition: t.Optional[t.Mapping[str, t.Any]] = None, @@ -90,7 +90,7 @@ def ingest( an index argument. :param component_template_substitutions: A map of component template names to substitute component template definition objects. - :param index_template_subtitutions: A map of index template names to substitute + :param index_template_substitutions: A map of index template names to substitute index template definition objects. :param mapping_addition: :param pipeline: The pipeline to use as the default pipeline. 
This value can @@ -127,8 +127,8 @@ def ingest( __body["component_template_substitutions"] = ( component_template_substitutions ) - if index_template_subtitutions is not None: - __body["index_template_subtitutions"] = index_template_subtitutions + if index_template_substitutions is not None: + __body["index_template_substitutions"] = index_template_substitutions if mapping_addition is not None: __body["mapping_addition"] = mapping_addition if pipeline_substitutions is not None: diff --git a/elasticsearch/dsl/query.py b/elasticsearch/dsl/query.py index b5808959c..6e87f926c 100644 --- a/elasticsearch/dsl/query.py +++ b/elasticsearch/dsl/query.py @@ -795,6 +795,28 @@ def __init__( ) +class GeoGrid(Query): + """ + Matches `geo_point` and `geo_shape` values that intersect a grid cell + from a GeoGrid aggregation. + + :arg _field: The field to use in this query. + :arg _value: The query value for the field. + """ + + name = "geo_grid" + + def __init__( + self, + _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, + _value: Union["types.GeoGridQuery", Dict[str, Any], "DefaultType"] = DEFAULT, + **kwargs: Any, + ): + if _field is not DEFAULT: + kwargs[str(_field)] = _value + super().__init__(**kwargs) + + class GeoPolygon(Query): """ :arg _field: The field to use in this query. diff --git a/elasticsearch/dsl/types.py b/elasticsearch/dsl/types.py index d1c39003e..4ea6d8361 100644 --- a/elasticsearch/dsl/types.py +++ b/elasticsearch/dsl/types.py @@ -880,6 +880,48 @@ def __init__( super().__init__(kwargs) +class GeoGridQuery(AttrDict[Any]): + """ + :arg geogrid: + :arg geohash: + :arg geohex: + :arg boost: Floating point number used to decrease or increase the + relevance scores of the query. Boost values are relative to the + default value of 1.0. A boost value between 0 and 1.0 decreases + the relevance score. A value greater than 1.0 increases the + relevance score. Defaults to `1` if omitted. 
+ :arg _name: + """ + + geogrid: Union[str, DefaultType] + geohash: Union[str, DefaultType] + geohex: Union[str, DefaultType] + boost: Union[float, DefaultType] + _name: Union[str, DefaultType] + + def __init__( + self, + *, + geogrid: Union[str, DefaultType] = DEFAULT, + geohash: Union[str, DefaultType] = DEFAULT, + geohex: Union[str, DefaultType] = DEFAULT, + boost: Union[float, DefaultType] = DEFAULT, + _name: Union[str, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if geogrid is not DEFAULT: + kwargs["geogrid"] = geogrid + if geohash is not DEFAULT: + kwargs["geohash"] = geohash + if geohex is not DEFAULT: + kwargs["geohex"] = geohex + if boost is not DEFAULT: + kwargs["boost"] = boost + if _name is not DEFAULT: + kwargs["_name"] = _name + super().__init__(kwargs) + + class GeoHashLocation(AttrDict[Any]): """ :arg geohash: (required) From 7b2bd18dab043b9ba299fdbc10b6731ce9a0a7d7 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 1 Apr 2025 15:32:15 +0400 Subject: [PATCH 41/65] Remove unneded nonlocal and global declarations (#2866) (#2870) (cherry picked from commit 83025a601ce112e6045abe1b5d84cfbaed722b3f) Co-authored-by: Miguel Grinberg --- elasticsearch/_async/client/_base.py | 1 - elasticsearch/_async/helpers.py | 1 - elasticsearch/_sync/client/_base.py | 1 - elasticsearch/_sync/client/utils.py | 3 --- test_elasticsearch/test_server/test_rest_api_spec.py | 4 +--- utils/build-dists.py | 1 - 6 files changed, 1 insertion(+), 10 deletions(-) diff --git a/elasticsearch/_async/client/_base.py b/elasticsearch/_async/client/_base.py index dd0b0f44e..cc090671c 100644 --- a/elasticsearch/_async/client/_base.py +++ b/elasticsearch/_async/client/_base.py @@ -298,7 +298,6 @@ async def _perform_request( def mimetype_header_to_compat(header: str) -> None: # Converts all parts of a Accept/Content-Type headers # from application/X -> application/vnd.elasticsearch+X - nonlocal request_headers mimetype = request_headers.get(header, None) if mimetype: request_headers[header] = _COMPAT_MIMETYPE_RE.sub( diff --git a/elasticsearch/_async/helpers.py b/elasticsearch/_async/helpers.py index 4c53f0bbe..e4d5e6bc5 100644 --- a/elasticsearch/_async/helpers.py +++ b/elasticsearch/_async/helpers.py @@ -136,7 +136,6 @@ def aiter(x: Union[Iterable[T], AsyncIterable[T]]) -> AsyncIterator[T]: return x.__aiter__() async def f() -> AsyncIterable[T]: - nonlocal x ix: Iterable[T] = x for item in ix: yield item diff --git a/elasticsearch/_sync/client/_base.py b/elasticsearch/_sync/client/_base.py index 8929b1db7..868b71073 100644 --- a/elasticsearch/_sync/client/_base.py +++ b/elasticsearch/_sync/client/_base.py @@ -298,7 +298,6 @@ def _perform_request( def mimetype_header_to_compat(header: str) -> None: # Converts all parts of a Accept/Content-Type headers # from application/X -> application/vnd.elasticsearch+X - nonlocal request_headers mimetype = request_headers.get(header, None) if mimetype: request_headers[header] = _COMPAT_MIMETYPE_RE.sub( diff --git a/elasticsearch/_sync/client/utils.py b/elasticsearch/_sync/client/utils.py index 9f957987c..51afe1c78 100644 --- a/elasticsearch/_sync/client/utils.py +++ b/elasticsearch/_sync/client/utils.py @@ -134,7 +134,6 @@ def client_node_configs( def apply_node_options(node_config: NodeConfig) -> NodeConfig: """Needs special handling of headers since .replace() wipes out existing headers""" - nonlocal node_options headers = node_config.headers.copy() # type: ignore[attr-defined] headers_to_add = 
node_options.pop("headers", ()) @@ -343,8 +342,6 @@ def _rewrite_parameters( def wrapper(api: F) -> F: @wraps(api) def wrapped(*args: Any, **kwargs: Any) -> Any: - nonlocal api, body_name, body_fields - # Let's give a nicer error message when users pass positional arguments. if len(args) >= 2: raise TypeError( diff --git a/test_elasticsearch/test_server/test_rest_api_spec.py b/test_elasticsearch/test_server/test_rest_api_spec.py index 0b602684a..a84f0822a 100644 --- a/test_elasticsearch/test_server/test_rest_api_spec.py +++ b/test_elasticsearch/test_server/test_rest_api_spec.py @@ -280,8 +280,6 @@ def run_catch(self, catch, exception): self.last_response = exception.body def run_skip(self, skip): - global IMPLEMENTED_FEATURES - if "features" in skip: features = skip["features"] if not isinstance(features, (tuple, list)): @@ -437,7 +435,7 @@ def _lookup(self, path): return value def _feature_enabled(self, name): - global XPACK_FEATURES, IMPLEMENTED_FEATURES + global XPACK_FEATURES if XPACK_FEATURES is None: try: xinfo = self.client.xpack.info() diff --git a/utils/build-dists.py b/utils/build-dists.py index ec8083103..d67d6053a 100644 --- a/utils/build-dists.py +++ b/utils/build-dists.py @@ -42,7 +42,6 @@ def set_tmp_dir(): def run(*argv, expect_exit_code=0): - global tmp_dir try: prev_dir = os.getcwd() if tmp_dir is None: From 02f3ebf718d963f754f3e4dbd400f7909132f3f7 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Tue, 1 Apr 2025 13:06:51 +0100 Subject: [PATCH 42/65] Auto-generated API code (#2846) Co-authored-by: Quentin Pradet --- elasticsearch/_async/client/__init__.py | 94 +- elasticsearch/_async/client/cluster.py | 4 +- elasticsearch/_async/client/esql.py | 17 +- elasticsearch/_async/client/indices.py | 51 +- elasticsearch/_async/client/inference.py | 1579 +++++++++++++++++++++- elasticsearch/_async/client/license.py | 2 +- elasticsearch/_async/client/ml.py | 12 +- elasticsearch/_async/client/security.py | 14 +- elasticsearch/_async/client/transform.py | 4 +- elasticsearch/_async/client/watcher.py | 26 +- elasticsearch/_async/client/xpack.py | 2 +- elasticsearch/_sync/client/__init__.py | 94 +- elasticsearch/_sync/client/cluster.py | 4 +- elasticsearch/_sync/client/esql.py | 17 +- elasticsearch/_sync/client/indices.py | 51 +- elasticsearch/_sync/client/inference.py | 1579 +++++++++++++++++++++- elasticsearch/_sync/client/license.py | 2 +- elasticsearch/_sync/client/ml.py | 12 +- elasticsearch/_sync/client/security.py | 14 +- elasticsearch/_sync/client/transform.py | 4 +- elasticsearch/_sync/client/watcher.py | 26 +- elasticsearch/_sync/client/xpack.py | 2 +- elasticsearch/dsl/field.py | 85 +- elasticsearch/dsl/types.py | 55 +- 24 files changed, 3583 insertions(+), 167 deletions(-) diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py index d1ff463f1..9a3f8509c 100644 --- a/elasticsearch/_async/client/__init__.py +++ b/elasticsearch/_async/client/__init__.py @@ -1121,12 +1121,17 @@ async def create( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + if_primary_term: t.Optional[int] = None, + if_seq_no: t.Optional[int] = None, include_source_on_error: t.Optional[bool] = None, + op_type: t.Optional[t.Union[str, t.Literal["create", "index"]]] = None, pipeline: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, + require_alias: t.Optional[bool] = None, + 
require_data_stream: t.Optional[bool] = None, routing: t.Optional[str] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, version: t.Optional[int] = None, @@ -1204,8 +1209,18 @@ async def create( :param id: A unique identifier for the document. To automatically generate a document ID, use the `POST //_doc/` request format. :param document: + :param if_primary_term: Only perform the operation if the document has this primary + term. + :param if_seq_no: Only perform the operation if the document has this sequence + number. :param include_source_on_error: True or false if to include the document source in the error message in case of parsing errors. + :param op_type: Set to `create` to only index the document if it does not already + exist (put if absent). If a document with the specified `_id` already exists, + the indexing operation will fail. The behavior is the same as using the `/_create` + endpoint. If a document ID is specified, this paramater defaults to `index`. + Otherwise, it defaults to `create`. If the request targets a data stream, + an `op_type` of `create` is required. :param pipeline: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final @@ -1214,6 +1229,9 @@ async def create( :param refresh: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. + :param require_alias: If `true`, the destination must be an index alias. + :param require_data_stream: If `true`, the request's actions must target a data + stream (existing or to be created). :param routing: A custom value that is used to route operations to a specific shard. :param timeout: The period the request waits for the following operations: automatic @@ -1254,14 +1272,24 @@ async def create( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if if_primary_term is not None: + __query["if_primary_term"] = if_primary_term + if if_seq_no is not None: + __query["if_seq_no"] = if_seq_no if include_source_on_error is not None: __query["include_source_on_error"] = include_source_on_error + if op_type is not None: + __query["op_type"] = op_type if pipeline is not None: __query["pipeline"] = pipeline if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh + if require_alias is not None: + __query["require_alias"] = require_alias + if require_data_stream is not None: + __query["require_data_stream"] = require_data_stream if routing is not None: __query["routing"] = routing if timeout is not None: @@ -1553,7 +1581,7 @@ async def delete_by_query( If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. - :param from_: Starting offset (default: 0) + :param from_: Skips the specified number of documents. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. 
:param lenient: If `true`, format-based query failures (such as providing text @@ -3720,6 +3748,7 @@ async def open_point_in_time( human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, index_filter: t.Optional[t.Mapping[str, t.Any]] = None, + max_concurrent_shard_requests: t.Optional[int] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, routing: t.Optional[str] = None, @@ -3775,6 +3804,8 @@ async def open_point_in_time( a missing or closed index. :param index_filter: Filter indices if the provided query rewrites to `match_none` on every shard. + :param max_concurrent_shard_requests: Maximum number of concurrent shard requests + that each sub-search request executes per node. :param preference: The node or shard the operation should be performed on. By default, it is random. :param routing: A custom value that is used to route operations to a specific @@ -3802,6 +3833,8 @@ async def open_point_in_time( __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable + if max_concurrent_shard_requests is not None: + __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests if preference is not None: __query["preference"] = preference if pretty is not None: @@ -5973,7 +6006,20 @@ async def terms_enum( ) @_rewrite_parameters( - body_fields=("doc", "filter", "per_field_analyzer"), + body_fields=( + "doc", + "field_statistics", + "fields", + "filter", + "offsets", + "payloads", + "per_field_analyzer", + "positions", + "routing", + "term_statistics", + "version", + "version_type", + ), ) async def termvectors( self, @@ -6050,9 +6096,9 @@ async def termvectors( (the sum of document frequencies for all terms in this field). * The sum of total term frequencies (the sum of total term frequencies of each term in this field). - :param fields: A comma-separated list or wildcard expressions of fields to include - in the statistics. It is used as the default list unless a specific field - list is provided in the `completion_fields` or `fielddata_fields` parameters. + :param fields: A list of fields to include in the statistics. It is used as the + default list unless a specific field list is provided in the `completion_fields` + or `fielddata_fields` parameters. :param filter: Filter terms based on their tf-idf scores. This could be useful in order find out a good characteristic vector of a document. This feature works in a similar manner to the second phase of the More Like This Query. 
@@ -6090,41 +6136,41 @@ async def termvectors( __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace - if field_statistics is not None: - __query["field_statistics"] = field_statistics - if fields is not None: - __query["fields"] = fields if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human - if offsets is not None: - __query["offsets"] = offsets - if payloads is not None: - __query["payloads"] = payloads - if positions is not None: - __query["positions"] = positions if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if realtime is not None: __query["realtime"] = realtime - if routing is not None: - __query["routing"] = routing - if term_statistics is not None: - __query["term_statistics"] = term_statistics - if version is not None: - __query["version"] = version - if version_type is not None: - __query["version_type"] = version_type if not __body: if doc is not None: __body["doc"] = doc + if field_statistics is not None: + __body["field_statistics"] = field_statistics + if fields is not None: + __body["fields"] = fields if filter is not None: __body["filter"] = filter + if offsets is not None: + __body["offsets"] = offsets + if payloads is not None: + __body["payloads"] = payloads if per_field_analyzer is not None: __body["per_field_analyzer"] = per_field_analyzer + if positions is not None: + __body["positions"] = positions + if routing is not None: + __body["routing"] = routing + if term_statistics is not None: + __body["term_statistics"] = term_statistics + if version is not None: + __body["version"] = version + if version_type is not None: + __body["version_type"] = version_type if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} @@ -6475,7 +6521,7 @@ async def update_by_query( wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - :param from_: Starting offset (default: 0) + :param from_: Skips the specified number of documents. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param lenient: If `true`, format-based query failures (such as providing text diff --git a/elasticsearch/_async/client/cluster.py b/elasticsearch/_async/client/cluster.py index 9acb82f65..ee5ab1218 100644 --- a/elasticsearch/_async/client/cluster.py +++ b/elasticsearch/_async/client/cluster.py @@ -185,7 +185,7 @@ async def delete_voting_config_exclusions( Remove master-eligible nodes from the voting configuration exclusion list.

        - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. :param wait_for_removal: Specifies whether to wait for all excluded nodes to @@ -680,7 +680,7 @@ async def post_voting_config_exclusions( They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes.

        - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. :param node_ids: A comma-separated list of the persistent ids of the nodes to diff --git a/elasticsearch/_async/client/esql.py b/elasticsearch/_async/client/esql.py index d36ac49ed..a1dc79cd1 100644 --- a/elasticsearch/_async/client/esql.py +++ b/elasticsearch/_async/client/esql.py @@ -35,6 +35,7 @@ class EsqlClient(NamespacedClient): "params", "profile", "tables", + "wait_for_completion_timeout", ), ignore_deprecated_options={"params"}, ) @@ -42,6 +43,7 @@ async def async_query( self, *, query: t.Optional[str] = None, + allow_partial_results: t.Optional[bool] = None, columnar: t.Optional[bool] = None, delimiter: t.Optional[str] = None, drop_null_columns: t.Optional[bool] = None, @@ -86,6 +88,9 @@ async def async_query( :param query: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. + :param allow_partial_results: If `true`, partial results will be returned if + there are shard failures, but the query can continue to execute on other + clusters and shards. :param columnar: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one @@ -134,6 +139,8 @@ async def async_query( __path = "/_query/async" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} + if allow_partial_results is not None: + __query["allow_partial_results"] = allow_partial_results if delimiter is not None: __query["delimiter"] = delimiter if drop_null_columns is not None: @@ -152,8 +159,6 @@ async def async_query( __query["keep_on_completion"] = keep_on_completion if pretty is not None: __query["pretty"] = pretty - if wait_for_completion_timeout is not None: - __query["wait_for_completion_timeout"] = wait_for_completion_timeout if not __body: if query is not None: __body["query"] = query @@ -171,6 +176,8 @@ async def async_query( __body["profile"] = profile if tables is not None: __body["tables"] = tables + if wait_for_completion_timeout is not None: + __body["wait_for_completion_timeout"] = wait_for_completion_timeout __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", @@ -378,6 +385,7 @@ async def query( self, *, query: t.Optional[str] = None, + allow_partial_results: t.Optional[bool] = None, columnar: t.Optional[bool] = None, delimiter: t.Optional[str] = None, drop_null_columns: t.Optional[bool] = None, @@ -416,6 +424,9 @@ async def query( :param query: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. + :param allow_partial_results: If `true`, partial results will be returned if + there are shard failures, but the query can continue to execute on other + clusters and shards. :param columnar: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. 
For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one @@ -450,6 +461,8 @@ async def query( __path = "/_query" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} + if allow_partial_results is not None: + __query["allow_partial_results"] = allow_partial_results if delimiter is not None: __query["delimiter"] = delimiter if drop_null_columns is not None: diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py index 4082a68f7..79cac1395 100644 --- a/elasticsearch/_async/client/indices.py +++ b/elasticsearch/_async/client/indices.py @@ -1621,7 +1621,9 @@ async def exists_index_template( name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, + local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> HeadApiResponse: @@ -1636,6 +1638,10 @@ async def exists_index_template( :param name: Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. + :param flat_settings: If true, returns settings in flat format. + :param local: If true, the request retrieves information from the local node + only. Defaults to false, which means information is retrieved from the master + node. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -1649,8 +1655,12 @@ async def exists_index_template( __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path + if flat_settings is not None: + __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human + if local is not None: + __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: @@ -1800,9 +1810,6 @@ async def field_usage_stats( human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, - wait_for_active_shards: t.Optional[ - t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] - ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html @@ -1832,9 +1839,6 @@ async def field_usage_stats( in the statistics. :param ignore_unavailable: If `true`, missing or closed indices are not included in the response. - :param wait_for_active_shards: The number of shard copies that must be active - before proceeding with the operation. Set to all or any positive integer - up to the total number of shards in the index (`number_of_replicas+1`). 
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -1857,8 +1861,6 @@ async def field_usage_stats( __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty - if wait_for_active_shards is not None: - __query["wait_for_active_shards"] = wait_for_active_shards __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", @@ -3838,6 +3840,7 @@ async def put_settings( master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, preserve_existing: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + reopen: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -3880,6 +3883,9 @@ async def put_settings( no response is received before the timeout expires, the request fails and returns an error. :param preserve_existing: If `true`, existing index settings remain unchanged. + :param reopen: Whether to close and reopen the index to apply non-dynamic settings. + If set to `true` the indices to which the settings are being applied will + be closed temporarily and then reopened in order to apply the changes. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ @@ -3917,6 +3923,8 @@ async def put_settings( __query["preserve_existing"] = preserve_existing if pretty is not None: __query["pretty"] = pretty + if reopen is not None: + __query["reopen"] = reopen if timeout is not None: __query["timeout"] = timeout __body = settings if settings is not None else body @@ -3984,7 +3992,7 @@ async def put_template( :param name: The name of the template :param aliases: Aliases for the index. - :param cause: + :param cause: User defined reason for creating/updating the index template :param create: If true, this request cannot replace or update existing index templates. :param index_patterns: Array of wildcard expressions used to match the names @@ -4222,6 +4230,7 @@ async def reload_search_analyzers( human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + resource: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html @@ -4249,6 +4258,7 @@ async def reload_search_analyzers( that are open, closed or both. :param ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) + :param resource: Changed resource to reload analyzers from if applicable """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -4269,6 +4279,8 @@ async def reload_search_analyzers( __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty + if resource is not None: + __query["resource"] = resource __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", @@ -4505,6 +4517,7 @@ async def rollover( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + lazy: t.Optional[bool] = None, mappings: t.Optional[t.Mapping[str, t.Any]] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, @@ -4561,6 +4574,9 @@ async def rollover( conditions are satisfied. 
:param dry_run: If `true`, checks whether the current index satisfies the specified conditions but does not perform a rollover. + :param lazy: If set to true, the rollover action will only mark a data stream + to signal that it needs to be rolled over at the next write. Only allowed + on data streams. :param mappings: Mapping for fields in the index. If specified, this mapping can include field names, field data types, and mapping paramaters. :param master_timeout: Period to wait for a connection to the master node. If @@ -4595,6 +4611,8 @@ async def rollover( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if lazy is not None: + __query["lazy"] = lazy if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: @@ -4915,6 +4933,8 @@ async def simulate_index_template( self, *, name: str, + cause: t.Optional[str] = None, + create: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, @@ -4932,6 +4952,10 @@ async def simulate_index_template( ``_ :param name: Name of the index to simulate + :param cause: User defined reason for dry-run creating the new template for simulation + purposes + :param create: Whether the index template we optionally defined in the body should + only be dry-run added if new or can also replace an existing one :param include_defaults: If true, returns all relevant default configurations for the index template. :param master_timeout: Period to wait for a connection to the master node. If @@ -4943,6 +4967,10 @@ async def simulate_index_template( __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_index_template/_simulate_index/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} + if cause is not None: + __query["cause"] = cause + if create is not None: + __query["create"] = create if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: @@ -4985,6 +5013,7 @@ async def simulate_template( *, name: t.Optional[str] = None, allow_auto_create: t.Optional[bool] = None, + cause: t.Optional[str] = None, composed_of: t.Optional[t.Sequence[str]] = None, create: t.Optional[bool] = None, data_stream: t.Optional[t.Mapping[str, t.Any]] = None, @@ -5021,6 +5050,8 @@ async def simulate_template( via `actions.auto_create_index`. If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. + :param cause: User defined reason for dry-run creating the new template for simulation + purposes :param composed_of: An ordered list of component template names. Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. 
@@ -5065,6 +5096,8 @@ async def simulate_template( __path = "/_index_template/_simulate" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} + if cause is not None: + __query["cause"] = cause if create is not None: __query["create"] = create if error_trace is not None: diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py index 7c53afdfa..ca1217207 100644 --- a/elasticsearch/_async/client/inference.py +++ b/elasticsearch/_async/client/inference.py @@ -234,6 +234,67 @@ async def get( path_parts=__path_parts, ) + @_rewrite_parameters( + body_name="chat_completion_request", + ) + async def post_eis_chat_completion( + self, + *, + eis_inference_id: str, + chat_completion_request: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Perform a chat completion task through the Elastic Inference Service (EIS).

        +

        Perform a chat completion inference task with the elastic service.
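A hedged sketch of the async EIS chat completion helper; the chat_completion_request payload shape is assumed to follow the usual chat-message convention and is not spelled out in this patch:

    import asyncio
    from elasticsearch import AsyncElasticsearch

    async def main() -> None:
        client = AsyncElasticsearch("http://localhost:9200")  # assumes a reachable cluster
        resp = await client.inference.post_eis_chat_completion(
            eis_inference_id="my-eis-endpoint",  # hypothetical endpoint ID
            chat_completion_request={  # assumed payload shape
                "messages": [{"role": "user", "content": "Say hello"}]
            },
        )
        print(resp)
        await client.close()

    asyncio.run(main())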

        + + + ``_ + + :param eis_inference_id: The unique identifier of the inference endpoint. + :param chat_completion_request: + """ + if eis_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'eis_inference_id'") + if chat_completion_request is None and body is None: + raise ValueError( + "Empty value passed for parameters 'chat_completion_request' and 'body', one of them should be set." + ) + elif chat_completion_request is not None and body is not None: + raise ValueError("Cannot set both 'chat_completion_request' and 'body'") + __path_parts: t.Dict[str, str] = {"eis_inference_id": _quote(eis_inference_id)} + __path = ( + f'/_inference/chat_completion/{__path_parts["eis_inference_id"]}/_stream' + ) + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __body = ( + chat_completion_request if chat_completion_request is not None else body + ) + __headers = {"accept": "application/json", "content-type": "application/json"} + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.post_eis_chat_completion", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_name="inference_config", ) @@ -321,6 +382,1522 @@ async def put( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + async def put_alibabacloud( + self, + *, + task_type: t.Union[ + str, t.Literal["completion", "rerank", "space_embedding", "text_embedding"] + ], + alibabacloud_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["alibabacloud-ai-search"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create an AlibabaCloud AI Search inference endpoint.

        +

        Create an inference endpoint to perform an inference task with the alibabacloud-ai-search service.

        +

        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
+        After creating the endpoint, wait for the model deployment to complete before using it.
+        To verify the deployment status, use the get trained model statistics API.
+        Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
+        Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
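A sketch of the async AlibabaCloud AI Search helper; the service_settings keys are assumed placeholders, not values taken from this patch:

    import asyncio
    from elasticsearch import AsyncElasticsearch

    async def main() -> None:
        client = AsyncElasticsearch("http://localhost:9200")  # assumes a reachable cluster
        await client.inference.put_alibabacloud(
            task_type="text_embedding",
            alibabacloud_inference_id="alibaba-embeddings",  # hypothetical endpoint ID
            service="alibabacloud-ai-search",
            service_settings={  # placeholder keys; see the service docs for the real schema
                "api_key": "<api-key>",
                "host": "<host>",
                "workspace": "<workspace>",
            },
        )
        await client.close()

    asyncio.run(main())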

        + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param alibabacloud_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `alibabacloud-ai-search`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `alibabacloud-ai-search` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if alibabacloud_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'alibabacloud_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "alibabacloud_inference_id": _quote(alibabacloud_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["alibabacloud_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_alibabacloud", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + async def put_amazonbedrock( + self, + *, + task_type: t.Union[str, t.Literal["completion", "text_embedding"]], + amazonbedrock_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["amazonbedrock"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create an Amazon Bedrock inference endpoint.

        +

        Creates an inference endpoint to perform an inference task with the amazonbedrock service.

        +
        +

        info: You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.

        +
        +

        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
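        A hedged sketch of the key handling described above, reusing the async client from the earlier AlibabaCloud example; the service_settings field names and model identifier are illustrative assumptions, not an authoritative list.

            # Access and secret keys are supplied only at creation time and cannot be
            # retrieved or changed later without recreating the endpoint.
            await client.inference.put_amazonbedrock(
                task_type="text_embedding",
                amazonbedrock_inference_id="my-bedrock-embeddings",
                service="amazonbedrock",
                service_settings={
                    "access_key": "<aws-access-key>",
                    "secret_key": "<aws-secret-key>",
                    "region": "us-east-1",
                    "provider": "amazontitan",
                    "model": "amazon.titan-embed-text-v2:0",
                },
            )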

        + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param amazonbedrock_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `amazonbedrock`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `amazonbedrock` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if amazonbedrock_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'amazonbedrock_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "amazonbedrock_inference_id": _quote(amazonbedrock_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["amazonbedrock_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_amazonbedrock", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + async def put_anthropic( + self, + *, + task_type: t.Union[str, t.Literal["completion"]], + anthropic_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["anthropic"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create an Anthropic inference endpoint.

        +

        Create an inference endpoint to perform an inference task with the anthropic service.

        +

        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
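        An illustrative sketch, assuming the same async client as in the earlier examples; the Anthropic model identifier and task_settings values are placeholders, not a definitive configuration.

            # `completion` is the only valid task type for the anthropic service.
            await client.inference.put_anthropic(
                task_type="completion",
                anthropic_inference_id="my-anthropic-completion",
                service="anthropic",
                service_settings={
                    "api_key": "<anthropic-api-key>",
                    "model_id": "<anthropic-model-id>",
                },
                task_settings={"max_tokens": 1024},
            )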

        + + + ``_ + + :param task_type: The task type. The only valid task type for the model to perform + is `completion`. + :param anthropic_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `anthropic`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `watsonxai` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if anthropic_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'anthropic_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "anthropic_inference_id": _quote(anthropic_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["anthropic_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_anthropic", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + async def put_azureaistudio( + self, + *, + task_type: t.Union[str, t.Literal["completion", "text_embedding"]], + azureaistudio_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["azureaistudio"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create an Azure AI Studio inference endpoint.

        +

        Create an inference endpoint to perform an inference task with the azureaistudio service.

        +

        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

        + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param azureaistudio_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `azureaistudio`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `openai` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if azureaistudio_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'azureaistudio_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "azureaistudio_inference_id": _quote(azureaistudio_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["azureaistudio_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_azureaistudio", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + async def put_azureopenai( + self, + *, + task_type: t.Union[str, t.Literal["completion", "text_embedding"]], + azureopenai_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["azureopenai"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create an Azure OpenAI inference endpoint.

        +

        Create an inference endpoint to perform an inference task with the azureopenai service.

        +

        The list of chat completion models that you can choose from in your Azure OpenAI deployment includes:

        + +

        The list of embeddings models that you can choose from in your deployment can be found in the Azure models documentation.

        +

        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
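        A hedged usage sketch, reusing the async client from the earlier examples; the resource name, deployment identifier, and API version are placeholders for your own Azure OpenAI deployment.

            await client.inference.put_azureopenai(
                task_type="text_embedding",
                azureopenai_inference_id="my-azure-openai-embeddings",
                service="azureopenai",
                service_settings={
                    "api_key": "<azure-api-key>",
                    "resource_name": "<azure-resource-name>",
                    "deployment_id": "<deployment-id>",
                    "api_version": "<api-version>",
                },
            )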

        + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + NOTE: The `chat_completion` task type only supports streaming and only through + the _stream API. + :param azureopenai_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `azureopenai`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `azureopenai` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if azureopenai_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'azureopenai_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "azureopenai_inference_id": _quote(azureopenai_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["azureopenai_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_azureopenai", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + async def put_cohere( + self, + *, + task_type: t.Union[str, t.Literal["completion", "rerank", "text_embedding"]], + cohere_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["cohere"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create a Cohere inference endpoint.

        +

        Create an inference endpoint to perform an inference task with the cohere service.

        +

        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
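        A minimal sketch, assuming the same async client as in the earlier examples; the Cohere model identifier is illustrative.

            await client.inference.put_cohere(
                task_type="text_embedding",
                cohere_inference_id="my-cohere-embeddings",
                service="cohere",
                service_settings={
                    "api_key": "<cohere-api-key>",
                    "model_id": "embed-english-v3.0",
                },
            )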

        + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param cohere_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `cohere`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `cohere` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if cohere_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'cohere_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "cohere_inference_id": _quote(cohere_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["cohere_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_cohere", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("service", "service_settings"), + ) + async def put_eis( + self, + *, + task_type: t.Union[str, t.Literal["chat_completion"]], + eis_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["elastic"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create an Elastic Inference Service (EIS) inference endpoint.

        +

        Create an inference endpoint to perform an inference task through the Elastic Inference Service (EIS).
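        A hedged sketch, reusing the async client from the earlier examples; the model identifier is a placeholder, since the models available through EIS depend on your deployment.

            # The chat_completion task type is consumed through the _stream API.
            await client.inference.put_eis(
                task_type="chat_completion",
                eis_inference_id="my-eis-chat",
                service="elastic",
                service_settings={"model_id": "<eis-model-id>"},
            )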

        + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + NOTE: The `chat_completion` task type only supports streaming and only through + the _stream API. + :param eis_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `elastic`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `elastic` service. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if eis_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'eis_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "eis_inference_id": _quote(eis_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["eis_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_eis", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + async def put_elasticsearch( + self, + *, + task_type: t.Union[ + str, t.Literal["rerank", "sparse_embedding", "text_embedding"] + ], + elasticsearch_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["elasticsearch"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create an Elasticsearch inference endpoint.

        +

        Create an inference endpoint to perform an inference task with the elasticsearch service.

        +
        +

        info: Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints; you only need to create endpoints using the API if you want to customize the settings.

        +
        +

        If you use the ELSER or the E5 model through the elasticsearch service, the API request will automatically download and deploy the model if it isn't downloaded yet.

        +
        +

        info: You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout while the model downloads in the background. You can check the download progress in the Machine Learning UI. If you are using the Python client, you can set the timeout parameter to a higher value.

        +
        +

        After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
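        A hedged sketch of deploying ELSER through the elasticsearch service, reusing the async client from the earlier examples; the allocation settings are illustrative starting values.

            await client.inference.put_elasticsearch(
                task_type="sparse_embedding",
                elasticsearch_inference_id="my-elser-endpoint",  # must not match the model_id
                service="elasticsearch",
                service_settings={
                    "model_id": ".elser_model_2",
                    "num_allocations": 1,
                    "num_threads": 1,
                },
            )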

        + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param elasticsearch_inference_id: The unique identifier of the inference endpoint. + The must not match the `model_id`. + :param service: The type of service supported for the specified task type. In + this case, `elasticsearch`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `elasticsearch` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if elasticsearch_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'elasticsearch_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "elasticsearch_inference_id": _quote(elasticsearch_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["elasticsearch_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_elasticsearch", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("service", "service_settings", "chunking_settings"), + ) + async def put_elser( + self, + *, + task_type: t.Union[str, t.Literal["sparse_embedding"]], + elser_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["elser"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create an ELSER inference endpoint.

        +

        Create an inference endpoint to perform an inference task with the elser service. You can also deploy ELSER by using the Elasticsearch inference integration.

        +
        +

        info: Your Elasticsearch deployment contains a preconfigured ELSER inference endpoint; you only need to create an endpoint using the API if you want to customize the settings.

        +
        +

        The API request will automatically download and deploy the ELSER model if it isn't already downloaded.

        +
        +

        info: You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout while the model downloads in the background. You can check the download progress in the Machine Learning UI. If you are using the Python client, you can set the timeout parameter to a higher value.

        +
        +

        After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
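        A minimal sketch, assuming the same async client as before. It raises the per-request timeout to give the background model download time to start, which relates to the 502 note above.

            await client.options(request_timeout=300).inference.put_elser(
                task_type="sparse_embedding",
                elser_inference_id="my-elser",
                service="elser",
                service_settings={"num_allocations": 1, "num_threads": 1},
            )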

        + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param elser_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `elser`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `elser` service. + :param chunking_settings: The chunking configuration object. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if elser_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'elser_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "elser_inference_id": _quote(elser_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["elser_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_elser", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("service", "service_settings", "chunking_settings"), + ) + async def put_googleaistudio( + self, + *, + task_type: t.Union[str, t.Literal["completion", "text_embedding"]], + googleaistudio_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["googleaistudio"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create a Google AI Studio inference endpoint.

        +

        Create an inference endpoint to perform an inference task with the googleaistudio service.

        +

        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

        + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param googleaistudio_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `googleaistudio`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `googleaistudio` service. + :param chunking_settings: The chunking configuration object. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if googleaistudio_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'googleaistudio_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "googleaistudio_inference_id": _quote(googleaistudio_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["googleaistudio_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_googleaistudio", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + async def put_googlevertexai( + self, + *, + task_type: t.Union[str, t.Literal["rerank", "text_embedding"]], + googlevertexai_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["googlevertexai"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create a Google Vertex AI inference endpoint.

        +

        Create an inference endpoint to perform an inference task with the googlevertexai service.

        +

        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

        + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param googlevertexai_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `googlevertexai`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `googlevertexai` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if googlevertexai_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'googlevertexai_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "googlevertexai_inference_id": _quote(googlevertexai_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["googlevertexai_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_googlevertexai", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("service", "service_settings", "chunking_settings"), + ) + async def put_hugging_face( + self, + *, + task_type: t.Union[str, t.Literal["text_embedding"]], + huggingface_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["hugging_face"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create a Hugging Face inference endpoint.

        +

        Create an inference endpoint to perform an inference task with the hugging_face service.

        +

        You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL. Select the model you want to use on the new endpoint creation page (for example, intfloat/e5-small-v2), then select the sentence embeddings task under the advanced configuration section. Create the endpoint and copy the URL after the endpoint initialization has finished.

        +

        The following models are recommended for the Hugging Face service:

        • all-MiniLM-L6-v2
        • all-MiniLM-L12-v2
        • all-mpnet-base-v2
        • e5-base-v2
        • e5-small-v2
        • multilingual-e5-base
        • multilingual-e5-small

        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
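        A hedged sketch, reusing the async client from the earlier examples; the URL and access token come from the endpoint you created on the Hugging Face side.

            await client.inference.put_hugging_face(
                task_type="text_embedding",
                huggingface_inference_id="my-hf-embeddings",
                service="hugging_face",
                service_settings={
                    "url": "<hugging-face-endpoint-url>",
                    "api_key": "<hugging-face-access-token>",
                },
            )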

        + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param huggingface_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `hugging_face`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `hugging_face` service. + :param chunking_settings: The chunking configuration object. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if huggingface_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'huggingface_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "huggingface_inference_id": _quote(huggingface_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["huggingface_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_hugging_face", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + async def put_jinaai( + self, + *, + task_type: t.Union[str, t.Literal["rerank", "text_embedding"]], + jinaai_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["jinaai"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create a JinaAI inference endpoint.

        +

        Create an inference endpoint to perform an inference task with the jinaai service.

        +

        To review the available rerank models, refer to https://jina.ai/reranker. To review the available text_embedding models, refer to https://jina.ai/embeddings/.

        +

        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
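        An illustrative sketch, assuming the same async client as before; the model identifier is a placeholder drawn from the JinaAI model pages linked above.

            await client.inference.put_jinaai(
                task_type="text_embedding",
                jinaai_inference_id="my-jinaai-embeddings",
                service="jinaai",
                service_settings={
                    "api_key": "<jinaai-api-key>",
                    "model_id": "<jinaai-embedding-model>",
                },
            )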

        + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param jinaai_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `jinaai`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `jinaai` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if jinaai_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'jinaai_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "jinaai_inference_id": _quote(jinaai_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["jinaai_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_jinaai", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("service", "service_settings", "chunking_settings"), + ) + async def put_mistral( + self, + *, + task_type: t.Union[str, t.Literal["text_embedding"]], + mistral_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["mistral"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create a Mistral inference endpoint.

        +

        Creates an inference endpoint to perform an inference task with the mistral service.

        +

        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
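        A hedged sketch, reusing the async client from the earlier examples; the service_settings field names are assumptions for the Mistral embeddings service.

            # `text_embedding` is the only valid task type for the mistral service.
            await client.inference.put_mistral(
                task_type="text_embedding",
                mistral_inference_id="my-mistral-embeddings",
                service="mistral",
                service_settings={
                    "api_key": "<mistral-api-key>",
                    "model": "mistral-embed",
                },
            )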

        + + + ``_ + + :param task_type: The task type. The only valid task type for the model to perform + is `text_embedding`. + :param mistral_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `mistral`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `mistral` service. + :param chunking_settings: The chunking configuration object. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if mistral_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'mistral_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "mistral_inference_id": _quote(mistral_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["mistral_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_mistral", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + async def put_openai( + self, + *, + task_type: t.Union[ + str, t.Literal["chat_completion", "completion", "text_embedding"] + ], + openai_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["openai"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create an OpenAI inference endpoint.

        +

        Create an inference endpoint to perform an inference task with the openai service or openai-compatible APIs.

        +

        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
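        A minimal sketch, assuming the same async client as in the earlier examples; the embedding model name is illustrative.

            await client.inference.put_openai(
                task_type="text_embedding",
                openai_inference_id="my-openai-embeddings",
                service="openai",
                service_settings={
                    "api_key": "<openai-api-key>",
                    "model_id": "text-embedding-3-small",
                },
            )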

        + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + NOTE: The `chat_completion` task type only supports streaming and only through + the _stream API. + :param openai_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `openai`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `openai` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if openai_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'openai_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "openai_inference_id": _quote(openai_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["openai_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_openai", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + async def put_voyageai( + self, + *, + task_type: t.Union[str, t.Literal["rerank", "text_embedding"]], + voyageai_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["voyageai"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create a VoyageAI inference endpoint.

        +

        Create an inference endpoint to perform an inference task with the voyageai service.

        +

        Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
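        A hedged sketch, reusing the async client from the earlier examples; the VoyageAI model identifier and service_settings field names are assumptions, not an authoritative list.

            await client.inference.put_voyageai(
                task_type="text_embedding",
                voyageai_inference_id="my-voyageai-embeddings",
                service="voyageai",
                service_settings={
                    "api_key": "<voyageai-api-key>",
                    "model_id": "<voyageai-embedding-model>",
                },
            )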

        + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param voyageai_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `voyageai`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `voyageai` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if voyageai_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'voyageai_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "voyageai_inference_id": _quote(voyageai_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["voyageai_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_voyageai", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=("service", "service_settings"), ) @@ -341,7 +1918,7 @@ async def put_watsonx( .. raw:: html

        Create a Watsonx inference endpoint.

        -

        Creates an inference endpoint to perform an inference task with the watsonxai service. +

        Create an inference endpoint to perform an inference task with the watsonxai service. You need an IBM Cloud Databases for Elasticsearch deployment to use the watsonxai inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.
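A rough sketch of the equivalent call from the synchronous client, which mirrors the async method shown here; every service_settings value below is a placeholder for an IBM Cloud watsonx.ai deployment and is not taken from this patch:

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder host

# All connection details are placeholders; consult the watsonxai service
# documentation for the exact settings your deployment requires.
client.inference.put_watsonx(
    task_type="text_embedding",
    watsonx_inference_id="my-watsonx-endpoint",
    service="watsonxai",
    service_settings={
        "url": "<WATSONX_URL>",
        "api_key": "<WATSONX_API_KEY>",
        "api_version": "2024-05-02",
        "model_id": "ibm/slate-30m-english-rtrvr",
        "project_id": "<PROJECT_ID>",
    },
)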

        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. diff --git a/elasticsearch/_async/client/license.py b/elasticsearch/_async/client/license.py index aac236243..61d5865da 100644 --- a/elasticsearch/_async/client/license.py +++ b/elasticsearch/_async/client/license.py @@ -237,7 +237,7 @@ async def post( If the operator privileges feature is enabled, only operator users can use this API.

        - ``_ + ``_ :param acknowledge: Specifies whether you acknowledge the license changes. :param license: diff --git a/elasticsearch/_async/client/ml.py b/elasticsearch/_async/client/ml.py index d39c84f73..f6d8142c7 100644 --- a/elasticsearch/_async/client/ml.py +++ b/elasticsearch/_async/client/ml.py @@ -3604,11 +3604,11 @@ async def put_datafeed( :param ignore_unavailable: If true, unavailable indices (missing or closed) are ignored. :param indexes: An array of index names. Wildcards are supported. If any of the - indices are in remote clusters, the machine learning nodes must have the - `remote_cluster_client` role. + indices are in remote clusters, the master nodes and the machine learning + nodes must have the `remote_cluster_client` role. :param indices: An array of index names. Wildcards are supported. If any of the - indices are in remote clusters, the machine learning nodes must have the - `remote_cluster_client` role. + indices are in remote clusters, the master nodes and the machine learning + nodes must have the `remote_cluster_client` role. :param indices_options: Specifies index expansion options that are used during search :param job_id: Identifier for the anomaly detection job. @@ -5004,7 +5004,7 @@ async def update_data_frame_analytics(

        Update a data frame analytics job.

        - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -5577,7 +5577,7 @@ async def update_trained_model_deployment(

        Update a trained model deployment.

        - ``_ + ``_ :param model_id: The unique identifier of the trained model. Currently, only PyTorch models are supported. diff --git a/elasticsearch/_async/client/security.py b/elasticsearch/_async/client/security.py index 2fb66dddb..a2f5ac605 100644 --- a/elasticsearch/_async/client/security.py +++ b/elasticsearch/_async/client/security.py @@ -2867,12 +2867,12 @@ async def oidc_authenticate( ) @_rewrite_parameters( - body_fields=("access_token", "refresh_token"), + body_fields=("token", "refresh_token"), ) async def oidc_logout( self, *, - access_token: t.Optional[str] = None, + token: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, @@ -2892,11 +2892,11 @@ async def oidc_logout( ``_ - :param access_token: The access token to be invalidated. + :param token: The access token to be invalidated. :param refresh_token: The refresh token to be invalidated. """ - if access_token is None and body is None: - raise ValueError("Empty value passed for parameter 'access_token'") + if token is None and body is None: + raise ValueError("Empty value passed for parameter 'token'") __path_parts: t.Dict[str, str] = {} __path = "/_security/oidc/logout" __query: t.Dict[str, t.Any] = {} @@ -2910,8 +2910,8 @@ async def oidc_logout( if pretty is not None: __query["pretty"] = pretty if not __body: - if access_token is not None: - __body["access_token"] = access_token + if token is not None: + __body["token"] = token if refresh_token is not None: __body["refresh_token"] = refresh_token __headers = {"accept": "application/json", "content-type": "application/json"} diff --git a/elasticsearch/_async/client/transform.py b/elasticsearch/_async/client/transform.py index 7b134118b..56ebaf10b 100644 --- a/elasticsearch/_async/client/transform.py +++ b/elasticsearch/_async/client/transform.py @@ -795,7 +795,7 @@ async def update_transform( time of update and runs with those privileges.

        - ``_ + ``_ :param transform_id: Identifier for the transform. :param defer_validation: When true, deferrable validations are not run. This @@ -890,7 +890,7 @@ async def upgrade_transforms( You may want to perform a recent cluster backup prior to the upgrade.

        - ``_ + ``_ :param dry_run: When true, the request checks for updates but does not run them. :param timeout: Period to wait for a response. If no response is received before diff --git a/elasticsearch/_async/client/watcher.py b/elasticsearch/_async/client/watcher.py index 7fe3d0a4b..be2f8265e 100644 --- a/elasticsearch/_async/client/watcher.py +++ b/elasticsearch/_async/client/watcher.py @@ -48,7 +48,7 @@ async def ack_watch( This happens when the condition of the watch is not met (the condition evaluates to false).

        - ``_ + ``_ :param watch_id: The watch identifier. :param action_id: A comma-separated list of the action identifiers to acknowledge. @@ -104,7 +104,7 @@ async def activate_watch( A watch can be either active or inactive.

        - ``_ + ``_ :param watch_id: The watch identifier. """ @@ -148,7 +148,7 @@ async def deactivate_watch( A watch can be either active or inactive.

        - ``_ + ``_ :param watch_id: The watch identifier. """ @@ -196,7 +196,7 @@ async def delete_watch( When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the .watches index.

        - ``_ + ``_ :param id: The watch identifier. """ @@ -277,7 +277,7 @@ async def execute_watch(

        When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information of the user who stored the watch.

        - ``_ + ``_ :param id: The watch identifier. :param action_modes: Determines how to handle the watch actions as part of the @@ -365,7 +365,7 @@ async def get_settings( Only a subset of settings are shown, for example index.auto_expand_replicas and index.number_of_replicas.

        - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails @@ -410,7 +410,7 @@ async def get_watch(

        Get a watch.

        - ``_ + ``_ :param id: The watch identifier. """ @@ -485,7 +485,7 @@ async def put_watch( If the user is able to read index a, but not index b, the same will apply when the watch runs.

        - ``_ + ``_ :param id: The identifier for the watch. :param actions: The list of actions that will be run if the condition matches. @@ -598,7 +598,7 @@ async def query_watches(

        Note that only the _id and metadata.* fields are queryable or sortable.

        - ``_ + ``_ :param from_: The offset from the first result to fetch. It must be non-negative. :param query: A query that filters the watches to be returned. @@ -673,7 +673,7 @@ async def start( Start the Watcher service if it is not already running.

        - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. """ @@ -739,7 +739,7 @@ async def stats( You retrieve more metrics by using the metric parameter.

        - ``_ + ``_ :param metric: Defines which additional metrics are included in the response. :param emit_stacktraces: Defines whether stack traces are generated for each @@ -790,7 +790,7 @@ async def stop( Stop the Watcher service if it is running.

        - ``_ + ``_ :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns @@ -848,7 +848,7 @@ async def update_settings( This includes index.auto_expand_replicas and index.number_of_replicas.

        - ``_ + ``_ :param index_auto_expand_replicas: :param index_number_of_replicas: diff --git a/elasticsearch/_async/client/xpack.py b/elasticsearch/_async/client/xpack.py index 36e87da61..2fc8f27bf 100644 --- a/elasticsearch/_async/client/xpack.py +++ b/elasticsearch/_async/client/xpack.py @@ -103,7 +103,7 @@ async def usage( The API also provides some usage statistics.

        - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py index 705a3914d..b8d4b3297 100644 --- a/elasticsearch/_sync/client/__init__.py +++ b/elasticsearch/_sync/client/__init__.py @@ -1119,12 +1119,17 @@ def create( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + if_primary_term: t.Optional[int] = None, + if_seq_no: t.Optional[int] = None, include_source_on_error: t.Optional[bool] = None, + op_type: t.Optional[t.Union[str, t.Literal["create", "index"]]] = None, pipeline: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, + require_alias: t.Optional[bool] = None, + require_data_stream: t.Optional[bool] = None, routing: t.Optional[str] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, version: t.Optional[int] = None, @@ -1202,8 +1207,18 @@ def create( :param id: A unique identifier for the document. To automatically generate a document ID, use the `POST //_doc/` request format. :param document: + :param if_primary_term: Only perform the operation if the document has this primary + term. + :param if_seq_no: Only perform the operation if the document has this sequence + number. :param include_source_on_error: True or false if to include the document source in the error message in case of parsing errors. + :param op_type: Set to `create` to only index the document if it does not already + exist (put if absent). If a document with the specified `_id` already exists, + the indexing operation will fail. The behavior is the same as using the `/_create` + endpoint. If a document ID is specified, this paramater defaults to `index`. + Otherwise, it defaults to `create`. If the request targets a data stream, + an `op_type` of `create` is required. :param pipeline: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final @@ -1212,6 +1227,9 @@ def create( :param refresh: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. + :param require_alias: If `true`, the destination must be an index alias. + :param require_data_stream: If `true`, the request's actions must target a data + stream (existing or to be created). :param routing: A custom value that is used to route operations to a specific shard. 
:param timeout: The period the request waits for the following operations: automatic @@ -1252,14 +1270,24 @@ def create( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if if_primary_term is not None: + __query["if_primary_term"] = if_primary_term + if if_seq_no is not None: + __query["if_seq_no"] = if_seq_no if include_source_on_error is not None: __query["include_source_on_error"] = include_source_on_error + if op_type is not None: + __query["op_type"] = op_type if pipeline is not None: __query["pipeline"] = pipeline if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh + if require_alias is not None: + __query["require_alias"] = require_alias + if require_data_stream is not None: + __query["require_data_stream"] = require_data_stream if routing is not None: __query["routing"] = routing if timeout is not None: @@ -1551,7 +1579,7 @@ def delete_by_query( If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. - :param from_: Starting offset (default: 0) + :param from_: Skips the specified number of documents. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param lenient: If `true`, format-based query failures (such as providing text @@ -3718,6 +3746,7 @@ def open_point_in_time( human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, index_filter: t.Optional[t.Mapping[str, t.Any]] = None, + max_concurrent_shard_requests: t.Optional[int] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, routing: t.Optional[str] = None, @@ -3773,6 +3802,8 @@ def open_point_in_time( a missing or closed index. :param index_filter: Filter indices if the provided query rewrites to `match_none` on every shard. + :param max_concurrent_shard_requests: Maximum number of concurrent shard requests + that each sub-search request executes per node. :param preference: The node or shard the operation should be performed on. By default, it is random. :param routing: A custom value that is used to route operations to a specific @@ -3800,6 +3831,8 @@ def open_point_in_time( __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable + if max_concurrent_shard_requests is not None: + __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests if preference is not None: __query["preference"] = preference if pretty is not None: @@ -5971,7 +6004,20 @@ def terms_enum( ) @_rewrite_parameters( - body_fields=("doc", "filter", "per_field_analyzer"), + body_fields=( + "doc", + "field_statistics", + "fields", + "filter", + "offsets", + "payloads", + "per_field_analyzer", + "positions", + "routing", + "term_statistics", + "version", + "version_type", + ), ) def termvectors( self, @@ -6048,9 +6094,9 @@ def termvectors( (the sum of document frequencies for all terms in this field). * The sum of total term frequencies (the sum of total term frequencies of each term in this field). - :param fields: A comma-separated list or wildcard expressions of fields to include - in the statistics. It is used as the default list unless a specific field - list is provided in the `completion_fields` or `fielddata_fields` parameters. + :param fields: A list of fields to include in the statistics. 
It is used as the + default list unless a specific field list is provided in the `completion_fields` + or `fielddata_fields` parameters. :param filter: Filter terms based on their tf-idf scores. This could be useful in order find out a good characteristic vector of a document. This feature works in a similar manner to the second phase of the More Like This Query. @@ -6088,41 +6134,41 @@ def termvectors( __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace - if field_statistics is not None: - __query["field_statistics"] = field_statistics - if fields is not None: - __query["fields"] = fields if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human - if offsets is not None: - __query["offsets"] = offsets - if payloads is not None: - __query["payloads"] = payloads - if positions is not None: - __query["positions"] = positions if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if realtime is not None: __query["realtime"] = realtime - if routing is not None: - __query["routing"] = routing - if term_statistics is not None: - __query["term_statistics"] = term_statistics - if version is not None: - __query["version"] = version - if version_type is not None: - __query["version_type"] = version_type if not __body: if doc is not None: __body["doc"] = doc + if field_statistics is not None: + __body["field_statistics"] = field_statistics + if fields is not None: + __body["fields"] = fields if filter is not None: __body["filter"] = filter + if offsets is not None: + __body["offsets"] = offsets + if payloads is not None: + __body["payloads"] = payloads if per_field_analyzer is not None: __body["per_field_analyzer"] = per_field_analyzer + if positions is not None: + __body["positions"] = positions + if routing is not None: + __body["routing"] = routing + if term_statistics is not None: + __body["term_statistics"] = term_statistics + if version is not None: + __body["version"] = version + if version_type is not None: + __body["version_type"] = version_type if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} @@ -6473,7 +6519,7 @@ def update_by_query( wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - :param from_: Starting offset (default: 0) + :param from_: Skips the specified number of documents. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param lenient: If `true`, format-based query failures (such as providing text diff --git a/elasticsearch/_sync/client/cluster.py b/elasticsearch/_sync/client/cluster.py index 54e83a132..a0652ff92 100644 --- a/elasticsearch/_sync/client/cluster.py +++ b/elasticsearch/_sync/client/cluster.py @@ -185,7 +185,7 @@ def delete_voting_config_exclusions( Remove master-eligible nodes from the voting configuration exclusion list.

        - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. :param wait_for_removal: Specifies whether to wait for all excluded nodes to @@ -680,7 +680,7 @@ def post_voting_config_exclusions( They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes.

        - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. :param node_ids: A comma-separated list of the persistent ids of the nodes to diff --git a/elasticsearch/_sync/client/esql.py b/elasticsearch/_sync/client/esql.py index 25baccf1f..410276d83 100644 --- a/elasticsearch/_sync/client/esql.py +++ b/elasticsearch/_sync/client/esql.py @@ -35,6 +35,7 @@ class EsqlClient(NamespacedClient): "params", "profile", "tables", + "wait_for_completion_timeout", ), ignore_deprecated_options={"params"}, ) @@ -42,6 +43,7 @@ def async_query( self, *, query: t.Optional[str] = None, + allow_partial_results: t.Optional[bool] = None, columnar: t.Optional[bool] = None, delimiter: t.Optional[str] = None, drop_null_columns: t.Optional[bool] = None, @@ -86,6 +88,9 @@ def async_query( :param query: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. + :param allow_partial_results: If `true`, partial results will be returned if + there are shard failures, but the query can continue to execute on other + clusters and shards. :param columnar: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one @@ -134,6 +139,8 @@ def async_query( __path = "/_query/async" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} + if allow_partial_results is not None: + __query["allow_partial_results"] = allow_partial_results if delimiter is not None: __query["delimiter"] = delimiter if drop_null_columns is not None: @@ -152,8 +159,6 @@ def async_query( __query["keep_on_completion"] = keep_on_completion if pretty is not None: __query["pretty"] = pretty - if wait_for_completion_timeout is not None: - __query["wait_for_completion_timeout"] = wait_for_completion_timeout if not __body: if query is not None: __body["query"] = query @@ -171,6 +176,8 @@ def async_query( __body["profile"] = profile if tables is not None: __body["tables"] = tables + if wait_for_completion_timeout is not None: + __body["wait_for_completion_timeout"] = wait_for_completion_timeout __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", @@ -378,6 +385,7 @@ def query( self, *, query: t.Optional[str] = None, + allow_partial_results: t.Optional[bool] = None, columnar: t.Optional[bool] = None, delimiter: t.Optional[str] = None, drop_null_columns: t.Optional[bool] = None, @@ -416,6 +424,9 @@ def query( :param query: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. + :param allow_partial_results: If `true`, partial results will be returned if + there are shard failures, but the query can continue to execute on other + clusters and shards. :param columnar: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. 
For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one @@ -450,6 +461,8 @@ def query( __path = "/_query" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} + if allow_partial_results is not None: + __query["allow_partial_results"] = allow_partial_results if delimiter is not None: __query["delimiter"] = delimiter if drop_null_columns is not None: diff --git a/elasticsearch/_sync/client/indices.py b/elasticsearch/_sync/client/indices.py index 314e952b3..a25c3bc0b 100644 --- a/elasticsearch/_sync/client/indices.py +++ b/elasticsearch/_sync/client/indices.py @@ -1621,7 +1621,9 @@ def exists_index_template( name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, + local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> HeadApiResponse: @@ -1636,6 +1638,10 @@ def exists_index_template( :param name: Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. + :param flat_settings: If true, returns settings in flat format. + :param local: If true, the request retrieves information from the local node + only. Defaults to false, which means information is retrieved from the master + node. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -1649,8 +1655,12 @@ def exists_index_template( __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path + if flat_settings is not None: + __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human + if local is not None: + __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: @@ -1800,9 +1810,6 @@ def field_usage_stats( human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, - wait_for_active_shards: t.Optional[ - t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] - ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html @@ -1832,9 +1839,6 @@ def field_usage_stats( in the statistics. :param ignore_unavailable: If `true`, missing or closed indices are not included in the response. - :param wait_for_active_shards: The number of shard copies that must be active - before proceeding with the operation. Set to all or any positive integer - up to the total number of shards in the index (`number_of_replicas+1`). 
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -1857,8 +1861,6 @@ def field_usage_stats( __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty - if wait_for_active_shards is not None: - __query["wait_for_active_shards"] = wait_for_active_shards __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", @@ -3838,6 +3840,7 @@ def put_settings( master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, preserve_existing: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + reopen: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -3880,6 +3883,9 @@ def put_settings( no response is received before the timeout expires, the request fails and returns an error. :param preserve_existing: If `true`, existing index settings remain unchanged. + :param reopen: Whether to close and reopen the index to apply non-dynamic settings. + If set to `true` the indices to which the settings are being applied will + be closed temporarily and then reopened in order to apply the changes. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ @@ -3917,6 +3923,8 @@ def put_settings( __query["preserve_existing"] = preserve_existing if pretty is not None: __query["pretty"] = pretty + if reopen is not None: + __query["reopen"] = reopen if timeout is not None: __query["timeout"] = timeout __body = settings if settings is not None else body @@ -3984,7 +3992,7 @@ def put_template( :param name: The name of the template :param aliases: Aliases for the index. - :param cause: + :param cause: User defined reason for creating/updating the index template :param create: If true, this request cannot replace or update existing index templates. :param index_patterns: Array of wildcard expressions used to match the names @@ -4222,6 +4230,7 @@ def reload_search_analyzers( human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + resource: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html @@ -4249,6 +4258,7 @@ def reload_search_analyzers( that are open, closed or both. :param ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) + :param resource: Changed resource to reload analyzers from if applicable """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") @@ -4269,6 +4279,8 @@ def reload_search_analyzers( __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty + if resource is not None: + __query["resource"] = resource __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", @@ -4505,6 +4517,7 @@ def rollover( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + lazy: t.Optional[bool] = None, mappings: t.Optional[t.Mapping[str, t.Any]] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, @@ -4561,6 +4574,9 @@ def rollover( conditions are satisfied. :param dry_run: If `true`, checks whether the current index satisfies the specified conditions but does not perform a rollover. 
+ :param lazy: If set to true, the rollover action will only mark a data stream + to signal that it needs to be rolled over at the next write. Only allowed + on data streams. :param mappings: Mapping for fields in the index. If specified, this mapping can include field names, field data types, and mapping paramaters. :param master_timeout: Period to wait for a connection to the master node. If @@ -4595,6 +4611,8 @@ def rollover( __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if lazy is not None: + __query["lazy"] = lazy if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: @@ -4915,6 +4933,8 @@ def simulate_index_template( self, *, name: str, + cause: t.Optional[str] = None, + create: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, @@ -4932,6 +4952,10 @@ def simulate_index_template( ``_ :param name: Name of the index to simulate + :param cause: User defined reason for dry-run creating the new template for simulation + purposes + :param create: Whether the index template we optionally defined in the body should + only be dry-run added if new or can also replace an existing one :param include_defaults: If true, returns all relevant default configurations for the index template. :param master_timeout: Period to wait for a connection to the master node. If @@ -4943,6 +4967,10 @@ def simulate_index_template( __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_index_template/_simulate_index/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} + if cause is not None: + __query["cause"] = cause + if create is not None: + __query["create"] = create if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: @@ -4985,6 +5013,7 @@ def simulate_template( *, name: t.Optional[str] = None, allow_auto_create: t.Optional[bool] = None, + cause: t.Optional[str] = None, composed_of: t.Optional[t.Sequence[str]] = None, create: t.Optional[bool] = None, data_stream: t.Optional[t.Mapping[str, t.Any]] = None, @@ -5021,6 +5050,8 @@ def simulate_template( via `actions.auto_create_index`. If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. + :param cause: User defined reason for dry-run creating the new template for simulation + purposes :param composed_of: An ordered list of component template names. Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. 
@@ -5065,6 +5096,8 @@ def simulate_template( __path = "/_index_template/_simulate" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} + if cause is not None: + __query["cause"] = cause if create is not None: __query["create"] = create if error_trace is not None: diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py index 0c8caa0de..15c996cd7 100644 --- a/elasticsearch/_sync/client/inference.py +++ b/elasticsearch/_sync/client/inference.py @@ -234,6 +234,67 @@ def get( path_parts=__path_parts, ) + @_rewrite_parameters( + body_name="chat_completion_request", + ) + def post_eis_chat_completion( + self, + *, + eis_inference_id: str, + chat_completion_request: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Perform a chat completion task through the Elastic Inference Service (EIS).

        +

        Perform a chat completion inference task with the elastic service.
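A minimal sketch of calling the new method; the endpoint ID is a placeholder and the shape of chat_completion_request is assumed to follow the usual chat-style messages payload:

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder host

# `eis_inference_id` must refer to an existing EIS chat_completion endpoint;
# the body shape below is an assumption, not taken from this patch.
resp = client.inference.post_eis_chat_completion(
    eis_inference_id="my-eis-endpoint",
    chat_completion_request={
        "messages": [{"role": "user", "content": "Say hello"}],
    },
)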

        + + + ``_ + + :param eis_inference_id: The unique identifier of the inference endpoint. + :param chat_completion_request: + """ + if eis_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'eis_inference_id'") + if chat_completion_request is None and body is None: + raise ValueError( + "Empty value passed for parameters 'chat_completion_request' and 'body', one of them should be set." + ) + elif chat_completion_request is not None and body is not None: + raise ValueError("Cannot set both 'chat_completion_request' and 'body'") + __path_parts: t.Dict[str, str] = {"eis_inference_id": _quote(eis_inference_id)} + __path = ( + f'/_inference/chat_completion/{__path_parts["eis_inference_id"]}/_stream' + ) + __query: t.Dict[str, t.Any] = {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + __body = ( + chat_completion_request if chat_completion_request is not None else body + ) + __headers = {"accept": "application/json", "content-type": "application/json"} + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.post_eis_chat_completion", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_name="inference_config", ) @@ -321,6 +382,1522 @@ def put( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + def put_alibabacloud( + self, + *, + task_type: t.Union[ + str, t.Literal["completion", "rerank", "space_embedding", "text_embedding"] + ], + alibabacloud_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["alibabacloud-ai-search"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create an AlibabaCloud AI Search inference endpoint.

        +

        Create an inference endpoint to perform an inference task with the alibabacloud-ai-search service.

        +

        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. + After creating the endpoint, wait for the model deployment to complete before using it. + To verify the deployment status, use the get trained model statistics API. + Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". + Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
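Illustrative only; the service_settings keys shown below are assumed for an AlibabaCloud AI Search account and are not taken from this patch:

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder host

# api_key, host, service_id and workspace are placeholder values.
client.inference.put_alibabacloud(
    task_type="text_embedding",
    alibabacloud_inference_id="my-alibabacloud-endpoint",
    service="alibabacloud-ai-search",
    service_settings={
        "api_key": "<API_KEY>",
        "host": "<HOST>",
        "service_id": "ops-text-embedding-001",
        "workspace": "default",
    },
)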

        + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param alibabacloud_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `alibabacloud-ai-search`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `alibabacloud-ai-search` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if alibabacloud_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'alibabacloud_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "alibabacloud_inference_id": _quote(alibabacloud_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["alibabacloud_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_alibabacloud", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + def put_amazonbedrock( + self, + *, + task_type: t.Union[str, t.Literal["completion", "text_embedding"]], + amazonbedrock_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["amazonbedrock"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create an Amazon Bedrock inference endpoint.

        +

        Create an inference endpoint to perform an inference task with the amazonbedrock service.

        +
        +

        info + You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.

        +
        +

        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. + After creating the endpoint, wait for the model deployment to complete before using it. + To verify the deployment status, use the get trained model statistics API. + Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". + Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
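A hedged sketch of the call; the AWS credentials, region, provider, and model below are placeholders, supplied once at creation time as described in the note above:

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder host

# All credential and model values are placeholders.
client.inference.put_amazonbedrock(
    task_type="text_embedding",
    amazonbedrock_inference_id="my-bedrock-endpoint",
    service="amazonbedrock",
    service_settings={
        "access_key": "<AWS_ACCESS_KEY>",
        "secret_key": "<AWS_SECRET_KEY>",
        "region": "us-east-1",
        "provider": "amazontitan",
        "model": "amazon.titan-embed-text-v2:0",
    },
)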

        + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param amazonbedrock_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `amazonbedrock`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `amazonbedrock` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if amazonbedrock_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'amazonbedrock_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "amazonbedrock_inference_id": _quote(amazonbedrock_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["amazonbedrock_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_amazonbedrock", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + def put_anthropic( + self, + *, + task_type: t.Union[str, t.Literal["completion"]], + anthropic_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["anthropic"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create an Anthropic inference endpoint.

        +

        Create an inference endpoint to perform an inference task with the anthropic service.

        +

        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. + After creating the endpoint, wait for the model deployment to complete before using it. + To verify the deployment status, use the get trained model statistics API. + Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". + Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
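A small sketch of the call; `completion` is the only task type this method accepts, and the Anthropic model name and max_tokens value are assumptions:

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder host

# Model name and token limit are illustrative placeholders.
client.inference.put_anthropic(
    task_type="completion",
    anthropic_inference_id="my-anthropic-endpoint",
    service="anthropic",
    service_settings={"api_key": "<ANTHROPIC_API_KEY>", "model_id": "claude-3-5-haiku-latest"},
    task_settings={"max_tokens": 1024},
)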

        + + + ``_ + + :param task_type: The task type. The only valid task type for the model to perform + is `completion`. + :param anthropic_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `anthropic`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `watsonxai` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if anthropic_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'anthropic_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "anthropic_inference_id": _quote(anthropic_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["anthropic_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_anthropic", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + def put_azureaistudio( + self, + *, + task_type: t.Union[str, t.Literal["completion", "text_embedding"]], + azureaistudio_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["azureaistudio"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create an Azure AI Studio inference endpoint.

        +

        Create an inference endpoint to perform an inference task with the azureaistudio service.

        +

        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. + After creating the endpoint, wait for the model deployment to complete before using it. + To verify the deployment status, use the get trained model statistics API. + Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". + Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
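An illustrative call; target, provider, and endpoint_type are placeholder values assumed for an Azure AI Studio model deployment:

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder host

# Deployment details are placeholders; check the azureaistudio service docs.
client.inference.put_azureaistudio(
    task_type="completion",
    azureaistudio_inference_id="my-azureaistudio-endpoint",
    service="azureaistudio",
    service_settings={
        "api_key": "<API_KEY>",
        "target": "<TARGET_URI>",
        "provider": "openai",
        "endpoint_type": "token",
    },
)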

        + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param azureaistudio_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `azureaistudio`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `openai` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if azureaistudio_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'azureaistudio_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "azureaistudio_inference_id": _quote(azureaistudio_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["azureaistudio_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_azureaistudio", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + def put_azureopenai( + self, + *, + task_type: t.Union[str, t.Literal["completion", "text_embedding"]], + azureopenai_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["azureopenai"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create an Azure OpenAI inference endpoint.

        +

        Create an inference endpoint to perform an inference task with the azureopenai service.

        +

        The list of chat completion models that you can choose from in your Azure OpenAI deployment includes:

        + +

        The list of embeddings models that you can choose from in your deployment can be found in the Azure models documentation.

        +

        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. + After creating the endpoint, wait for the model deployment to complete before using it. + To verify the deployment status, use the get trained model statistics API. + Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". + Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
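A sketch assuming an existing Azure OpenAI resource; the resource, deployment, and API version identifiers below are placeholders:

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder host

# resource_name, deployment_id and api_version identify the Azure OpenAI
# deployment; all values are placeholders.
client.inference.put_azureopenai(
    task_type="text_embedding",
    azureopenai_inference_id="my-azureopenai-endpoint",
    service="azureopenai",
    service_settings={
        "api_key": "<API_KEY>",
        "resource_name": "<RESOURCE_NAME>",
        "deployment_id": "<DEPLOYMENT_ID>",
        "api_version": "2024-06-01",
    },
)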

        + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + NOTE: The `chat_completion` task type only supports streaming and only through + the _stream API. + :param azureopenai_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `azureopenai`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `azureopenai` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if azureopenai_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'azureopenai_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "azureopenai_inference_id": _quote(azureopenai_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["azureopenai_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_azureopenai", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + def put_cohere( + self, + *, + task_type: t.Union[str, t.Literal["completion", "rerank", "text_embedding"]], + cohere_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["cohere"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create a Cohere inference endpoint.

        +

        Create an inference endpoint to perform an inference task with the cohere service.

        +

        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. + After creating the endpoint, wait for the model deployment to complete before using it. + To verify the deployment status, use the get trained model statistics API. + Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". + Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
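A minimal sketch; the Cohere model name is an assumption and the API key is a placeholder:

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # placeholder host

# model_id is illustrative; check the cohere service docs for supported models.
client.inference.put_cohere(
    task_type="text_embedding",
    cohere_inference_id="my-cohere-endpoint",
    service="cohere",
    service_settings={"api_key": "<COHERE_API_KEY>", "model_id": "embed-english-v3.0"},
)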

        + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param cohere_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `cohere`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `cohere` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if cohere_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'cohere_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "cohere_inference_id": _quote(cohere_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["cohere_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_cohere", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("service", "service_settings"), + ) + def put_eis( + self, + *, + task_type: t.Union[str, t.Literal["chat_completion"]], + eis_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["elastic"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create an Elastic Inference Service (EIS) inference endpoint.

        +

        Create an inference endpoint to perform an inference task through the Elastic Inference Service (EIS).
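
        As a hedged sketch, an EIS chat completion endpoint could be created as follows; the endpoint name and the `model_id` value are placeholders, since the patch does not show the accepted service settings:

            from elasticsearch import Elasticsearch

            client = Elasticsearch("http://localhost:9200")  # placeholder connection details

            # chat_completion is the only task type listed for the elastic (EIS) service here.
            resp = client.inference.put_eis(
                task_type="chat_completion",
                eis_inference_id="my-eis-chat",
                service="elastic",
                service_settings={"model_id": "<eis-model-id>"},  # assumed key; check the EIS docs
            )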

        + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + NOTE: The `chat_completion` task type only supports streaming and only through + the _stream API. + :param eis_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `elastic`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `elastic` service. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if eis_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'eis_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "eis_inference_id": _quote(eis_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["eis_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_eis", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + def put_elasticsearch( + self, + *, + task_type: t.Union[ + str, t.Literal["rerank", "sparse_embedding", "text_embedding"] + ], + elasticsearch_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["elasticsearch"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create an Elasticsearch inference endpoint.

        +

        Create an inference endpoint to perform an inference task with the elasticsearch service.

        +
        +

        info: Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints; you only need to create the endpoints using the API if you want to customize the settings.

        +
        +

        If you use the ELSER or the E5 model through the elasticsearch service, the API request will automatically download and deploy the model if it isn't downloaded yet.

        +
        +

        info: You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.

        +
        +

        After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
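
        A minimal sketch of deploying the built-in E5 model through this method; the model identifier and allocation settings are illustrative and not taken from this patch:

            from elasticsearch import Elasticsearch

            client = Elasticsearch("http://localhost:9200")  # placeholder connection details

            # Deploy the multilingual E5 model for text embeddings (downloads it on first use).
            resp = client.inference.put_elasticsearch(
                task_type="text_embedding",
                elasticsearch_inference_id="my-e5-endpoint",
                service="elasticsearch",
                service_settings={
                    "model_id": ".multilingual-e5-small",  # assumed built-in model name
                    "num_allocations": 1,
                    "num_threads": 1,
                },
            )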

        + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param elasticsearch_inference_id: The unique identifier of the inference endpoint. + The must not match the `model_id`. + :param service: The type of service supported for the specified task type. In + this case, `elasticsearch`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `elasticsearch` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if elasticsearch_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'elasticsearch_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "elasticsearch_inference_id": _quote(elasticsearch_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["elasticsearch_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_elasticsearch", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("service", "service_settings", "chunking_settings"), + ) + def put_elser( + self, + *, + task_type: t.Union[str, t.Literal["sparse_embedding"]], + elser_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["elser"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create an ELSER inference endpoint.

        +

        Create an inference endpoint to perform an inference task with the elser service. You can also deploy ELSER by using the Elasticsearch inference integration.

        +
        +

        info: Your Elasticsearch deployment contains a preconfigured ELSER inference endpoint; you only need to create the endpoint using the API if you want to customize the settings.

        +
        +

        The API request will automatically download and deploy the ELSER model if it isn't already downloaded.

        +
        +

        info: You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.

        +
        +

        After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
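
        For example, a sparse embedding ELSER endpoint might be created as follows; the endpoint name and allocation values are only illustrative:

            from elasticsearch import Elasticsearch

            client = Elasticsearch("http://localhost:9200")  # placeholder connection details

            # ELSER only supports the sparse_embedding task type.
            resp = client.inference.put_elser(
                task_type="sparse_embedding",
                elser_inference_id="my-elser-endpoint",
                service="elser",
                service_settings={"num_allocations": 1, "num_threads": 1},
            )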

        + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param elser_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `elser`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `elser` service. + :param chunking_settings: The chunking configuration object. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if elser_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'elser_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "elser_inference_id": _quote(elser_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["elser_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_elser", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("service", "service_settings", "chunking_settings"), + ) + def put_googleaistudio( + self, + *, + task_type: t.Union[str, t.Literal["completion", "text_embedding"]], + googleaistudio_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["googleaistudio"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create a Google AI Studio inference endpoint.

        +

        Create an inference endpoint to perform an inference task with the googleaistudio service.

        +

        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
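
        A hedged sketch of a Google AI Studio completion endpoint; the `model_id` value and API key are placeholders, not values from this patch:

            from elasticsearch import Elasticsearch

            client = Elasticsearch("http://localhost:9200")  # placeholder connection details

            resp = client.inference.put_googleaistudio(
                task_type="completion",
                googleaistudio_inference_id="my-gai-completion",
                service="googleaistudio",
                service_settings={
                    "api_key": "<GOOGLE_AI_STUDIO_API_KEY>",  # assumed setting keys
                    "model_id": "<model-id>",
                },
            )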

        + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param googleaistudio_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `googleaistudio`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `googleaistudio` service. + :param chunking_settings: The chunking configuration object. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if googleaistudio_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'googleaistudio_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "googleaistudio_inference_id": _quote(googleaistudio_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["googleaistudio_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_googleaistudio", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + def put_googlevertexai( + self, + *, + task_type: t.Union[str, t.Literal["rerank", "text_embedding"]], + googlevertexai_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["googlevertexai"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create a Google Vertex AI inference endpoint.

        +

        Create an inference endpoint to perform an inference task with the googlevertexai service.

        +

        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
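
        A comparable sketch for Google Vertex AI text embeddings; the service account credentials, project, location, and model identifier below are placeholders and the setting keys are assumptions to verify against the Vertex AI integration docs:

            from elasticsearch import Elasticsearch

            client = Elasticsearch("http://localhost:9200")  # placeholder connection details

            resp = client.inference.put_googlevertexai(
                task_type="text_embedding",
                googlevertexai_inference_id="my-vertexai-embeddings",
                service="googlevertexai",
                service_settings={  # assumed setting keys
                    "service_account_json": "<SERVICE_ACCOUNT_JSON>",
                    "project_id": "<gcp-project>",
                    "location": "<gcp-region>",
                    "model_id": "<model-id>",
                },
            )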

        + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param googlevertexai_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `googlevertexai`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `googlevertexai` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if googlevertexai_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'googlevertexai_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "googlevertexai_inference_id": _quote(googlevertexai_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["googlevertexai_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_googlevertexai", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("service", "service_settings", "chunking_settings"), + ) + def put_hugging_face( + self, + *, + task_type: t.Union[str, t.Literal["text_embedding"]], + huggingface_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["hugging_face"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create a Hugging Face inference endpoint.

        +

        Create an inference endpoint to perform an inference task with the hugging_face service.

        +

        You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL. Select the model you want to use on the new endpoint creation page (for example, intfloat/e5-small-v2), then select the sentence embeddings task under the advanced configuration section. Create the endpoint and copy the URL after the endpoint initialization has finished.

        +

        The following models are recommended for the Hugging Face service:

        • all-MiniLM-L6-v2
        • all-MiniLM-L12-v2
        • all-mpnet-base-v2
        • e5-base-v2
        • e5-small-v2
        • multilingual-e5-base
        • multilingual-e5-small

        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
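
        As a sketch, once the Hugging Face endpoint URL has been copied, it can be wired up like this; the URL and access token below are placeholders:

            from elasticsearch import Elasticsearch

            client = Elasticsearch("http://localhost:9200")  # placeholder connection details

            resp = client.inference.put_hugging_face(
                task_type="text_embedding",
                huggingface_inference_id="my-hf-embeddings",
                service="hugging_face",
                service_settings={
                    "url": "https://<your-endpoint>.endpoints.huggingface.cloud",  # copied from Hugging Face
                    "api_key": "<HF_ACCESS_TOKEN>",
                },
            )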

        + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param huggingface_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `hugging_face`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `hugging_face` service. + :param chunking_settings: The chunking configuration object. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if huggingface_inference_id in SKIP_IN_PATH: + raise ValueError( + "Empty value passed for parameter 'huggingface_inference_id'" + ) + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "huggingface_inference_id": _quote(huggingface_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["huggingface_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_hugging_face", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + def put_jinaai( + self, + *, + task_type: t.Union[str, t.Literal["rerank", "text_embedding"]], + jinaai_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["jinaai"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create a JinaAI inference endpoint.

        +

        Create an inference endpoint to perform an inference task with the jinaai service.

        +

        To review the available rerank models, refer to https://jina.ai/reranker. To review the available text_embedding models, refer to https://jina.ai/embeddings/.

        +

        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
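
        A minimal sketch for a JinaAI text embedding endpoint; the model name, key, and setting keys are assumptions for the example:

            from elasticsearch import Elasticsearch

            client = Elasticsearch("http://localhost:9200")  # placeholder connection details

            resp = client.inference.put_jinaai(
                task_type="text_embedding",
                jinaai_inference_id="my-jinaai-embeddings",
                service="jinaai",
                service_settings={
                    "api_key": "<JINAAI_API_KEY>",   # assumed setting keys
                    "model_id": "<embedding-model>",
                },
            )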

        + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param jinaai_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `jinaai`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `jinaai` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if jinaai_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'jinaai_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "jinaai_inference_id": _quote(jinaai_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["jinaai_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_jinaai", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("service", "service_settings", "chunking_settings"), + ) + def put_mistral( + self, + *, + task_type: t.Union[str, t.Literal["text_embedding"]], + mistral_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["mistral"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create a Mistral inference endpoint.

        +

        Create an inference endpoint to perform an inference task with the mistral service.

        +

        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
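
        A hedged sketch for a Mistral text embedding endpoint; the service settings keys and model name are assumptions to be checked against the Mistral integration docs:

            from elasticsearch import Elasticsearch

            client = Elasticsearch("http://localhost:9200")  # placeholder connection details

            resp = client.inference.put_mistral(
                task_type="text_embedding",  # the only valid task type for this service
                mistral_inference_id="my-mistral-embeddings",
                service="mistral",
                service_settings={
                    "api_key": "<MISTRAL_API_KEY>",  # assumed setting keys
                    "model": "mistral-embed",
                },
            )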

        + + + ``_ + + :param task_type: The task type. The only valid task type for the model to perform + is `text_embedding`. + :param mistral_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `mistral`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `mistral` service. + :param chunking_settings: The chunking configuration object. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if mistral_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'mistral_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "mistral_inference_id": _quote(mistral_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["mistral_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_mistral", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + def put_openai( + self, + *, + task_type: t.Union[ + str, t.Literal["chat_completion", "completion", "text_embedding"] + ], + openai_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["openai"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create an OpenAI inference endpoint.

        +

        Create an inference endpoint to perform an inference task with the openai service or OpenAI-compatible APIs.

        +

        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
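
        A minimal sketch for an OpenAI text embedding endpoint; the API key and model name are placeholders, not values from this patch:

            from elasticsearch import Elasticsearch

            client = Elasticsearch("http://localhost:9200")  # placeholder connection details

            resp = client.inference.put_openai(
                task_type="text_embedding",
                openai_inference_id="my-openai-embeddings",
                service="openai",
                service_settings={
                    "api_key": "<OPENAI_API_KEY>",
                    "model_id": "text-embedding-3-small",  # illustrative model choice
                },
            )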

        + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + NOTE: The `chat_completion` task type only supports streaming and only through + the _stream API. + :param openai_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `openai`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `openai` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if openai_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'openai_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "openai_inference_id": _quote(openai_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["openai_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_openai", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + def put_voyageai( + self, + *, + task_type: t.Union[str, t.Literal["rerank", "text_embedding"]], + voyageai_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["voyageai"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Create a VoyageAI inference endpoint.

        +

        Create an inference endpoint to perform an inference task with the voyageai service.

        +

        Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
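
        A short sketch for a VoyageAI rerank endpoint; the model identifier, key, and setting keys are assumptions for the example:

            from elasticsearch import Elasticsearch

            client = Elasticsearch("http://localhost:9200")  # placeholder connection details

            resp = client.inference.put_voyageai(
                task_type="rerank",
                voyageai_inference_id="my-voyageai-rerank",
                service="voyageai",
                service_settings={
                    "api_key": "<VOYAGEAI_API_KEY>",  # assumed setting keys
                    "model_id": "<rerank-model>",
                },
            )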

        + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param voyageai_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `voyageai`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `voyageai` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if voyageai_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'voyageai_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "voyageai_inference_id": _quote(voyageai_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["voyageai_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_voyageai", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=("service", "service_settings"), ) @@ -341,7 +1918,7 @@ def put_watsonx( .. raw:: html

        Create a Watsonx inference endpoint.

        -

        Creates an inference endpoint to perform an inference task with the watsonxai service. +

        Create an inference endpoint to perform an inference task with the watsonxai service. You need an IBM Cloud Databases for Elasticsearch deployment to use the watsonxai inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.

        When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. diff --git a/elasticsearch/_sync/client/license.py b/elasticsearch/_sync/client/license.py index 842e47354..302ae7ea6 100644 --- a/elasticsearch/_sync/client/license.py +++ b/elasticsearch/_sync/client/license.py @@ -237,7 +237,7 @@ def post( If the operator privileges feature is enabled, only operator users can use this API.

        - ``_ + ``_ :param acknowledge: Specifies whether you acknowledge the license changes. :param license: diff --git a/elasticsearch/_sync/client/ml.py b/elasticsearch/_sync/client/ml.py index fbcb25309..46104a32e 100644 --- a/elasticsearch/_sync/client/ml.py +++ b/elasticsearch/_sync/client/ml.py @@ -3604,11 +3604,11 @@ def put_datafeed( :param ignore_unavailable: If true, unavailable indices (missing or closed) are ignored. :param indexes: An array of index names. Wildcards are supported. If any of the - indices are in remote clusters, the machine learning nodes must have the - `remote_cluster_client` role. + indices are in remote clusters, the master nodes and the machine learning + nodes must have the `remote_cluster_client` role. :param indices: An array of index names. Wildcards are supported. If any of the - indices are in remote clusters, the machine learning nodes must have the - `remote_cluster_client` role. + indices are in remote clusters, the master nodes and the machine learning + nodes must have the `remote_cluster_client` role. :param indices_options: Specifies index expansion options that are used during search :param job_id: Identifier for the anomaly detection job. @@ -5004,7 +5004,7 @@ def update_data_frame_analytics(

        Update a data frame analytics job.

        - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -5577,7 +5577,7 @@ def update_trained_model_deployment(

        Update a trained model deployment.

        - ``_ + ``_ :param model_id: The unique identifier of the trained model. Currently, only PyTorch models are supported. diff --git a/elasticsearch/_sync/client/security.py b/elasticsearch/_sync/client/security.py index c8bb4cf4e..c0c7840ec 100644 --- a/elasticsearch/_sync/client/security.py +++ b/elasticsearch/_sync/client/security.py @@ -2867,12 +2867,12 @@ def oidc_authenticate( ) @_rewrite_parameters( - body_fields=("access_token", "refresh_token"), + body_fields=("token", "refresh_token"), ) def oidc_logout( self, *, - access_token: t.Optional[str] = None, + token: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, @@ -2892,11 +2892,11 @@ def oidc_logout( ``_ - :param access_token: The access token to be invalidated. + :param token: The access token to be invalidated. :param refresh_token: The refresh token to be invalidated. """ - if access_token is None and body is None: - raise ValueError("Empty value passed for parameter 'access_token'") + if token is None and body is None: + raise ValueError("Empty value passed for parameter 'token'") __path_parts: t.Dict[str, str] = {} __path = "/_security/oidc/logout" __query: t.Dict[str, t.Any] = {} @@ -2910,8 +2910,8 @@ def oidc_logout( if pretty is not None: __query["pretty"] = pretty if not __body: - if access_token is not None: - __body["access_token"] = access_token + if token is not None: + __body["token"] = token if refresh_token is not None: __body["refresh_token"] = refresh_token __headers = {"accept": "application/json", "content-type": "application/json"} diff --git a/elasticsearch/_sync/client/transform.py b/elasticsearch/_sync/client/transform.py index 1df9564d1..184662ab5 100644 --- a/elasticsearch/_sync/client/transform.py +++ b/elasticsearch/_sync/client/transform.py @@ -795,7 +795,7 @@ def update_transform( time of update and runs with those privileges.

        - ``_ + ``_ :param transform_id: Identifier for the transform. :param defer_validation: When true, deferrable validations are not run. This @@ -890,7 +890,7 @@ def upgrade_transforms( You may want to perform a recent cluster backup prior to the upgrade.

        - ``_ + ``_ :param dry_run: When true, the request checks for updates but does not run them. :param timeout: Period to wait for a response. If no response is received before diff --git a/elasticsearch/_sync/client/watcher.py b/elasticsearch/_sync/client/watcher.py index 065accc2f..84e3b7f82 100644 --- a/elasticsearch/_sync/client/watcher.py +++ b/elasticsearch/_sync/client/watcher.py @@ -48,7 +48,7 @@ def ack_watch( This happens when the condition of the watch is not met (the condition evaluates to false).

        - ``_ + ``_ :param watch_id: The watch identifier. :param action_id: A comma-separated list of the action identifiers to acknowledge. @@ -104,7 +104,7 @@ def activate_watch( A watch can be either active or inactive.

        - ``_ + ``_ :param watch_id: The watch identifier. """ @@ -148,7 +148,7 @@ def deactivate_watch( A watch can be either active or inactive.

        - ``_ + ``_ :param watch_id: The watch identifier. """ @@ -196,7 +196,7 @@ def delete_watch( When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the .watches index.

        - ``_ + ``_ :param id: The watch identifier. """ @@ -277,7 +277,7 @@ def execute_watch(

        When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information who stored the watch.

        - ``_ + ``_ :param id: The watch identifier. :param action_modes: Determines how to handle the watch actions as part of the @@ -365,7 +365,7 @@ def get_settings( Only a subset of settings are shown, for example index.auto_expand_replicas and index.number_of_replicas.

        - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails @@ -410,7 +410,7 @@ def get_watch(

        Get a watch.

        - ``_ + ``_ :param id: The watch identifier. """ @@ -485,7 +485,7 @@ def put_watch( If the user is able to read index a, but not index b, the same will apply when the watch runs.

        - ``_ + ``_ :param id: The identifier for the watch. :param actions: The list of actions that will be run if the condition matches. @@ -598,7 +598,7 @@ def query_watches(

        Note that only the _id and metadata.* fields are queryable or sortable.

        - ``_ + ``_ :param from_: The offset from the first result to fetch. It must be non-negative. :param query: A query that filters the watches to be returned. @@ -673,7 +673,7 @@ def start( Start the Watcher service if it is not already running.

        - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. """ @@ -739,7 +739,7 @@ def stats( You retrieve more metrics by using the metric parameter.

        - ``_ + ``_ :param metric: Defines which additional metrics are included in the response. :param emit_stacktraces: Defines whether stack traces are generated for each @@ -790,7 +790,7 @@ def stop( Stop the Watcher service if it is running.

        - ``_ + ``_ :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns @@ -848,7 +848,7 @@ def update_settings( This includes index.auto_expand_replicas and index.number_of_replicas.

        - ``_ + ``_ :param index_auto_expand_replicas: :param index_number_of_replicas: diff --git a/elasticsearch/_sync/client/xpack.py b/elasticsearch/_sync/client/xpack.py index d52c8dd69..182715cf7 100644 --- a/elasticsearch/_sync/client/xpack.py +++ b/elasticsearch/_sync/client/xpack.py @@ -103,7 +103,7 @@ def usage( The API also provides some usage statistics.

        - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails diff --git a/elasticsearch/dsl/field.py b/elasticsearch/dsl/field.py index 50f30b405..eb61be48a 100644 --- a/elasticsearch/dsl/field.py +++ b/elasticsearch/dsl/field.py @@ -762,6 +762,11 @@ class Boolean(Field): :arg fielddata: :arg index: :arg null_value: + :arg ignore_malformed: + :arg script: + :arg on_script_error: + :arg time_series_dimension: For internal use by Elastic only. Marks + the field as a time series dimension. Defaults to false. :arg doc_values: :arg copy_to: :arg store: @@ -789,6 +794,10 @@ def __init__( ] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, null_value: Union[bool, "DefaultType"] = DEFAULT, + ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, + time_series_dimension: Union[bool, "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], @@ -816,6 +825,14 @@ def __init__( kwargs["index"] = index if null_value is not DEFAULT: kwargs["null_value"] = null_value + if ignore_malformed is not DEFAULT: + kwargs["ignore_malformed"] = ignore_malformed + if script is not DEFAULT: + kwargs["script"] = script + if on_script_error is not DEFAULT: + kwargs["on_script_error"] = on_script_error + if time_series_dimension is not DEFAULT: + kwargs["time_series_dimension"] = time_series_dimension if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: @@ -1390,11 +1407,29 @@ def __init__( class DenseVector(Field): """ - :arg element_type: - :arg dims: - :arg similarity: - :arg index: - :arg index_options: + :arg dims: Number of vector dimensions. Can't exceed `4096`. If `dims` + is not specified, it will be set to the length of the first vector + added to the field. + :arg element_type: The data type used to encode vectors. The supported + data types are `float` (default), `byte`, and `bit`. Defaults to + `float` if omitted. + :arg index: If `true`, you can search this field using the kNN search + API. Defaults to `True` if omitted. + :arg index_options: An optional section that configures the kNN + indexing algorithm. The HNSW algorithm has two internal parameters + that influence how the data structure is built. These can be + adjusted to improve the accuracy of results, at the expense of + slower indexing speed. This parameter can only be specified when + `index` is `true`. + :arg similarity: The vector similarity metric to use in kNN search. + Documents are ranked by their vector field's similarity to the + query vector. The `_score` of each document will be derived from + the similarity, in a way that ensures scores are positive and that + a larger score corresponds to a higher ranking. Defaults to + `l2_norm` when `element_type` is `bit` otherwise defaults to + `cosine`. `bit` vectors only support `l2_norm` as their + similarity metric. This parameter can only be specified when + `index` is `true`. :arg meta: Metadata about the field. 
:arg properties: :arg ignore_above: @@ -1413,13 +1448,16 @@ class DenseVector(Field): def __init__( self, *args: Any, - element_type: Union[str, "DefaultType"] = DEFAULT, dims: Union[int, "DefaultType"] = DEFAULT, - similarity: Union[str, "DefaultType"] = DEFAULT, + element_type: Union[Literal["bit", "byte", "float"], "DefaultType"] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, index_options: Union[ "types.DenseVectorIndexOptions", Dict[str, Any], "DefaultType" ] = DEFAULT, + similarity: Union[ + Literal["cosine", "dot_product", "l2_norm", "max_inner_product"], + "DefaultType", + ] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, @@ -1432,16 +1470,16 @@ def __init__( ] = DEFAULT, **kwargs: Any, ): - if element_type is not DEFAULT: - kwargs["element_type"] = element_type if dims is not DEFAULT: kwargs["dims"] = dims - if similarity is not DEFAULT: - kwargs["similarity"] = similarity + if element_type is not DEFAULT: + kwargs["element_type"] = element_type if index is not DEFAULT: kwargs["index"] = index if index_options is not DEFAULT: kwargs["index_options"] = index_options + if similarity is not DEFAULT: + kwargs["similarity"] = similarity if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: @@ -1905,6 +1943,7 @@ class GeoShape(Field): :arg coerce: :arg ignore_malformed: :arg ignore_z_value: + :arg index: :arg orientation: :arg strategy: :arg doc_values: @@ -1930,6 +1969,7 @@ def __init__( coerce: Union[bool, "DefaultType"] = DEFAULT, ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, ignore_z_value: Union[bool, "DefaultType"] = DEFAULT, + index: Union[bool, "DefaultType"] = DEFAULT, orientation: Union[Literal["right", "left"], "DefaultType"] = DEFAULT, strategy: Union[Literal["recursive", "term"], "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, @@ -1957,6 +1997,8 @@ def __init__( kwargs["ignore_malformed"] = ignore_malformed if ignore_z_value is not DEFAULT: kwargs["ignore_z_value"] = ignore_z_value + if index is not DEFAULT: + kwargs["index"] = index if orientation is not DEFAULT: kwargs["orientation"] = orientation if strategy is not DEFAULT: @@ -3497,8 +3539,18 @@ def __init__( class SemanticText(Field): """ - :arg inference_id: (required) :arg meta: + :arg inference_id: Inference endpoint that will be used to generate + embeddings for the field. This parameter cannot be updated. Use + the Create inference API to create the endpoint. If + `search_inference_id` is specified, the inference endpoint will + only be used at index time. Defaults to `.elser-2-elasticsearch` + if omitted. + :arg search_inference_id: Inference endpoint that will be used to + generate embeddings at query time. You can update this parameter + by using the Update mapping API. Use the Create inference API to + create the endpoint. If not specified, the inference endpoint + defined by inference_id will be used at both index and query time. 
""" name = "semantic_text" @@ -3506,14 +3558,17 @@ class SemanticText(Field): def __init__( self, *args: Any, - inference_id: Union[str, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + inference_id: Union[str, "DefaultType"] = DEFAULT, + search_inference_id: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): - if inference_id is not DEFAULT: - kwargs["inference_id"] = inference_id if meta is not DEFAULT: kwargs["meta"] = meta + if inference_id is not DEFAULT: + kwargs["inference_id"] = inference_id + if search_inference_id is not DEFAULT: + kwargs["search_inference_id"] = search_inference_id super().__init__(*args, **kwargs) diff --git a/elasticsearch/dsl/types.py b/elasticsearch/dsl/types.py index 4ea6d8361..7474769c6 100644 --- a/elasticsearch/dsl/types.py +++ b/elasticsearch/dsl/types.py @@ -364,34 +364,57 @@ def __init__( class DenseVectorIndexOptions(AttrDict[Any]): """ - :arg type: (required) - :arg m: - :arg ef_construction: - :arg confidence_interval: - """ - - type: Union[str, DefaultType] - m: Union[int, DefaultType] - ef_construction: Union[int, DefaultType] + :arg type: (required) The type of kNN algorithm to use. + :arg confidence_interval: The confidence interval to use when + quantizing the vectors. Can be any value between and including + `0.90` and `1.0` or exactly `0`. When the value is `0`, this + indicates that dynamic quantiles should be calculated for + optimized quantization. When between `0.90` and `1.0`, this value + restricts the values used when calculating the quantization + thresholds. For example, a value of `0.95` will only use the + middle `95%` of the values when calculating the quantization + thresholds (e.g. the highest and lowest `2.5%` of values will be + ignored). Defaults to `1/(dims + 1)` for `int8` quantized vectors + and `0` for `int4` for dynamic quantile calculation. Only + applicable to `int8_hnsw`, `int4_hnsw`, `int8_flat`, and + `int4_flat` index types. + :arg ef_construction: The number of candidates to track while + assembling the list of nearest neighbors for each new node. Only + applicable to `hnsw`, `int8_hnsw`, and `int4_hnsw` index types. + Defaults to `100` if omitted. + :arg m: The number of neighbors each node will be connected to in the + HNSW graph. Only applicable to `hnsw`, `int8_hnsw`, and + `int4_hnsw` index types. Defaults to `16` if omitted. 
+ """ + + type: Union[ + Literal["flat", "hnsw", "int4_flat", "int4_hnsw", "int8_flat", "int8_hnsw"], + DefaultType, + ] confidence_interval: Union[float, DefaultType] + ef_construction: Union[int, DefaultType] + m: Union[int, DefaultType] def __init__( self, *, - type: Union[str, DefaultType] = DEFAULT, - m: Union[int, DefaultType] = DEFAULT, - ef_construction: Union[int, DefaultType] = DEFAULT, + type: Union[ + Literal["flat", "hnsw", "int4_flat", "int4_hnsw", "int8_flat", "int8_hnsw"], + DefaultType, + ] = DEFAULT, confidence_interval: Union[float, DefaultType] = DEFAULT, + ef_construction: Union[int, DefaultType] = DEFAULT, + m: Union[int, DefaultType] = DEFAULT, **kwargs: Any, ): if type is not DEFAULT: kwargs["type"] = type - if m is not DEFAULT: - kwargs["m"] = m - if ef_construction is not DEFAULT: - kwargs["ef_construction"] = ef_construction if confidence_interval is not DEFAULT: kwargs["confidence_interval"] = confidence_interval + if ef_construction is not DEFAULT: + kwargs["ef_construction"] = ef_construction + if m is not DEFAULT: + kwargs["m"] = m super().__init__(kwargs) From c1003ffe4bd269daa5bf9b35dd91b9147339db37 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 2 Apr 2025 20:23:51 +0400 Subject: [PATCH 43/65] Add back inference.inference API (#2873) (#2879) * Add back inference.inference API This function was removed from the specification in favor of one API per provider and task type, but the existing function was stable and widely used in Python. Still, we mark it as deprecated to encourage users to migrate to the new APIs. * Fix lint (cherry picked from commit eac539dcc301b6062716f1540dc0d1efe888671c) Co-authored-by: Quentin Pradet --- elasticsearch/_async/client/inference.py | 119 ++++++++++++++++++++++- elasticsearch/_sync/client/inference.py | 119 ++++++++++++++++++++++- elasticsearch/_sync/client/utils.py | 7 ++ 3 files changed, 243 insertions(+), 2 deletions(-) diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py index ca1217207..c71151e40 100644 --- a/elasticsearch/_async/client/inference.py +++ b/elasticsearch/_async/client/inference.py @@ -20,7 +20,13 @@ from elastic_transport import ObjectApiResponse from ._base import NamespacedClient -from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters +from .utils import ( + SKIP_IN_PATH, + Stability, + _quote, + _rewrite_parameters, + _stability_warning, +) class InferenceClient(NamespacedClient): @@ -234,6 +240,117 @@ async def get( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("input", "query", "task_settings"), + ) + @_stability_warning( + Stability.DEPRECATED, + version="8.18.0", + message="inference.inference() is deprecated in favor of provider-specific APIs such as inference.put_elasticsearch() or inference.put_hugging_face()", + ) + async def inference( + self, + *, + inference_id: str, + input: t.Optional[t.Union[str, t.Sequence[str]]] = None, + task_type: t.Optional[ + t.Union[ + str, + t.Literal[ + "chat_completion", + "completion", + "rerank", + "sparse_embedding", + "text_embedding", + ], + ] + ] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + query: t.Optional[str] = None, + task_settings: t.Optional[t.Any] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, 
+ ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

+        Perform inference on the service.
+
+        This API enables you to use machine learning models to perform specific tasks on data that you provide as an input.
+        It returns a response with the results of the tasks.
+        The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.
+
+        info
+        The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
+
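# Illustrative sketch only (not part of this patch): one way the re-added, deprecated
# inference.inference() API could be called from the async client, based on the
# signature added above. The endpoint id, input strings, and cluster URL are assumed
# example values.
import asyncio
from elasticsearch import AsyncElasticsearch

async def run_inference() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster
    resp = await client.inference.inference(
        inference_id="my-elser-endpoint",  # hypothetical endpoint id
        task_type="sparse_embedding",
        input=["first passage to embed", "second passage to embed"],
    )
    print(resp)
    await client.close()

asyncio.run(run_inference())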
        + + + ``_ + + :param inference_id: The unique identifier for the inference endpoint. + :param input: The text on which you want to perform the inference task. It can + be a single string or an array. > info > Inference endpoints for the `completion` + task type currently only support a single string as input. + :param task_type: The type of inference task that the model performs. + :param query: The query input, which is required only for the `rerank` task. + It is not required for other tasks. + :param task_settings: Task settings for the individual inference request. These + settings are specific to the task type you specified and override the task + settings specified when initializing the service. + :param timeout: The amount of time to wait for the inference request to complete. + """ + if inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'inference_id'") + if input is None and body is None: + raise ValueError("Empty value passed for parameter 'input'") + __path_parts: t.Dict[str, str] + if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: + __path_parts = { + "task_type": _quote(task_type), + "inference_id": _quote(inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}' + elif inference_id not in SKIP_IN_PATH: + __path_parts = {"inference_id": _quote(inference_id)} + __path = f'/_inference/{__path_parts["inference_id"]}' + else: + raise ValueError("Couldn't find a path for the given parameters") + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + if not __body: + if input is not None: + __body["input"] = input + if query is not None: + __body["query"] = query + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.inference", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_name="chat_completion_request", ) diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py index 15c996cd7..1a5611b16 100644 --- a/elasticsearch/_sync/client/inference.py +++ b/elasticsearch/_sync/client/inference.py @@ -20,7 +20,13 @@ from elastic_transport import ObjectApiResponse from ._base import NamespacedClient -from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters +from .utils import ( + SKIP_IN_PATH, + Stability, + _quote, + _rewrite_parameters, + _stability_warning, +) class InferenceClient(NamespacedClient): @@ -234,6 +240,117 @@ def get( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("input", "query", "task_settings"), + ) + @_stability_warning( + Stability.DEPRECATED, + version="8.18.0", + message="inference.inference() is deprecated in favor of provider-specific APIs such as inference.put_elasticsearch() or inference.put_hugging_face()", + ) + def inference( + self, + *, + inference_id: str, + input: t.Optional[t.Union[str, 
t.Sequence[str]]] = None, + task_type: t.Optional[ + t.Union[ + str, + t.Literal[ + "chat_completion", + "completion", + "rerank", + "sparse_embedding", + "text_embedding", + ], + ] + ] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + query: t.Optional[str] = None, + task_settings: t.Optional[t.Any] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

+        Perform inference on the service.
+
+        This API enables you to use machine learning models to perform specific tasks on data that you provide as an input.
+        It returns a response with the results of the tasks.
+        The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.
+
+        info
+        The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
+
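# Illustrative sketch only (not part of this patch): the synchronous variant, plus a
# way to observe the DeprecationWarning emitted by the @_stability_warning decorator
# added in this patch. The endpoint id and cluster URL are assumed example values.
import warnings
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    resp = client.inference.inference(
        inference_id="my-elser-endpoint",  # hypothetical endpoint id
        input="a single passage to embed",
    )

# Each caught warning message should point users at the provider-specific APIs.
print([str(w.message) for w in caught if issubclass(w.category, DeprecationWarning)])
print(resp)
client.close()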
        + + + ``_ + + :param inference_id: The unique identifier for the inference endpoint. + :param input: The text on which you want to perform the inference task. It can + be a single string or an array. > info > Inference endpoints for the `completion` + task type currently only support a single string as input. + :param task_type: The type of inference task that the model performs. + :param query: The query input, which is required only for the `rerank` task. + It is not required for other tasks. + :param task_settings: Task settings for the individual inference request. These + settings are specific to the task type you specified and override the task + settings specified when initializing the service. + :param timeout: The amount of time to wait for the inference request to complete. + """ + if inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'inference_id'") + if input is None and body is None: + raise ValueError("Empty value passed for parameter 'input'") + __path_parts: t.Dict[str, str] + if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: + __path_parts = { + "task_type": _quote(task_type), + "inference_id": _quote(inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}' + elif inference_id not in SKIP_IN_PATH: + __path_parts = {"inference_id": _quote(inference_id)} + __path = f'/_inference/{__path_parts["inference_id"]}' + else: + raise ValueError("Couldn't find a path for the given parameters") + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + if not __body: + if input is not None: + __body["input"] = input + if query is not None: + __body["query"] = query + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.inference", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_name="chat_completion_request", ) diff --git a/elasticsearch/_sync/client/utils.py b/elasticsearch/_sync/client/utils.py index 51afe1c78..2d7a93d65 100644 --- a/elasticsearch/_sync/client/utils.py +++ b/elasticsearch/_sync/client/utils.py @@ -77,6 +77,7 @@ class Stability(Enum): STABLE = auto() BETA = auto() EXPERIMENTAL = auto() + DEPRECATED = auto() _TYPE_HOSTS = Union[ @@ -479,6 +480,12 @@ def wrapped(*args: Any, **kwargs: Any) -> Any: category=GeneralAvailabilityWarning, stacklevel=warn_stacklevel(), ) + elif stability == Stability.DEPRECATED and message and version: + warnings.warn( + f"In elasticsearch-py {version}, {message}.", + category=DeprecationWarning, + stacklevel=warn_stacklevel(), + ) return api(*args, **kwargs) From 2c1fd63bfda0502bcc298de3da2eba1de9268f58 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Mon, 7 Apr 2025 15:19:06 +0200 Subject: [PATCH 44/65] Auto-generated API code (#2890) --- elasticsearch/_async/client/__init__.py | 3 ++- elasticsearch/_async/client/async_search.py | 2 
+- elasticsearch/_async/client/fleet.py | 2 +- elasticsearch/_async/client/watcher.py | 5 ++++- elasticsearch/_sync/client/__init__.py | 3 ++- elasticsearch/_sync/client/async_search.py | 2 +- elasticsearch/_sync/client/fleet.py | 2 +- elasticsearch/_sync/client/watcher.py | 5 ++++- 8 files changed, 16 insertions(+), 8 deletions(-) diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py index 9a3f8509c..cf34c6284 100644 --- a/elasticsearch/_async/client/__init__.py +++ b/elasticsearch/_async/client/__init__.py @@ -4839,7 +4839,8 @@ async def search( :param min_compatible_shard_node: The minimum version of the node that can handle the request Any handling node with a lower version will fail the request. :param min_score: The minimum `_score` for matching documents. Documents with - a lower `_score` are not included in the search results. + a lower `_score` are not included in search results and results collected + by aggregations. :param pit: Limit the search to a point in time (PIT). If you provide a PIT, you cannot specify an `` in the request path. :param post_filter: Use the `post_filter` parameter to filter search results. diff --git a/elasticsearch/_async/client/async_search.py b/elasticsearch/_async/client/async_search.py index 1ca196f13..b480e199b 100644 --- a/elasticsearch/_async/client/async_search.py +++ b/elasticsearch/_async/client/async_search.py @@ -403,7 +403,7 @@ async def submit( of concurrent shard requests :param min_compatible_shard_node: :param min_score: Minimum _score for matching documents. Documents with a lower - _score are not included in the search results. + _score are not included in search results and results collected by aggregations. :param pit: Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an in the request path. :param post_filter: diff --git a/elasticsearch/_async/client/fleet.py b/elasticsearch/_async/client/fleet.py index 2cc830efe..ff8449659 100644 --- a/elasticsearch/_async/client/fleet.py +++ b/elasticsearch/_async/client/fleet.py @@ -430,7 +430,7 @@ async def search( :param max_concurrent_shard_requests: :param min_compatible_shard_node: :param min_score: Minimum _score for matching documents. Documents with a lower - _score are not included in the search results. + _score are not included in search results and results collected by aggregations. :param pit: Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an in the request path. :param post_filter: diff --git a/elasticsearch/_async/client/watcher.py b/elasticsearch/_async/client/watcher.py index be2f8265e..d16bc440b 100644 --- a/elasticsearch/_async/client/watcher.py +++ b/elasticsearch/_async/client/watcher.py @@ -845,7 +845,10 @@ async def update_settings(

         Update Watcher index settings.
         Update settings for the Watcher internal index (.watches).
         Only a subset of settings can be modified.
-        This includes index.auto_expand_replicas and index.number_of_replicas.
+        This includes index.auto_expand_replicas, index.number_of_replicas, index.routing.allocation.exclude.*,
+        index.routing.allocation.include.* and index.routing.allocation.require.*.
+        Modification of index.routing.allocation.include._tier_preference is an exception and is not allowed as the
+        Watcher shards must always be in the data_content tier.
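# Illustrative sketch only (not part of this patch): adjusting the two Watcher index
# settings that have always been supported. It assumes the generated async client
# exposes them as the keyword arguments shown below; the values and cluster URL are
# example assumptions.
import asyncio
from elasticsearch import AsyncElasticsearch

async def tune_watcher_index() -> None:
    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster
    await client.watcher.update_settings(
        index_auto_expand_replicas="0-4",  # assumed parameter name
        index_number_of_replicas=1,        # assumed parameter name
    )
    await client.close()

asyncio.run(tune_watcher_index())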

        ``_ diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py index b8d4b3297..bce9d43fb 100644 --- a/elasticsearch/_sync/client/__init__.py +++ b/elasticsearch/_sync/client/__init__.py @@ -4837,7 +4837,8 @@ def search( :param min_compatible_shard_node: The minimum version of the node that can handle the request Any handling node with a lower version will fail the request. :param min_score: The minimum `_score` for matching documents. Documents with - a lower `_score` are not included in the search results. + a lower `_score` are not included in search results and results collected + by aggregations. :param pit: Limit the search to a point in time (PIT). If you provide a PIT, you cannot specify an `` in the request path. :param post_filter: Use the `post_filter` parameter to filter search results. diff --git a/elasticsearch/_sync/client/async_search.py b/elasticsearch/_sync/client/async_search.py index 1a004a6f1..3759ab575 100644 --- a/elasticsearch/_sync/client/async_search.py +++ b/elasticsearch/_sync/client/async_search.py @@ -403,7 +403,7 @@ def submit( of concurrent shard requests :param min_compatible_shard_node: :param min_score: Minimum _score for matching documents. Documents with a lower - _score are not included in the search results. + _score are not included in search results and results collected by aggregations. :param pit: Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an in the request path. :param post_filter: diff --git a/elasticsearch/_sync/client/fleet.py b/elasticsearch/_sync/client/fleet.py index 837e7b195..ba0285e46 100644 --- a/elasticsearch/_sync/client/fleet.py +++ b/elasticsearch/_sync/client/fleet.py @@ -430,7 +430,7 @@ def search( :param max_concurrent_shard_requests: :param min_compatible_shard_node: :param min_score: Minimum _score for matching documents. Documents with a lower - _score are not included in the search results. + _score are not included in search results and results collected by aggregations. :param pit: Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an in the request path. :param post_filter: diff --git a/elasticsearch/_sync/client/watcher.py b/elasticsearch/_sync/client/watcher.py index 84e3b7f82..7efadfbc1 100644 --- a/elasticsearch/_sync/client/watcher.py +++ b/elasticsearch/_sync/client/watcher.py @@ -845,7 +845,10 @@ def update_settings(

         Update Watcher index settings.
         Update settings for the Watcher internal index (.watches).
         Only a subset of settings can be modified.
-        This includes index.auto_expand_replicas and index.number_of_replicas.
+        This includes index.auto_expand_replicas, index.number_of_replicas, index.routing.allocation.exclude.*,
+        index.routing.allocation.include.* and index.routing.allocation.require.*.
+        Modification of index.routing.allocation.include._tier_preference is an exception and is not allowed as the
+        Watcher shards must always be in the data_content tier.

        ``_ From 131a1bf2a022dd621e05c61afdde4db40b992456 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Thu, 10 Apr 2025 13:32:54 +0200 Subject: [PATCH 45/65] Auto-generated API code (#2895) --- elasticsearch/_async/client/inference.py | 138 ----------------------- elasticsearch/_sync/client/inference.py | 138 ----------------------- 2 files changed, 276 deletions(-) diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py index c71151e40..c2ca3ef33 100644 --- a/elasticsearch/_async/client/inference.py +++ b/elasticsearch/_async/client/inference.py @@ -351,67 +351,6 @@ async def inference( path_parts=__path_parts, ) - @_rewrite_parameters( - body_name="chat_completion_request", - ) - async def post_eis_chat_completion( - self, - *, - eis_inference_id: str, - chat_completion_request: t.Optional[t.Mapping[str, t.Any]] = None, - body: t.Optional[t.Mapping[str, t.Any]] = None, - error_trace: t.Optional[bool] = None, - filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, - human: t.Optional[bool] = None, - pretty: t.Optional[bool] = None, - ) -> ObjectApiResponse[t.Any]: - """ - .. raw:: html - -

-        Perform a chat completion task through the Elastic Inference Service (EIS).
-
-        Perform a chat completion inference task with the elastic service.

        - - - ``_ - - :param eis_inference_id: The unique identifier of the inference endpoint. - :param chat_completion_request: - """ - if eis_inference_id in SKIP_IN_PATH: - raise ValueError("Empty value passed for parameter 'eis_inference_id'") - if chat_completion_request is None and body is None: - raise ValueError( - "Empty value passed for parameters 'chat_completion_request' and 'body', one of them should be set." - ) - elif chat_completion_request is not None and body is not None: - raise ValueError("Cannot set both 'chat_completion_request' and 'body'") - __path_parts: t.Dict[str, str] = {"eis_inference_id": _quote(eis_inference_id)} - __path = ( - f'/_inference/chat_completion/{__path_parts["eis_inference_id"]}/_stream' - ) - __query: t.Dict[str, t.Any] = {} - if error_trace is not None: - __query["error_trace"] = error_trace - if filter_path is not None: - __query["filter_path"] = filter_path - if human is not None: - __query["human"] = human - if pretty is not None: - __query["pretty"] = pretty - __body = ( - chat_completion_request if chat_completion_request is not None else body - ) - __headers = {"accept": "application/json", "content-type": "application/json"} - return await self.perform_request( # type: ignore[return-value] - "POST", - __path, - params=__query, - headers=__headers, - body=__body, - endpoint_id="inference.post_eis_chat_completion", - path_parts=__path_parts, - ) - @_rewrite_parameters( body_name="inference_config", ) @@ -1088,83 +1027,6 @@ async def put_cohere( path_parts=__path_parts, ) - @_rewrite_parameters( - body_fields=("service", "service_settings"), - ) - async def put_eis( - self, - *, - task_type: t.Union[str, t.Literal["chat_completion"]], - eis_inference_id: str, - service: t.Optional[t.Union[str, t.Literal["elastic"]]] = None, - service_settings: t.Optional[t.Mapping[str, t.Any]] = None, - error_trace: t.Optional[bool] = None, - filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, - human: t.Optional[bool] = None, - pretty: t.Optional[bool] = None, - body: t.Optional[t.Dict[str, t.Any]] = None, - ) -> ObjectApiResponse[t.Any]: - """ - .. raw:: html - -

-        Create an Elastic Inference Service (EIS) inference endpoint.
-
-        Create an inference endpoint to perform an inference task through the Elastic Inference Service (EIS).

        - - - ``_ - - :param task_type: The type of the inference task that the model will perform. - NOTE: The `chat_completion` task type only supports streaming and only through - the _stream API. - :param eis_inference_id: The unique identifier of the inference endpoint. - :param service: The type of service supported for the specified task type. In - this case, `elastic`. - :param service_settings: Settings used to install the inference model. These - settings are specific to the `elastic` service. - """ - if task_type in SKIP_IN_PATH: - raise ValueError("Empty value passed for parameter 'task_type'") - if eis_inference_id in SKIP_IN_PATH: - raise ValueError("Empty value passed for parameter 'eis_inference_id'") - if service is None and body is None: - raise ValueError("Empty value passed for parameter 'service'") - if service_settings is None and body is None: - raise ValueError("Empty value passed for parameter 'service_settings'") - __path_parts: t.Dict[str, str] = { - "task_type": _quote(task_type), - "eis_inference_id": _quote(eis_inference_id), - } - __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["eis_inference_id"]}' - __query: t.Dict[str, t.Any] = {} - __body: t.Dict[str, t.Any] = body if body is not None else {} - if error_trace is not None: - __query["error_trace"] = error_trace - if filter_path is not None: - __query["filter_path"] = filter_path - if human is not None: - __query["human"] = human - if pretty is not None: - __query["pretty"] = pretty - if not __body: - if service is not None: - __body["service"] = service - if service_settings is not None: - __body["service_settings"] = service_settings - if not __body: - __body = None # type: ignore[assignment] - __headers = {"accept": "application/json"} - if __body is not None: - __headers["content-type"] = "application/json" - return await self.perform_request( # type: ignore[return-value] - "PUT", - __path, - params=__query, - headers=__headers, - body=__body, - endpoint_id="inference.put_eis", - path_parts=__path_parts, - ) - @_rewrite_parameters( body_fields=( "service", diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py index 1a5611b16..5bfe426da 100644 --- a/elasticsearch/_sync/client/inference.py +++ b/elasticsearch/_sync/client/inference.py @@ -351,67 +351,6 @@ def inference( path_parts=__path_parts, ) - @_rewrite_parameters( - body_name="chat_completion_request", - ) - def post_eis_chat_completion( - self, - *, - eis_inference_id: str, - chat_completion_request: t.Optional[t.Mapping[str, t.Any]] = None, - body: t.Optional[t.Mapping[str, t.Any]] = None, - error_trace: t.Optional[bool] = None, - filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, - human: t.Optional[bool] = None, - pretty: t.Optional[bool] = None, - ) -> ObjectApiResponse[t.Any]: - """ - .. raw:: html - -

-        Perform a chat completion task through the Elastic Inference Service (EIS).
-
-        Perform a chat completion inference task with the elastic service.

        - - - ``_ - - :param eis_inference_id: The unique identifier of the inference endpoint. - :param chat_completion_request: - """ - if eis_inference_id in SKIP_IN_PATH: - raise ValueError("Empty value passed for parameter 'eis_inference_id'") - if chat_completion_request is None and body is None: - raise ValueError( - "Empty value passed for parameters 'chat_completion_request' and 'body', one of them should be set." - ) - elif chat_completion_request is not None and body is not None: - raise ValueError("Cannot set both 'chat_completion_request' and 'body'") - __path_parts: t.Dict[str, str] = {"eis_inference_id": _quote(eis_inference_id)} - __path = ( - f'/_inference/chat_completion/{__path_parts["eis_inference_id"]}/_stream' - ) - __query: t.Dict[str, t.Any] = {} - if error_trace is not None: - __query["error_trace"] = error_trace - if filter_path is not None: - __query["filter_path"] = filter_path - if human is not None: - __query["human"] = human - if pretty is not None: - __query["pretty"] = pretty - __body = ( - chat_completion_request if chat_completion_request is not None else body - ) - __headers = {"accept": "application/json", "content-type": "application/json"} - return self.perform_request( # type: ignore[return-value] - "POST", - __path, - params=__query, - headers=__headers, - body=__body, - endpoint_id="inference.post_eis_chat_completion", - path_parts=__path_parts, - ) - @_rewrite_parameters( body_name="inference_config", ) @@ -1088,83 +1027,6 @@ def put_cohere( path_parts=__path_parts, ) - @_rewrite_parameters( - body_fields=("service", "service_settings"), - ) - def put_eis( - self, - *, - task_type: t.Union[str, t.Literal["chat_completion"]], - eis_inference_id: str, - service: t.Optional[t.Union[str, t.Literal["elastic"]]] = None, - service_settings: t.Optional[t.Mapping[str, t.Any]] = None, - error_trace: t.Optional[bool] = None, - filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, - human: t.Optional[bool] = None, - pretty: t.Optional[bool] = None, - body: t.Optional[t.Dict[str, t.Any]] = None, - ) -> ObjectApiResponse[t.Any]: - """ - .. raw:: html - -

-        Create an Elastic Inference Service (EIS) inference endpoint.
-
-        Create an inference endpoint to perform an inference task through the Elastic Inference Service (EIS).

        - - - ``_ - - :param task_type: The type of the inference task that the model will perform. - NOTE: The `chat_completion` task type only supports streaming and only through - the _stream API. - :param eis_inference_id: The unique identifier of the inference endpoint. - :param service: The type of service supported for the specified task type. In - this case, `elastic`. - :param service_settings: Settings used to install the inference model. These - settings are specific to the `elastic` service. - """ - if task_type in SKIP_IN_PATH: - raise ValueError("Empty value passed for parameter 'task_type'") - if eis_inference_id in SKIP_IN_PATH: - raise ValueError("Empty value passed for parameter 'eis_inference_id'") - if service is None and body is None: - raise ValueError("Empty value passed for parameter 'service'") - if service_settings is None and body is None: - raise ValueError("Empty value passed for parameter 'service_settings'") - __path_parts: t.Dict[str, str] = { - "task_type": _quote(task_type), - "eis_inference_id": _quote(eis_inference_id), - } - __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["eis_inference_id"]}' - __query: t.Dict[str, t.Any] = {} - __body: t.Dict[str, t.Any] = body if body is not None else {} - if error_trace is not None: - __query["error_trace"] = error_trace - if filter_path is not None: - __query["filter_path"] = filter_path - if human is not None: - __query["human"] = human - if pretty is not None: - __query["pretty"] = pretty - if not __body: - if service is not None: - __body["service"] = service - if service_settings is not None: - __body["service_settings"] = service_settings - if not __body: - __body = None # type: ignore[assignment] - __headers = {"accept": "application/json"} - if __body is not None: - __headers["content-type"] = "application/json" - return self.perform_request( # type: ignore[return-value] - "PUT", - __path, - params=__query, - headers=__headers, - body=__body, - endpoint_id="inference.put_eis", - path_parts=__path_parts, - ) - @_rewrite_parameters( body_fields=( "service", From 8ecd73f4d70a3f062dfce355604229ec78c96864 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 11 Apr 2025 16:38:35 +0400 Subject: [PATCH 46/65] Revert "Add back inference.inference API (#2873)" (#2899) (#2901) This reverts commit eac539dcc301b6062716f1540dc0d1efe888671c. This API will be added back through the specification and isn't deprecated anymore. 
(cherry picked from commit 0f845eaa78427c37231fdd43e0cb549c2acc72f1) Co-authored-by: Quentin Pradet --- elasticsearch/_async/client/inference.py | 119 +---------------------- elasticsearch/_sync/client/inference.py | 119 +---------------------- elasticsearch/_sync/client/utils.py | 7 -- 3 files changed, 2 insertions(+), 243 deletions(-) diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py index c2ca3ef33..b86740319 100644 --- a/elasticsearch/_async/client/inference.py +++ b/elasticsearch/_async/client/inference.py @@ -20,13 +20,7 @@ from elastic_transport import ObjectApiResponse from ._base import NamespacedClient -from .utils import ( - SKIP_IN_PATH, - Stability, - _quote, - _rewrite_parameters, - _stability_warning, -) +from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class InferenceClient(NamespacedClient): @@ -240,117 +234,6 @@ async def get( path_parts=__path_parts, ) - @_rewrite_parameters( - body_fields=("input", "query", "task_settings"), - ) - @_stability_warning( - Stability.DEPRECATED, - version="8.18.0", - message="inference.inference() is deprecated in favor of provider-specific APIs such as inference.put_elasticsearch() or inference.put_hugging_face()", - ) - async def inference( - self, - *, - inference_id: str, - input: t.Optional[t.Union[str, t.Sequence[str]]] = None, - task_type: t.Optional[ - t.Union[ - str, - t.Literal[ - "chat_completion", - "completion", - "rerank", - "sparse_embedding", - "text_embedding", - ], - ] - ] = None, - error_trace: t.Optional[bool] = None, - filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, - human: t.Optional[bool] = None, - pretty: t.Optional[bool] = None, - query: t.Optional[str] = None, - task_settings: t.Optional[t.Any] = None, - timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - body: t.Optional[t.Dict[str, t.Any]] = None, - ) -> ObjectApiResponse[t.Any]: - """ - .. raw:: html - -

-        Perform inference on the service.
-
-        This API enables you to use machine learning models to perform specific tasks on data that you provide as an input.
-        It returns a response with the results of the tasks.
-        The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.
-
-        info
-        The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
-
        - - - ``_ - - :param inference_id: The unique identifier for the inference endpoint. - :param input: The text on which you want to perform the inference task. It can - be a single string or an array. > info > Inference endpoints for the `completion` - task type currently only support a single string as input. - :param task_type: The type of inference task that the model performs. - :param query: The query input, which is required only for the `rerank` task. - It is not required for other tasks. - :param task_settings: Task settings for the individual inference request. These - settings are specific to the task type you specified and override the task - settings specified when initializing the service. - :param timeout: The amount of time to wait for the inference request to complete. - """ - if inference_id in SKIP_IN_PATH: - raise ValueError("Empty value passed for parameter 'inference_id'") - if input is None and body is None: - raise ValueError("Empty value passed for parameter 'input'") - __path_parts: t.Dict[str, str] - if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: - __path_parts = { - "task_type": _quote(task_type), - "inference_id": _quote(inference_id), - } - __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}' - elif inference_id not in SKIP_IN_PATH: - __path_parts = {"inference_id": _quote(inference_id)} - __path = f'/_inference/{__path_parts["inference_id"]}' - else: - raise ValueError("Couldn't find a path for the given parameters") - __query: t.Dict[str, t.Any] = {} - __body: t.Dict[str, t.Any] = body if body is not None else {} - if error_trace is not None: - __query["error_trace"] = error_trace - if filter_path is not None: - __query["filter_path"] = filter_path - if human is not None: - __query["human"] = human - if pretty is not None: - __query["pretty"] = pretty - if timeout is not None: - __query["timeout"] = timeout - if not __body: - if input is not None: - __body["input"] = input - if query is not None: - __body["query"] = query - if task_settings is not None: - __body["task_settings"] = task_settings - if not __body: - __body = None # type: ignore[assignment] - __headers = {"accept": "application/json"} - if __body is not None: - __headers["content-type"] = "application/json" - return await self.perform_request( # type: ignore[return-value] - "POST", - __path, - params=__query, - headers=__headers, - body=__body, - endpoint_id="inference.inference", - path_parts=__path_parts, - ) - @_rewrite_parameters( body_name="inference_config", ) diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py index 5bfe426da..c98a22de8 100644 --- a/elasticsearch/_sync/client/inference.py +++ b/elasticsearch/_sync/client/inference.py @@ -20,13 +20,7 @@ from elastic_transport import ObjectApiResponse from ._base import NamespacedClient -from .utils import ( - SKIP_IN_PATH, - Stability, - _quote, - _rewrite_parameters, - _stability_warning, -) +from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class InferenceClient(NamespacedClient): @@ -240,117 +234,6 @@ def get( path_parts=__path_parts, ) - @_rewrite_parameters( - body_fields=("input", "query", "task_settings"), - ) - @_stability_warning( - Stability.DEPRECATED, - version="8.18.0", - message="inference.inference() is deprecated in favor of provider-specific APIs such as inference.put_elasticsearch() or inference.put_hugging_face()", - ) - def inference( - self, - *, - inference_id: str, - input: t.Optional[t.Union[str, 
t.Sequence[str]]] = None, - task_type: t.Optional[ - t.Union[ - str, - t.Literal[ - "chat_completion", - "completion", - "rerank", - "sparse_embedding", - "text_embedding", - ], - ] - ] = None, - error_trace: t.Optional[bool] = None, - filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, - human: t.Optional[bool] = None, - pretty: t.Optional[bool] = None, - query: t.Optional[str] = None, - task_settings: t.Optional[t.Any] = None, - timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, - body: t.Optional[t.Dict[str, t.Any]] = None, - ) -> ObjectApiResponse[t.Any]: - """ - .. raw:: html - -

-        Perform inference on the service.
-
-        This API enables you to use machine learning models to perform specific tasks on data that you provide as an input.
-        It returns a response with the results of the tasks.
-        The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.
-
-        info
-        The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
-
        - - - ``_ - - :param inference_id: The unique identifier for the inference endpoint. - :param input: The text on which you want to perform the inference task. It can - be a single string or an array. > info > Inference endpoints for the `completion` - task type currently only support a single string as input. - :param task_type: The type of inference task that the model performs. - :param query: The query input, which is required only for the `rerank` task. - It is not required for other tasks. - :param task_settings: Task settings for the individual inference request. These - settings are specific to the task type you specified and override the task - settings specified when initializing the service. - :param timeout: The amount of time to wait for the inference request to complete. - """ - if inference_id in SKIP_IN_PATH: - raise ValueError("Empty value passed for parameter 'inference_id'") - if input is None and body is None: - raise ValueError("Empty value passed for parameter 'input'") - __path_parts: t.Dict[str, str] - if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: - __path_parts = { - "task_type": _quote(task_type), - "inference_id": _quote(inference_id), - } - __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}' - elif inference_id not in SKIP_IN_PATH: - __path_parts = {"inference_id": _quote(inference_id)} - __path = f'/_inference/{__path_parts["inference_id"]}' - else: - raise ValueError("Couldn't find a path for the given parameters") - __query: t.Dict[str, t.Any] = {} - __body: t.Dict[str, t.Any] = body if body is not None else {} - if error_trace is not None: - __query["error_trace"] = error_trace - if filter_path is not None: - __query["filter_path"] = filter_path - if human is not None: - __query["human"] = human - if pretty is not None: - __query["pretty"] = pretty - if timeout is not None: - __query["timeout"] = timeout - if not __body: - if input is not None: - __body["input"] = input - if query is not None: - __body["query"] = query - if task_settings is not None: - __body["task_settings"] = task_settings - if not __body: - __body = None # type: ignore[assignment] - __headers = {"accept": "application/json"} - if __body is not None: - __headers["content-type"] = "application/json" - return self.perform_request( # type: ignore[return-value] - "POST", - __path, - params=__query, - headers=__headers, - body=__body, - endpoint_id="inference.inference", - path_parts=__path_parts, - ) - @_rewrite_parameters( body_name="inference_config", ) diff --git a/elasticsearch/_sync/client/utils.py b/elasticsearch/_sync/client/utils.py index 2d7a93d65..51afe1c78 100644 --- a/elasticsearch/_sync/client/utils.py +++ b/elasticsearch/_sync/client/utils.py @@ -77,7 +77,6 @@ class Stability(Enum): STABLE = auto() BETA = auto() EXPERIMENTAL = auto() - DEPRECATED = auto() _TYPE_HOSTS = Union[ @@ -480,12 +479,6 @@ def wrapped(*args: Any, **kwargs: Any) -> Any: category=GeneralAvailabilityWarning, stacklevel=warn_stacklevel(), ) - elif stability == Stability.DEPRECATED and message and version: - warnings.warn( - f"In elasticsearch-py {version}, {message}.", - category=DeprecationWarning, - stacklevel=warn_stacklevel(), - ) return api(*args, **kwargs) From 29b53514fa11f61f9cbbe13bf08537af0ec5e5b9 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 11 Apr 2025 16:41:54 +0100 Subject: [PATCH 47/65] Only wipe user content between tests (#2871) (#2906) * Only wipe user 
content between tests * wipe data streams * do not wipe component templates * remove all references to unused is_xpack variable * remove dead code * remove unused imports (cherry picked from commit b45f50e19302c9bfeaacf374de6e90f1ddd67924) Co-authored-by: Miguel Grinberg --- test_elasticsearch/utils.py | 294 +----------------------------------- 1 file changed, 3 insertions(+), 291 deletions(-) diff --git a/test_elasticsearch/utils.py b/test_elasticsearch/utils.py index 8a13ff62f..021deb76e 100644 --- a/test_elasticsearch/utils.py +++ b/test_elasticsearch/utils.py @@ -22,10 +22,8 @@ from typing import Optional, Tuple from elasticsearch import ( - AuthorizationException, ConnectionError, Elasticsearch, - NotFoundError, ) SOURCE_DIR = Path(__file__).absolute().parent.parent @@ -118,40 +116,15 @@ def wipe_cluster(client): except ImportError: pass - is_xpack = True - if is_xpack: - wipe_rollup_jobs(client) - wait_for_pending_tasks(client, filter="xpack/rollup/job") - wipe_slm_policies(client) - - # Searchable snapshot indices start in 7.8+ - if es_version(client) >= (7, 8): - wipe_searchable_snapshot_indices(client) - wipe_snapshots(client) - if is_xpack: - wipe_data_streams(client) + wipe_data_streams(client) wipe_indices(client) - if is_xpack: - wipe_xpack_templates(client) - else: - client.indices.delete_template(name="*") - client.indices.delete_index_template(name="*") - client.cluster.delete_component_template(name="*") + client.indices.delete_template(name="*") + client.indices.delete_index_template(name="*") wipe_cluster_settings(client) - if is_xpack: - wipe_ilm_policies(client) - wipe_auto_follow_patterns(client) - wipe_tasks(client) - wipe_node_shutdown_metadata(client) - wait_for_pending_datafeeds_and_jobs(client) - wipe_calendars(client) - wipe_filters(client) - wipe_transforms(client) - wait_for_cluster_state_updates_to_finish(client) if close_after_wipe: client.close() @@ -169,16 +142,6 @@ def wipe_cluster_settings(client): client.cluster.put_settings(body=new_settings) -def wipe_rollup_jobs(client): - rollup_jobs = client.rollup.get_jobs(id="_all").get("jobs", ()) - for job in rollup_jobs: - job_id = job["config"]["id"] - client.options(ignore_status=404).rollup.stop_job( - id=job_id, wait_for_completion=True - ) - client.options(ignore_status=404).rollup.delete_job(id=job_id) - - def wipe_snapshots(client): """Deletes all the snapshots and repositories from the cluster""" in_progress_snapshots = [] @@ -223,259 +186,8 @@ def wipe_indices(client): ) -def wipe_searchable_snapshot_indices(client): - cluster_metadata = client.cluster.state( - metric="metadata", - filter_path="metadata.indices.*.settings.index.store.snapshot", - ) - if cluster_metadata: - for index in cluster_metadata["metadata"]["indices"].keys(): - client.indices.delete(index=index) - - -def wipe_xpack_templates(client): - # Delete index templates (including legacy) - templates = [ - x.strip() for x in client.cat.templates(h="name").split("\n") if x.strip() - ] - for template in templates: - if is_xpack_template(template): - continue - try: - client.indices.delete_template(name=template) - except NotFoundError as e: - if f"index_template [{template}] missing" in str(e): - client.indices.delete_index_template(name=template) - - # Delete component templates - templates = client.cluster.get_component_template()["component_templates"] - templates_to_delete = [ - template["name"] - for template in templates - if not is_xpack_template(template["name"]) - ] - if templates_to_delete: - 
client.cluster.delete_component_template(name=",".join(templates_to_delete)) - - -def wipe_ilm_policies(client): - for policy in client.ilm.get_lifecycle(): - if ( - policy - not in { - "ilm-history-ilm-policy", - "slm-history-ilm-policy", - "watch-history-ilm-policy", - "watch-history-ilm-policy-16", - "ml-size-based-ilm-policy", - "logs", - "metrics", - "synthetics", - "7-days-default", - "30-days-default", - "90-days-default", - "180-days-default", - "365-days-default", - ".fleet-actions-results-ilm-policy", - ".deprecation-indexing-ilm-policy", - ".monitoring-8-ilm-policy", - } - and "-history-ilm-polcy" not in policy - and "-meta-ilm-policy" not in policy - and "-data-ilm-policy" not in policy - and "@lifecycle" not in policy - ): - client.ilm.delete_lifecycle(name=policy) - - -def wipe_slm_policies(client): - policies = client.slm.get_lifecycle() - for policy in policies: - if policy not in {"cloud-snapshot-policy"}: - client.slm.delete_lifecycle(policy_id=policy) - - -def wipe_auto_follow_patterns(client): - for pattern in client.ccr.get_auto_follow_pattern()["patterns"]: - client.ccr.delete_auto_follow_pattern(name=pattern["name"]) - - -def wipe_node_shutdown_metadata(client): - try: - shutdown_status = client.shutdown.get_node() - # If response contains these two keys the feature flag isn't enabled - # on this cluster so skip this step now. - if "_nodes" in shutdown_status and "cluster_name" in shutdown_status: - return - - for shutdown_node in shutdown_status.get("nodes", []): - node_id = shutdown_node["node_id"] - client.shutdown.delete_node(node_id=node_id) - - # Elastic Cloud doesn't allow this so we skip. - except AuthorizationException: - pass - - -def wipe_tasks(client): - tasks = client.tasks.list() - for node_name, node in tasks.get("node", {}).items(): - for task_id in node.get("tasks", ()): - client.tasks.cancel(task_id=task_id, wait_for_completion=True) - - -def wait_for_pending_tasks(client, filter, timeout=30): - end_time = time.time() + timeout - while time.time() < end_time: - tasks = client.cat.tasks(detailed=True).split("\n") - if not any(filter in task for task in tasks): - break - - -def wait_for_pending_datafeeds_and_jobs(client: Elasticsearch, timeout=30): - end_time = time.time() + timeout - while time.time() < end_time: - resp = client.ml.get_datafeeds(datafeed_id="*", allow_no_match=True) - if resp["count"] == 0: - break - for datafeed in resp["datafeeds"]: - client.options(ignore_status=404).ml.delete_datafeed( - datafeed_id=datafeed["datafeed_id"] - ) - - end_time = time.time() + timeout - while time.time() < end_time: - resp = client.ml.get_jobs(job_id="*", allow_no_match=True) - if resp["count"] == 0: - break - for job in resp["jobs"]: - client.options(ignore_status=404).ml.close_job(job_id=job["job_id"]) - client.options(ignore_status=404).ml.delete_job(job_id=job["job_id"]) - - end_time = time.time() + timeout - while time.time() < end_time: - resp = client.ml.get_data_frame_analytics(id="*") - if resp["count"] == 0: - break - for job in resp["data_frame_analytics"]: - client.options(ignore_status=404).ml.stop_data_frame_analytics(id=job["id"]) - client.options(ignore_status=404).ml.delete_data_frame_analytics( - id=job["id"] - ) - - -def wipe_filters(client: Elasticsearch, timeout=30): - end_time = time.time() + timeout - while time.time() < end_time: - resp = client.ml.get_filters(filter_id="*") - if resp["count"] == 0: - break - for filter in resp["filters"]: - client.options(ignore_status=404).ml.delete_filter( - filter_id=filter["filter_id"] - ) 
- - -def wipe_calendars(client: Elasticsearch, timeout=30): - end_time = time.time() + timeout - while time.time() < end_time: - resp = client.ml.get_calendars(calendar_id="*") - if resp["count"] == 0: - break - for calendar in resp["calendars"]: - client.options(ignore_status=404).ml.delete_calendar( - calendar_id=calendar["calendar_id"] - ) - - -def wipe_transforms(client: Elasticsearch, timeout=30): - end_time = time.time() + timeout - while time.time() < end_time: - resp = client.transform.get_transform(transform_id="*") - if resp["count"] == 0: - break - for trasnform in resp["transforms"]: - client.options(ignore_status=404).transform.stop_transform( - transform_id=trasnform["id"] - ) - client.options(ignore_status=404).transform.delete_transform( - transform_id=trasnform["id"] - ) - - def wait_for_cluster_state_updates_to_finish(client, timeout=30): end_time = time.time() + timeout while time.time() < end_time: if not client.cluster.pending_tasks().get("tasks", ()): break - - -def is_xpack_template(name): - if name.startswith("."): - return True - elif name.startswith("behavioral_analytics-events"): - return True - elif name.startswith("elastic-connectors-"): - return True - elif name.startswith("entities_v1_"): - return True - elif name.endswith("@ilm"): - return True - elif name.endswith("@template"): - return True - - return name in { - "agentless", - "agentless@mappings", - "agentless@settings", - "apm-10d@lifecycle", - "apm-180d@lifecycle", - "apm-390d@lifecycle", - "apm-90d@lifecycle", - "apm@mappings", - "apm@settings", - "data-streams-mappings", - "data-streams@mappings", - "elastic-connectors", - "ecs@dynamic_templates", - "ecs@mappings", - "ilm-history-7", - "kibana-reporting@settings", - "logs", - "logs-apm.error@mappings", - "logs-apm@settings", - "logs-mappings", - "logs@mappings", - "logs-settings", - "logs@settings", - "metrics", - "metrics-apm@mappings", - "metrics-apm.service_destination@mappings", - "metrics-apm.service_summary@mappings", - "metrics-apm.service_transaction@mappings", - "metrics-apm@settings", - "metrics-apm.transaction@mappings", - "metrics-mappings", - "metrics@mappings", - "metrics-settings", - "metrics@settings", - "metrics-tsdb-settings", - "metrics@tsdb-settings", - "search-acl-filter", - "synthetics", - "synthetics-mappings", - "synthetics@mappings", - "synthetics-settings", - "synthetics@settings", - "traces-apm@mappings", - "traces-apm.rum@mappings", - "traces@mappings", - "traces@settings", - # otel - "metrics-otel@mappings", - "semconv-resource-to-ecs@mappings", - "traces-otel@mappings", - "ecs-tsdb@mappings", - "logs-otel@mappings", - "otel@mappings", - } From 79f185fe76ab988a45afe39097635da46e494e1f Mon Sep 17 00:00:00 2001 From: Quentin Pradet Date: Mon, 14 Apr 2025 17:37:26 +0400 Subject: [PATCH 48/65] Try removing 8.x release notes table of contents (#2912) --- docs/guide/release-notes.asciidoc | 44 ------------------------------- 1 file changed, 44 deletions(-) diff --git a/docs/guide/release-notes.asciidoc b/docs/guide/release-notes.asciidoc index 069cedc39..d3c7867fb 100644 --- a/docs/guide/release-notes.asciidoc +++ b/docs/guide/release-notes.asciidoc @@ -1,50 +1,6 @@ [[release-notes]] == Release notes -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> - [discrete] [[rn-8-17-2]] === 8.17.2 (2025-03-04) From 
bd8d41e74bc0ce031a134ae3f7299dfb443a2dc1 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 14 Apr 2025 17:37:42 +0400 Subject: [PATCH 49/65] Fix release notes URL (#2911) (#2913) (cherry picked from commit fe1b3dc21c93f119aec9da51ac4176b792f7e09a) Co-authored-by: Quentin Pradet --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fb156ae2a..df468b466 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1 +1 @@ -See: https://www.elastic.co/guide/en/elasticsearch/client/python-api/master/release-notes.html +See: https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/release-notes.html From 572e022fbfb31df7bd773ed5d4b512bc862244bf Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 16 Apr 2025 11:40:56 +0100 Subject: [PATCH 50/65] 8.18 client documentation updates (#2925) (#2926) (cherry picked from commit 6074ecace404c82815ed9c95868746077d80b024) Co-authored-by: Miguel Grinberg --- docs/guide/async.asciidoc | 141 ++++++++++++++++++++++ docs/guide/examples.asciidoc | 176 ++++++++++++++++++++++++++++ docs/guide/getting-started.asciidoc | 58 ++++++++- 3 files changed, 370 insertions(+), 5 deletions(-) create mode 100644 docs/guide/async.asciidoc diff --git a/docs/guide/async.asciidoc b/docs/guide/async.asciidoc new file mode 100644 index 000000000..9f3c04acd --- /dev/null +++ b/docs/guide/async.asciidoc @@ -0,0 +1,141 @@ +[[async]] +== Using with asyncio + +The `elasticsearch` package supports async/await with +https://docs.python.org/3/library/asyncio.html[asyncio] and +https://docs.aiohttp.org[aiohttp]. You can either install `aiohttp` +directly or use the `[async]` extra: + +[source,bash] +---- +$ python -m pip install elasticsearch aiohttp + +# - OR - + +$ python -m pip install elasticsearch[async] +---- + +[discrete] +=== Getting Started with Async + +After installation all async API endpoints are available via +`~elasticsearch.AsyncElasticsearch` and are used in the same way as +other APIs, with an extra `await`: + +[source,python] +---- +import asyncio +from elasticsearch import AsyncElasticsearch + +client = AsyncElasticsearch() + +async def main(): + resp = await client.search( + index="documents", + body={"query": {"match_all": {}}}, + size=20, + ) + print(resp) + +loop = asyncio.get_event_loop() +loop.run_until_complete(main()) +---- + +All APIs that are available under the sync client are also available +under the async client. + +https://elasticsearch-py.readthedocs.io/en/latest/async.html#api-reference[Reference documentation] + +[discrete] +=== ASGI Applications and Elastic APM + +https://asgi.readthedocs.io[ASGI] (Asynchronous Server Gateway +Interface) is a way to serve Python web applications making use of +async I/O to achieve better performance. Some examples of ASGI +frameworks include FastAPI, Django 3.0+, and Starlette. If you're +using one of these frameworks along with Elasticsearch then you should +be using `~elasticsearch.AsyncElasticsearch` to avoid blocking the event +loop with synchronous network calls for optimal performance. + +https://www.elastic.co/guide/en/apm/agent/python/current/index.html[Elastic +APM] also supports tracing of async Elasticsearch queries just the same +as synchronous queries. 
For an example on how to configure +`AsyncElasticsearch` with a popular ASGI framework +https://fastapi.tiangolo.com/[FastAPI] and APM tracing there is a +https://github.com/elastic/elasticsearch-py/tree/master/examples/fastapi-apm[pre-built +example] in the `examples/fastapi-apm` directory. + +See also the <> page. + +[discrete] +=== Frequently Asked Questions + +[discrete] +==== ValueError when initializing `AsyncElasticsearch`? + +If when trying to use `AsyncElasticsearch` you receive +`ValueError: You must have 'aiohttp' installed to use AiohttpHttpNode` +you should ensure that you have `aiohttp` installed in your environment +(check with `$ python -m pip freeze | grep aiohttp`). Otherwise, +async support won't be available. + +[discrete] +==== What about the `elasticsearch-async` package? + +Previously asyncio was supported separately via the +https://github.com/elastic/elasticsearch-py-async[elasticsearch-async] +package. The `elasticsearch-async` package has been deprecated in favor +of `AsyncElasticsearch` provided by the `elasticsearch` package in v7.8 +and onwards. + +[discrete] +==== Receiving 'Unclosed client session / connector' warning? + +This warning is created by `aiohttp` when an open HTTP connection is +garbage collected. You'll typically run into this when closing your +application. To resolve the issue ensure that +`~elasticsearch.AsyncElasticsearch.close` is called before the +`~elasticsearch.AsyncElasticsearch` instance is garbage collected. + +For example if using FastAPI that might look like this: + +[source,python] +---- +import os +from contextlib import asynccontextmanager + +from fastapi import FastAPI +from elasticsearch import AsyncElasticsearch + +ELASTICSEARCH_URL = os.environ["ELASTICSEARCH_URL"] +client = None + +@asynccontextmanager +async def lifespan(app: FastAPI): + global client + client = AsyncElasticsearch(ELASTICSEARCH_URL) + yield + await client.close() + +app = FastAPI(lifespan=lifespan) + +@app.get("/") +async def main(): + return await client.info() +---- + +You can run this example by saving it to `main.py` and executing +`ELASTICSEARCH_URL=http://localhost:9200 uvicorn main:app`. + +[discrete] +=== Async Helpers + +Async variants of all helpers are available in `elasticsearch.helpers` +and are all prefixed with `async_*`. You'll notice that these APIs +are identical to the ones in the sync <> documentation. + +All async helpers that accept an iterator or generator also accept async +iterators and async generators. + +https://elasticsearch-py.readthedocs.io/en/latest/async.html#async-helpers[Reference documentation] + diff --git a/docs/guide/examples.asciidoc b/docs/guide/examples.asciidoc index b9a5650a6..575f43bbe 100644 --- a/docs/guide/examples.asciidoc +++ b/docs/guide/examples.asciidoc @@ -109,3 +109,179 @@ method: ---------------------------- client.delete(index="test-index", id=1) ---------------------------- + +[discrete] +[[ex-interactive]] +=== Interactive examples + +The https://github.com/elastic/elasticsearch-labs[elasticsearch-labs] +repo contains interactive and executable +https://github.com/elastic/elasticsearch-labs/tree/main/notebooks[Python +notebooks], sample apps, and resources for testing out Elasticsearch, +using the Python client. These examples are mainly focused on vector +search, hybrid search and generative AI use cases, but you'll also find +examples of basic operations like creating index mappings and performing +lexical search. 
+ +[discrete] +==== Search notebooks + +The +https://github.com/elastic/elasticsearch-labs/tree/main/notebooks/search[Search] +folder is a good place to start if you're new to Elasticsearch. This +folder contains a number of notebooks that demonstrate the fundamentals +of Elasticsearch, like indexing vectors, running lexical, semantic and +_hybrid_ searches, and more. + +The following notebooks are available: + +[arabic, start=0] +* https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/00-quick-start.ipynb[Quick +start] +* https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/01-keyword-querying-filtering.ipynb[Keyword, +querying, filtering] +* https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/02-hybrid-search.ipynb[Hybrid +search] +* https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/03-ELSER.ipynb[Semantic +search with ELSER] +* https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/04-multilingual.ipynb[Multilingual +semantic search] +* https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/05-query-rules.ipynb[Query +rules] +* https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/06-synonyms-api.ipynb[Synonyms +API quick start] + +Here's a brief overview of what you'll learn in each notebook. + +[discrete] +===== Quick start + +In the +https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/00-quick-start.ipynb[00-quick-start.ipynb] +notebook you'll learn how to: + +* Use the Elasticsearch Python client for various operations. +* Create and define an index for a sample dataset with +`dense_vector` fields. +* Transform book titles into embeddings using +https://www.sbert.net[Sentence Transformers] and index them into +Elasticsearch. +* Perform k-nearest neighbors (knn) semantic searches. +* Integrate traditional text-based search with semantic search, for a +hybrid search system. +* Use reciprocal rank fusion (RRF) to intelligently combine search +results from different retrieval systems. + +[discrete] +===== Keyword, querying, filtering + +In the +https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/01-keyword-querying-filtering.ipynb[01-keyword-querying-filtering.ipynb] +notebook, you'll learn how to: + +* Use +https://www.elastic.co/guide/en/elasticsearch/reference/current/query-filter-context.html[query +and filter contexts] to search and filter documents in Elasticsearch. +* Execute full-text searches with `match` and `multi-match` queries. +* Query and filter documents based on `text`, `number`, `date`, or +`boolean` values. +* Run multi-field searches using the `multi-match` query. +* Prioritize specific fields in the `multi-match` query for tailored +results. + +[discrete] +===== Hybrid search + +In the +https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/02-hybrid-search.ipynb[02-hybrid-search.ipynb] +notebook, you'll learn how to: + +* Combine results of traditional text-based search with semantic search, +for a hybrid search system. +* Transform fields in the sample dataset into embeddings using the +Sentence Transformer model and index them into Elasticsearch. +* Use the +https://www.elastic.co/guide/en/elasticsearch/reference/current/rrf.html#rrf-api[RRF +API] to combine the results of a `match` query and a `kNN` semantic +search. +* Walk through a super simple toy example that demonstrates, step by +step, how RRF ranking works. 
+ +[discrete] +===== Semantic search with ELSER + +In the +https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/03-ELSER.ipynb[03-ELSER.ipynb] +notebook, you'll learn how to: + +* Use the Elastic Learned Sparse Encoder (ELSER) for text +expansion-powered semantic search, out of the box — without training, +fine-tuning, or embeddings generation. +* Download and deploy the ELSER model in your Elastic environment. +* Create an Elasticsearch index named [.title-ref]#search-movies# with +specific mappings and index a dataset of movie descriptions. +* Create an ingest pipeline containing an inference processor for ELSER +model execution. +* Reindex the data from [.title-ref]#search-movies# into another index, +[.title-ref]#elser-movies#, using the ELSER pipeline for text expansion. +* Observe the results of running the documents through the model by +inspecting the additional terms it adds to documents, which enhance +searchability. +* Perform simple keyword searches on the [.title-ref]#elser-movies# +index to assess the impact of ELSER's text expansion. +* Execute ELSER-powered semantic searches using the `text_expansion` +query. + +[discrete] +===== Multilingual semantic search + +In the +https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/04-multilingual.ipynb[04-multilingual.ipynb] +notebook, you'll learn how to: + +* Use a multilingual embedding model for semantic search across +languages. +* Transform fields in the sample dataset into embeddings using the +Sentence Transformer model and index them into Elasticsearch. +* Use filtering with a `kNN` semantic search. +* Walk through a super simple toy example that demonstrates, step by +step, how multilingual search works across languages, and within +non-English languages. + +[discrete] +===== Query rules + +In the +https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/05-query-rules.ipynb[05-query-rules.ipynb] +notebook, you'll learn how to: + +* Use the query rules management APIs to create and edit promotional +rules based on contextual queries. +* Apply these query rules by using the `rule_query` in Query DSL. + +[discrete] +===== Synonyms API quick start + +In the +https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/06-synonyms-api.ipynb[06-synonyms-api.ipynb] +notebook, you'll learn how to: + +* Use the synonyms management API to create a synonyms set to enhance +your search recall. +* Configure an index to use search-time synonyms. +* Update synonyms in real time. +* Run queries that are enhanced by synonyms. + +[discrete] +==== Other notebooks + +* https://github.com/elastic/elasticsearch-labs/tree/main/notebooks/generative-ai[Generative +AI]. Notebooks that demonstrate various use cases for Elasticsearch as +the retrieval engine and vector store for LLM-powered applications. +* https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/integrations[Integrations]. +Notebooks that demonstrate how to integrate popular services and +projects with Elasticsearch, including OpenAI, Hugging Face, and +LlamaIndex +* https://github.com/elastic/elasticsearch-labs/tree/main/notebooks/langchain[Langchain]. +Notebooks that demonstrate how to integrate Elastic with LangChain, a +framework for developing applications powered by language models. 
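As a small taste of the synonyms workflow described above, creating a synonyms set with the Python client looks roughly like this (a sketch: the set id and rules are illustrative, and a cluster that supports the synonyms APIs is assumed):

[source,py]
----
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

# Create or update a synonyms set that search-time synonym filters can reference.
client.synonyms.put_synonym(
    id="my-synonyms-set",
    synonyms_set=[
        {"id": "rule-1", "synonyms": "laptop, notebook, portable computer"},
        {"id": "rule-2", "synonyms": "tv => television"},
    ],
)
----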
diff --git a/docs/guide/getting-started.asciidoc b/docs/guide/getting-started.asciidoc index 1b964e50c..58b6f33a5 100644 --- a/docs/guide/getting-started.asciidoc +++ b/docs/guide/getting-started.asciidoc @@ -70,11 +70,33 @@ This is how you create the `my_index` index: client.indices.create(index="my_index") ---- +Optionally, you can first define the expected types of your features with a +custom mapping. + +[source,py] +---- +mappings = { + "properties": { + "foo": {"type": "text"}, + "bar": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256, + } + }, + }, + } +} + +client.indices.create(index="my_index", mappings=mappings) +---- [discrete] ==== Indexing documents -This is a simple way of indexing a document: +This indexes a document with the index API: [source,py] ---- @@ -88,6 +110,28 @@ client.index( ) ---- +You can also index multiple documents at once with the bulk helper function: + +[source,py] +---- +from elasticsearch import helpers + +def generate_docs(): + for i in range(10): + yield { + "_index": "my_index", + "foo": f"foo {i}", + "bar": "bar", + } + +helpers.bulk(client, generate_docs()) +---- + +These helpers are the recommended way to perform bulk ingestion. While it is +also possible to perform bulk ingestion using `client.bulk` directly, the +helpers handle retries, ingesting chunk by chunk and more. See the +<> page for more details. + [discrete] ==== Getting documents @@ -122,10 +166,14 @@ This is how you can update a document, for example to add a new field: [source,py] ---- -client.update(index="my_index", id="my_document_id", doc={ - "foo": "bar", - "new_field": "new value", -}) +client.update( + index="my_index", + id="my_document_id", + doc={ + "foo": "bar", + "new_field": "new value", + } +) ---- From 66baf16c074e9a6cce0b606103fbd935db6a70da Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 18 Apr 2025 13:13:31 +0400 Subject: [PATCH 51/65] Surface caused_by in ApiError (#2932) (#2934) In particular, this will give a clear error when using elasticsearch-py 9.0.0 on an 8.x cluster. 
(cherry picked from commit 72efd52628e889acd8b77147efaf901ebf4b9db7) Co-authored-by: Quentin Pradet --- elasticsearch/exceptions.py | 2 ++ test_elasticsearch/test_exceptions.py | 30 +++++++++++++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/elasticsearch/exceptions.py b/elasticsearch/exceptions.py index dc410ae30..70738d5af 100644 --- a/elasticsearch/exceptions.py +++ b/elasticsearch/exceptions.py @@ -61,6 +61,7 @@ def __str__(self) -> str: if self.body and isinstance(self.body, dict) and "error" in self.body: if isinstance(self.body["error"], dict): root_cause = self.body["error"]["root_cause"][0] + caused_by = self.body["error"].get("caused_by", {}) cause = ", ".join( filter( None, @@ -68,6 +69,7 @@ def __str__(self) -> str: repr(root_cause["reason"]), root_cause.get("resource.id"), root_cause.get("resource.type"), + caused_by.get("reason"), ], ) ) diff --git a/test_elasticsearch/test_exceptions.py b/test_elasticsearch/test_exceptions.py index 00e8015c7..938aded3d 100644 --- a/test_elasticsearch/test_exceptions.py +++ b/test_elasticsearch/test_exceptions.py @@ -46,3 +46,33 @@ def test_transform_error_parse_with_error_string(self): assert ( str(e) == "ApiError(500, 'InternalServerError', 'something error message')" ) + + def test_transform_invalid_media_type_error(self): + e = ApiError( + message="InvalidMediaType", + meta=error_meta, + body={ + "error": { + "root_cause": [ + { + "type": "media_type_header_exception", + "reason": "Invalid media-type value on headers [Accept, Content-Type]", + } + ], + "type": "media_type_header_exception", + "reason": "Invalid media-type value on headers [Accept, Content-Type]", + "caused_by": { + "type": "status_exception", + "reason": "Accept version must be either version 8 or 7, but found 9. Accept=application/vnd.elasticsearch+json; compatible-with=9", + }, + }, + "status": 400, + }, + ) + + assert str(e) == ( + "ApiError(500, 'InvalidMediaType', " + "'Invalid media-type value on headers [Accept, Content-Type]', " + "Accept version must be either version 8 or 7, but found 9. " + "Accept=application/vnd.elasticsearch+json; compatible-with=9)" + ) From 67d0671aeec25c632085a8562c8e6496d55af374 Mon Sep 17 00:00:00 2001 From: Quentin Pradet Date: Fri, 18 Apr 2025 16:48:57 +0400 Subject: [PATCH 52/65] Bring 8.18.0 release to parent (#2922) --- .../120fcf9f55128d6a81d5e87a9c235bbd.asciidoc | 19 +++--- .../13ecdf99114098c76b050397d9c3d4e6.asciidoc | 3 +- .../45954b8aaedfed57012be8b6538b0a24.asciidoc | 61 ++++++++++--------- .../4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc | 3 +- .../7429b16221fe741fd31b0584786dd0b0.asciidoc | 3 +- .../82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc | 45 +++++++------- .../b45a8c6fc746e9c90fd181e69a605fad.asciidoc | 3 +- .../f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc | 3 +- docs/guide/release-notes.asciidoc | 33 ++++++++++ elasticsearch/_version.py | 2 +- .../generate-docs-examples/package-lock.json | 6 +- 11 files changed, 106 insertions(+), 75 deletions(-) diff --git a/docs/examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc b/docs/examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc index dd2c9dc6b..2f9f59595 100644 --- a/docs/examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc +++ b/docs/examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc @@ -3,16 +3,17 @@ [source, python] ---- -resp = client.inference.stream_inference( - task_type="chat_completion", +resp = client.inference.chat_completion_unified( inference_id="openai-completion", - model="gpt-4o", - messages=[ - { - "role": "user", - "content": "What is Elastic?" 
- } - ], + chat_completion_request={ + "model": "gpt-4o", + "messages": [ + { + "role": "user", + "content": "What is Elastic?" + } + ] + }, ) print(resp) ---- diff --git a/docs/examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc b/docs/examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc index 442cbc631..4c4295aaf 100644 --- a/docs/examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc +++ b/docs/examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc @@ -3,8 +3,7 @@ [source, python] ---- -resp = client.inference.inference( - task_type="sparse_embedding", +resp = client.inference.sparse_embedding( inference_id="my-elser-model", input="The sky above the port was the color of television tuned to a dead channel.", ) diff --git a/docs/examples/45954b8aaedfed57012be8b6538b0a24.asciidoc b/docs/examples/45954b8aaedfed57012be8b6538b0a24.asciidoc index cdff938a9..a6a288c6c 100644 --- a/docs/examples/45954b8aaedfed57012be8b6538b0a24.asciidoc +++ b/docs/examples/45954b8aaedfed57012be8b6538b0a24.asciidoc @@ -3,41 +3,42 @@ [source, python] ---- -resp = client.inference.stream_inference( - task_type="chat_completion", +resp = client.inference.chat_completion_unified( inference_id="openai-completion", - messages=[ - { - "role": "user", - "content": [ - { - "type": "text", - "text": "What's the price of a scarf?" - } - ] - } - ], - tools=[ - { - "type": "function", - "function": { - "name": "get_current_price", - "description": "Get the current price of a item", - "parameters": { - "type": "object", - "properties": { - "item": { - "id": "123" + chat_completion_request={ + "messages": [ + { + "role": "user", + "content": [ + { + "type": "text", + "text": "What's the price of a scarf?" + } + ] + } + ], + "tools": [ + { + "type": "function", + "function": { + "name": "get_current_price", + "description": "Get the current price of a item", + "parameters": { + "type": "object", + "properties": { + "item": { + "id": "123" + } } } } } - } - ], - tool_choice={ - "type": "function", - "function": { - "name": "get_current_price" + ], + "tool_choice": { + "type": "function", + "function": { + "name": "get_current_price" + } } }, ) diff --git a/docs/examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc b/docs/examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc index b7583a76d..575393f08 100644 --- a/docs/examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc +++ b/docs/examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc @@ -3,8 +3,7 @@ [source, python] ---- -resp = client.inference.stream_inference( - task_type="completion", +resp = client.inference.stream_completion( inference_id="openai-completion", input="What is Elastic?", ) diff --git a/docs/examples/7429b16221fe741fd31b0584786dd0b0.asciidoc b/docs/examples/7429b16221fe741fd31b0584786dd0b0.asciidoc index 9e552ae3c..06d02bd82 100644 --- a/docs/examples/7429b16221fe741fd31b0584786dd0b0.asciidoc +++ b/docs/examples/7429b16221fe741fd31b0584786dd0b0.asciidoc @@ -3,8 +3,7 @@ [source, python] ---- -resp = client.inference.inference( - task_type="text_embedding", +resp = client.inference.text_embedding( inference_id="my-cohere-endpoint", input="The sky above the port was the color of television tuned to a dead channel.", task_settings={ diff --git a/docs/examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc b/docs/examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc index 8bbb6682c..b8574f3ff 100644 --- a/docs/examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc +++ b/docs/examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc @@ -3,30 +3,31 @@ [source, python] ---- -resp = 
client.inference.stream_inference( - task_type="chat_completion", +resp = client.inference.chat_completion_unified( inference_id="openai-completion", - messages=[ - { - "role": "assistant", - "content": "Let's find out what the weather is", - "tool_calls": [ - { - "id": "call_KcAjWtAww20AihPHphUh46Gd", - "type": "function", - "function": { - "name": "get_current_weather", - "arguments": "{\"location\":\"Boston, MA\"}" + chat_completion_request={ + "messages": [ + { + "role": "assistant", + "content": "Let's find out what the weather is", + "tool_calls": [ + { + "id": "call_KcAjWtAww20AihPHphUh46Gd", + "type": "function", + "function": { + "name": "get_current_weather", + "arguments": "{\"location\":\"Boston, MA\"}" + } } - } - ] - }, - { - "role": "tool", - "content": "The weather is cold", - "tool_call_id": "call_KcAjWtAww20AihPHphUh46Gd" - } - ], + ] + }, + { + "role": "tool", + "content": "The weather is cold", + "tool_call_id": "call_KcAjWtAww20AihPHphUh46Gd" + } + ] + }, ) print(resp) ---- diff --git a/docs/examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc b/docs/examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc index fe563aefe..a22a77c39 100644 --- a/docs/examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc +++ b/docs/examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc @@ -3,8 +3,7 @@ [source, python] ---- -resp = client.inference.inference( - task_type="completion", +resp = client.inference.completion( inference_id="openai_chat_completions", input="What is Elastic?", ) diff --git a/docs/examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc b/docs/examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc index a23aeb237..4f7e6b403 100644 --- a/docs/examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc +++ b/docs/examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc @@ -3,8 +3,7 @@ [source, python] ---- -resp = client.inference.inference( - task_type="rerank", +resp = client.inference.rerank( inference_id="cohere_rerank", input=[ "luke", diff --git a/docs/guide/release-notes.asciidoc b/docs/guide/release-notes.asciidoc index d3c7867fb..f9559db86 100644 --- a/docs/guide/release-notes.asciidoc +++ b/docs/guide/release-notes.asciidoc @@ -1,6 +1,39 @@ [[release-notes]] == Release notes +=== 8.18.0 (2025-04-15) + +- Merge `Elasticsearch-DSL `_ package (https://github.com/elastic/elasticsearch-py/pull/2736[#2736]) +- Add Python DSL documentation (https://github.com/elastic/elasticsearch-py/pull/2761[#2761]) +- Autogenerate DSL field classes from schema (https://github.com/elastic/elasticsearch-py/pull/2780[#2780]) +- Document use of sub-clients (https://github.com/elastic/elasticsearch-py/pull/2798[#2798]) +- Improve DSL documentation examples with class-based queries and type hints (https://github.com/elastic/elasticsearch-py/pull/2857[#2857]) +- Document the use of `param()` in Python DSL methods (https://github.com/elastic/elasticsearch-py/pull/2861[#2861]) +- Fix `simulate` sub-client documentation (https://github.com/elastic/elasticsearch-py/pull/2749[#2749])[#2749]) +- Update APIs + * Remove `wait_for_active_shards` from experimental Get field usage stats API + * Rename incorrect `access_token` to `token` in Logout of OpenID Connect API + * Add inference APIs: Alibaba Cloud AI Search, Amazon Bedrock, Anthropic, Azure AI Studio, Azure OpenAI, Cohere, Elasticsearch, ELSER, Google AI Studio, Google Vertex AI, Hugging Face, Jina AI, Mistral, OpenAI, and Voyage AI + * Add Reindex legacy backing indices APIs + * Add Create an index from a source index API + * Add `include_source_on_error` to Create, Index, 
Update and Bulk APIs + * Add Stop async ES|QL query API + * Add `timeout` to Resolve Cluster API + * Add `adaptive_allocations` body field to Start and Update a trained model deployment API + * Rename `index_template_subtitutions` to `index_template_substitutions` in Simulate data ingestion API* Add `if_primary_term`, `if_seq_no`, `op_type`, `require_alias` and `require_data_stream` to Create API + * Add `max_concurrent_shard_requests` to Open point in time API + * Add `local` and `flat_settings` to Check index templates API + * Add `reopen` to Update index settings API + * Add `resource` to Reload search analyzer API + * Add `lazy` to Roll over to a new index API + * Add `cause` and `create` to Simulate index template APIs + * Add Elastic Inference Service (EIS) chat completion + * Add inference APIs: Alibaba Cloud AI Search, Amazon Bedrock, Anthropic, Azure AI Studio, Azure OpenAI, Cohere, Elastic Inference Service (EIS), Elasticsearch, ELSER, Google AI Studio, Google Vertex AI, Hugging Face, Jina AI, Mistral, OpenAI, and Voyage AI +- Update DSL + * Add `ignore_malformed`, `script`, `on_script_error` and `time_series_dimension` to Boolean field + * Add `index` to GeoShape field + * Add `search_inference_id` to SemanticText field + [discrete] [[rn-8-17-2]] === 8.17.2 (2025-03-04) diff --git a/elasticsearch/_version.py b/elasticsearch/_version.py index 00e2789aa..030a7ff29 100644 --- a/elasticsearch/_version.py +++ b/elasticsearch/_version.py @@ -15,4 +15,4 @@ # specific language governing permissions and limitations # under the License. -__versionstr__ = "8.17.2" +__versionstr__ = "8.18.0" diff --git a/utils/generate-docs-examples/package-lock.json b/utils/generate-docs-examples/package-lock.json index f00b0308c..1d07e7c5e 100644 --- a/utils/generate-docs-examples/package-lock.json +++ b/utils/generate-docs-examples/package-lock.json @@ -17,9 +17,9 @@ } }, "node_modules/@elastic/request-converter": { - "version": "8.18.0", - "resolved": "https://registry.npmjs.org/@elastic/request-converter/-/request-converter-8.18.0.tgz", - "integrity": "sha512-xEIB17voGulAfBThFqqtk8Osc+dNHiCqN9GW0Nf6PunNdvmAT5YvMb6u4NNI+NPAxNu90ak396g+ThjH9VRGIw==", + "version": "8.18.1", + "resolved": "https://registry.npmjs.org/@elastic/request-converter/-/request-converter-8.18.1.tgz", + "integrity": "sha512-c5Q0aIxfK0RfkHhqX3sMsMmBwo1iNJviJezRNDZ006JCASGE3peAXKlyGiFcgV5MCxW3X0KHUdz/AEOdCCMXig==", "license": "Apache-2.0", "dependencies": { "base64url": "^3.0.1", From 84ae034b82ac017040f2bdbf4cbea022fc7ac2e8 Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Tue, 29 Apr 2025 09:20:39 +0200 Subject: [PATCH 53/65] Auto-generated API code (#2939) --- elasticsearch/_async/client/cluster.py | 4 +- elasticsearch/_async/client/inference.py | 107 +++++++++++++++++ elasticsearch/_async/client/license.py | 2 +- elasticsearch/_async/client/ml.py | 10 +- elasticsearch/_async/client/transform.py | 4 +- elasticsearch/_async/client/watcher.py | 26 ++--- elasticsearch/_async/client/xpack.py | 2 +- elasticsearch/_sync/client/cluster.py | 4 +- elasticsearch/_sync/client/inference.py | 107 +++++++++++++++++ elasticsearch/_sync/client/license.py | 2 +- elasticsearch/_sync/client/ml.py | 10 +- elasticsearch/_sync/client/transform.py | 4 +- elasticsearch/_sync/client/watcher.py | 26 ++--- elasticsearch/_sync/client/xpack.py | 2 +- elasticsearch/dsl/field.py | 140 ++++++++++++++++++++++- elasticsearch/dsl/query.py | 6 + elasticsearch/dsl/types.py | 50 +++++++- 17 files changed, 451 insertions(+), 55 deletions(-) diff --git 
a/elasticsearch/_async/client/cluster.py b/elasticsearch/_async/client/cluster.py index ee5ab1218..7847bbdb6 100644 --- a/elasticsearch/_async/client/cluster.py +++ b/elasticsearch/_async/client/cluster.py @@ -185,7 +185,7 @@ async def delete_voting_config_exclusions( Remove master-eligible nodes from the voting configuration exclusion list.

        - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. :param wait_for_removal: Specifies whether to wait for all excluded nodes to @@ -680,7 +680,7 @@ async def post_voting_config_exclusions( They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes.

        - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. :param node_ids: A comma-separated list of the persistent ids of the nodes to diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py index b86740319..ce939ccf9 100644 --- a/elasticsearch/_async/client/inference.py +++ b/elasticsearch/_async/client/inference.py @@ -234,6 +234,113 @@ async def get( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("input", "query", "task_settings"), + ) + async def inference( + self, + *, + inference_id: str, + input: t.Optional[t.Union[str, t.Sequence[str]]] = None, + task_type: t.Optional[ + t.Union[ + str, + t.Literal[ + "chat_completion", + "completion", + "rerank", + "sparse_embedding", + "text_embedding", + ], + ] + ] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + query: t.Optional[str] = None, + task_settings: t.Optional[t.Any] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Perform inference on the service.

        +

        This API enables you to use machine learning models to perform specific tasks on data that you provide as an input. + It returns a response with the results of the tasks. + The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.

        +

        For details about using this API with a service, such as Amazon Bedrock, Anthropic, or HuggingFace, refer to the service-specific documentation.

        +
        +

        info + The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.

        +
        + + + ``_ + + :param inference_id: The unique identifier for the inference endpoint. + :param input: The text on which you want to perform the inference task. It can + be a single string or an array. > info > Inference endpoints for the `completion` + task type currently only support a single string as input. + :param task_type: The type of inference task that the model performs. + :param query: The query input, which is required only for the `rerank` task. + It is not required for other tasks. + :param task_settings: Task settings for the individual inference request. These + settings are specific to the task type you specified and override the task + settings specified when initializing the service. + :param timeout: The amount of time to wait for the inference request to complete. + """ + if inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'inference_id'") + if input is None and body is None: + raise ValueError("Empty value passed for parameter 'input'") + __path_parts: t.Dict[str, str] + if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: + __path_parts = { + "task_type": _quote(task_type), + "inference_id": _quote(inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}' + elif inference_id not in SKIP_IN_PATH: + __path_parts = {"inference_id": _quote(inference_id)} + __path = f'/_inference/{__path_parts["inference_id"]}' + else: + raise ValueError("Couldn't find a path for the given parameters") + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + if not __body: + if input is not None: + __body["input"] = input + if query is not None: + __body["query"] = query + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.inference", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_name="inference_config", ) diff --git a/elasticsearch/_async/client/license.py b/elasticsearch/_async/client/license.py index 61d5865da..84c91196a 100644 --- a/elasticsearch/_async/client/license.py +++ b/elasticsearch/_async/client/license.py @@ -237,7 +237,7 @@ async def post( If the operator privileges feature is enabled, only operator users can use this API.

        - ``_ + ``_ :param acknowledge: Specifies whether you acknowledge the license changes. :param license: diff --git a/elasticsearch/_async/client/ml.py b/elasticsearch/_async/client/ml.py index f6d8142c7..c97cbda3a 100644 --- a/elasticsearch/_async/client/ml.py +++ b/elasticsearch/_async/client/ml.py @@ -1676,7 +1676,7 @@ async def get_data_frame_analytics_stats( """ .. raw:: html -

        Get data frame analytics jobs usage info.

        +

        Get data frame analytics job stats.

        ``_ @@ -1744,7 +1744,7 @@ async def get_datafeed_stats( """ .. raw:: html -

        Get datafeeds usage info. +

        Get datafeed stats. You can get statistics for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using _all, by specifying * as the @@ -2033,7 +2033,7 @@ async def get_job_stats( """ .. raw:: html -

        Get anomaly detection jobs usage info.

        +

        Get anomaly detection job stats.

        ``_ @@ -5004,7 +5004,7 @@ async def update_data_frame_analytics(

        Update a data frame analytics job.

        - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -5577,7 +5577,7 @@ async def update_trained_model_deployment(

        Update a trained model deployment.

        - ``_ + ``_ :param model_id: The unique identifier of the trained model. Currently, only PyTorch models are supported. diff --git a/elasticsearch/_async/client/transform.py b/elasticsearch/_async/client/transform.py index 56ebaf10b..3d037fdb9 100644 --- a/elasticsearch/_async/client/transform.py +++ b/elasticsearch/_async/client/transform.py @@ -795,7 +795,7 @@ async def update_transform( time of update and runs with those privileges.

        - ``_ + ``_ :param transform_id: Identifier for the transform. :param defer_validation: When true, deferrable validations are not run. This @@ -890,7 +890,7 @@ async def upgrade_transforms( You may want to perform a recent cluster backup prior to the upgrade.

        - ``_ + ``_ :param dry_run: When true, the request checks for updates but does not run them. :param timeout: Period to wait for a response. If no response is received before diff --git a/elasticsearch/_async/client/watcher.py b/elasticsearch/_async/client/watcher.py index d16bc440b..32ece1d70 100644 --- a/elasticsearch/_async/client/watcher.py +++ b/elasticsearch/_async/client/watcher.py @@ -48,7 +48,7 @@ async def ack_watch( This happens when the condition of the watch is not met (the condition evaluates to false).

        - ``_ + ``_ :param watch_id: The watch identifier. :param action_id: A comma-separated list of the action identifiers to acknowledge. @@ -104,7 +104,7 @@ async def activate_watch( A watch can be either active or inactive.

        - ``_ + ``_ :param watch_id: The watch identifier. """ @@ -148,7 +148,7 @@ async def deactivate_watch( A watch can be either active or inactive.

        - ``_ + ``_ :param watch_id: The watch identifier. """ @@ -196,7 +196,7 @@ async def delete_watch( When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the .watches index.

        - ``_ + ``_ :param id: The watch identifier. """ @@ -277,7 +277,7 @@ async def execute_watch(

        When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information who stored the watch.

        - ``_ + ``_ :param id: The watch identifier. :param action_modes: Determines how to handle the watch actions as part of the @@ -365,7 +365,7 @@ async def get_settings( Only a subset of settings are shown, for example index.auto_expand_replicas and index.number_of_replicas.

        - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails @@ -410,7 +410,7 @@ async def get_watch(

        Get a watch.

        - ``_ + ``_ :param id: The watch identifier. """ @@ -485,7 +485,7 @@ async def put_watch( If the user is able to read index a, but not index b, the same will apply when the watch runs.

        - ``_ + ``_ :param id: The identifier for the watch. :param actions: The list of actions that will be run if the condition matches. @@ -598,7 +598,7 @@ async def query_watches(

        Note that only the _id and metadata.* fields are queryable or sortable.

        - ``_ + ``_ :param from_: The offset from the first result to fetch. It must be non-negative. :param query: A query that filters the watches to be returned. @@ -673,7 +673,7 @@ async def start( Start the Watcher service if it is not already running.

        - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. """ @@ -739,7 +739,7 @@ async def stats( You retrieve more metrics by using the metric parameter.

        - ``_ + ``_ :param metric: Defines which additional metrics are included in the response. :param emit_stacktraces: Defines whether stack traces are generated for each @@ -790,7 +790,7 @@ async def stop( Stop the Watcher service if it is running.

        - ``_ + ``_ :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns @@ -851,7 +851,7 @@ async def update_settings( Watcher shards must always be in the data_content tier.

        - ``_ + ``_ :param index_auto_expand_replicas: :param index_number_of_replicas: diff --git a/elasticsearch/_async/client/xpack.py b/elasticsearch/_async/client/xpack.py index 2fc8f27bf..c9a314fbc 100644 --- a/elasticsearch/_async/client/xpack.py +++ b/elasticsearch/_async/client/xpack.py @@ -103,7 +103,7 @@ async def usage( The API also provides some usage statistics.

        - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails diff --git a/elasticsearch/_sync/client/cluster.py b/elasticsearch/_sync/client/cluster.py index a0652ff92..77b11fe93 100644 --- a/elasticsearch/_sync/client/cluster.py +++ b/elasticsearch/_sync/client/cluster.py @@ -185,7 +185,7 @@ def delete_voting_config_exclusions( Remove master-eligible nodes from the voting configuration exclusion list.

        - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. :param wait_for_removal: Specifies whether to wait for all excluded nodes to @@ -680,7 +680,7 @@ def post_voting_config_exclusions( They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes.

        - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. :param node_ids: A comma-separated list of the persistent ids of the nodes to diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py index c98a22de8..e15e6e226 100644 --- a/elasticsearch/_sync/client/inference.py +++ b/elasticsearch/_sync/client/inference.py @@ -234,6 +234,113 @@ def get( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=("input", "query", "task_settings"), + ) + def inference( + self, + *, + inference_id: str, + input: t.Optional[t.Union[str, t.Sequence[str]]] = None, + task_type: t.Optional[ + t.Union[ + str, + t.Literal[ + "chat_completion", + "completion", + "rerank", + "sparse_embedding", + "text_embedding", + ], + ] + ] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + query: t.Optional[str] = None, + task_settings: t.Optional[t.Any] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

        Perform inference on the service.

        +

        This API enables you to use machine learning models to perform specific tasks on data that you provide as an input. + It returns a response with the results of the tasks. + The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.

        +

        For details about using this API with a service, such as Amazon Bedrock, Anthropic, or HuggingFace, refer to the service-specific documentation.

        +
        +

        info + The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.

        +
        + + + ``_ + + :param inference_id: The unique identifier for the inference endpoint. + :param input: The text on which you want to perform the inference task. It can + be a single string or an array. > info > Inference endpoints for the `completion` + task type currently only support a single string as input. + :param task_type: The type of inference task that the model performs. + :param query: The query input, which is required only for the `rerank` task. + It is not required for other tasks. + :param task_settings: Task settings for the individual inference request. These + settings are specific to the task type you specified and override the task + settings specified when initializing the service. + :param timeout: The amount of time to wait for the inference request to complete. + """ + if inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'inference_id'") + if input is None and body is None: + raise ValueError("Empty value passed for parameter 'input'") + __path_parts: t.Dict[str, str] + if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: + __path_parts = { + "task_type": _quote(task_type), + "inference_id": _quote(inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}' + elif inference_id not in SKIP_IN_PATH: + __path_parts = {"inference_id": _quote(inference_id)} + __path = f'/_inference/{__path_parts["inference_id"]}' + else: + raise ValueError("Couldn't find a path for the given parameters") + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + if not __body: + if input is not None: + __body["input"] = input + if query is not None: + __body["query"] = query + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "POST", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.inference", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_name="inference_config", ) diff --git a/elasticsearch/_sync/client/license.py b/elasticsearch/_sync/client/license.py index 302ae7ea6..2b1174121 100644 --- a/elasticsearch/_sync/client/license.py +++ b/elasticsearch/_sync/client/license.py @@ -237,7 +237,7 @@ def post( If the operator privileges feature is enabled, only operator users can use this API.

        - ``_ + ``_ :param acknowledge: Specifies whether you acknowledge the license changes. :param license: diff --git a/elasticsearch/_sync/client/ml.py b/elasticsearch/_sync/client/ml.py index 46104a32e..15ebddb41 100644 --- a/elasticsearch/_sync/client/ml.py +++ b/elasticsearch/_sync/client/ml.py @@ -1676,7 +1676,7 @@ def get_data_frame_analytics_stats( """ .. raw:: html -

        Get data frame analytics jobs usage info.

        +

        Get data frame analytics job stats.

        ``_ @@ -1744,7 +1744,7 @@ def get_datafeed_stats( """ .. raw:: html -

        Get datafeeds usage info. +

        Get datafeed stats. You can get statistics for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using _all, by specifying * as the @@ -2033,7 +2033,7 @@ def get_job_stats( """ .. raw:: html -

        Get anomaly detection jobs usage info.

        +

        Get anomaly detection job stats.

        ``_ @@ -5004,7 +5004,7 @@ def update_data_frame_analytics(

        Update a data frame analytics job.

        - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -5577,7 +5577,7 @@ def update_trained_model_deployment(

        Update a trained model deployment.

        - ``_ + ``_ :param model_id: The unique identifier of the trained model. Currently, only PyTorch models are supported. diff --git a/elasticsearch/_sync/client/transform.py b/elasticsearch/_sync/client/transform.py index 184662ab5..13ad2c232 100644 --- a/elasticsearch/_sync/client/transform.py +++ b/elasticsearch/_sync/client/transform.py @@ -795,7 +795,7 @@ def update_transform( time of update and runs with those privileges.

        - ``_ + ``_ :param transform_id: Identifier for the transform. :param defer_validation: When true, deferrable validations are not run. This @@ -890,7 +890,7 @@ def upgrade_transforms( You may want to perform a recent cluster backup prior to the upgrade.

        - ``_ + ``_ :param dry_run: When true, the request checks for updates but does not run them. :param timeout: Period to wait for a response. If no response is received before diff --git a/elasticsearch/_sync/client/watcher.py b/elasticsearch/_sync/client/watcher.py index 7efadfbc1..68f3aa926 100644 --- a/elasticsearch/_sync/client/watcher.py +++ b/elasticsearch/_sync/client/watcher.py @@ -48,7 +48,7 @@ def ack_watch( This happens when the condition of the watch is not met (the condition evaluates to false).

        - ``_ + ``_ :param watch_id: The watch identifier. :param action_id: A comma-separated list of the action identifiers to acknowledge. @@ -104,7 +104,7 @@ def activate_watch( A watch can be either active or inactive.

        - ``_ + ``_ :param watch_id: The watch identifier. """ @@ -148,7 +148,7 @@ def deactivate_watch( A watch can be either active or inactive.

        - ``_ + ``_ :param watch_id: The watch identifier. """ @@ -196,7 +196,7 @@ def delete_watch( When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the .watches index.

        - ``_ + ``_ :param id: The watch identifier. """ @@ -277,7 +277,7 @@ def execute_watch(

        When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information who stored the watch.

        - ``_ + ``_ :param id: The watch identifier. :param action_modes: Determines how to handle the watch actions as part of the @@ -365,7 +365,7 @@ def get_settings( Only a subset of settings are shown, for example index.auto_expand_replicas and index.number_of_replicas.

        - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails @@ -410,7 +410,7 @@ def get_watch(

        Get a watch.

        - ``_ + ``_ :param id: The watch identifier. """ @@ -485,7 +485,7 @@ def put_watch( If the user is able to read index a, but not index b, the same will apply when the watch runs.

        - ``_ + ``_ :param id: The identifier for the watch. :param actions: The list of actions that will be run if the condition matches. @@ -598,7 +598,7 @@ def query_watches(

        Note that only the _id and metadata.* fields are queryable or sortable.

        - ``_ + ``_ :param from_: The offset from the first result to fetch. It must be non-negative. :param query: A query that filters the watches to be returned. @@ -673,7 +673,7 @@ def start( Start the Watcher service if it is not already running.

        - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. """ @@ -739,7 +739,7 @@ def stats( You retrieve more metrics by using the metric parameter.

        - ``_ + ``_ :param metric: Defines which additional metrics are included in the response. :param emit_stacktraces: Defines whether stack traces are generated for each @@ -790,7 +790,7 @@ def stop( Stop the Watcher service if it is running.

        - ``_ + ``_ :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns @@ -851,7 +851,7 @@ def update_settings( Watcher shards must always be in the data_content tier.

        - ``_ + ``_ :param index_auto_expand_replicas: :param index_number_of_replicas: diff --git a/elasticsearch/_sync/client/xpack.py b/elasticsearch/_sync/client/xpack.py index 182715cf7..73121d5cb 100644 --- a/elasticsearch/_sync/client/xpack.py +++ b/elasticsearch/_sync/client/xpack.py @@ -103,7 +103,7 @@ def usage( The API also provides some usage statistics.

        - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails diff --git a/elasticsearch/dsl/field.py b/elasticsearch/dsl/field.py index eb61be48a..726fbe358 100644 --- a/elasticsearch/dsl/field.py +++ b/elasticsearch/dsl/field.py @@ -437,7 +437,9 @@ def __init__( doc_class: Union[Type["InnerDoc"], "DefaultType"] = DEFAULT, *args: Any, enabled: Union[bool, "DefaultType"] = DEFAULT, - subobjects: Union[bool, "DefaultType"] = DEFAULT, + subobjects: Union[ + Literal["true", "false", "auto"], bool, "DefaultType" + ] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], @@ -1109,6 +1111,56 @@ def __init__( super().__init__(*args, **kwargs) +class CountedKeyword(Field): + """ + :arg index: + :arg meta: Metadata about the field. + :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "counted_keyword" + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + + def __init__( + self, + *args: Any, + index: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if index is not DEFAULT: + kwargs["index"] = index + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) + + class Date(Field): """ :arg default_timezone: timezone that will be automatically used for tz-naive values @@ -1118,6 +1170,8 @@ class Date(Field): :arg format: :arg ignore_malformed: :arg index: + :arg script: + :arg on_script_error: :arg null_value: :arg precision_step: :arg locale: @@ -1150,6 +1204,8 @@ def __init__( format: Union[str, "DefaultType"] = DEFAULT, ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, null_value: Any = DEFAULT, precision_step: Union[int, "DefaultType"] = DEFAULT, locale: Union[str, "DefaultType"] = DEFAULT, @@ -1182,6 +1238,10 @@ def __init__( kwargs["ignore_malformed"] = ignore_malformed if index is not DEFAULT: kwargs["index"] = index + if script is not DEFAULT: + kwargs["script"] = script + if on_script_error is not DEFAULT: + kwargs["on_script_error"] = on_script_error if null_value is not DEFAULT: kwargs["null_value"] = null_value if precision_step is not DEFAULT: @@ -1246,6 +1306,8 @@ class DateNanos(Field): :arg format: :arg ignore_malformed: :arg index: + :arg script: + :arg on_script_error: :arg null_value: :arg precision_step: :arg doc_values: @@ -1272,6 +1334,8 @@ def __init__( format: Union[str, 
"DefaultType"] = DEFAULT, ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, + script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, null_value: Any = DEFAULT, precision_step: Union[int, "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, @@ -1301,6 +1365,10 @@ def __init__( kwargs["ignore_malformed"] = ignore_malformed if index is not DEFAULT: kwargs["index"] = index + if script is not DEFAULT: + kwargs["script"] = script + if on_script_error is not DEFAULT: + kwargs["on_script_error"] = on_script_error if null_value is not DEFAULT: kwargs["null_value"] = null_value if precision_step is not DEFAULT: @@ -3085,6 +3153,76 @@ def __init__( super().__init__(*args, **kwargs) +class Passthrough(Field): + """ + :arg enabled: + :arg priority: + :arg time_series_dimension: + :arg copy_to: + :arg store: + :arg meta: Metadata about the field. + :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "passthrough" + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + + def __init__( + self, + *args: Any, + enabled: Union[bool, "DefaultType"] = DEFAULT, + priority: Union[int, "DefaultType"] = DEFAULT, + time_series_dimension: Union[bool, "DefaultType"] = DEFAULT, + copy_to: Union[ + Union[str, "InstrumentedField"], + Sequence[Union[str, "InstrumentedField"]], + "DefaultType", + ] = DEFAULT, + store: Union[bool, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if enabled is not DEFAULT: + kwargs["enabled"] = enabled + if priority is not DEFAULT: + kwargs["priority"] = priority + if time_series_dimension is not DEFAULT: + kwargs["time_series_dimension"] = time_series_dimension + if copy_to is not DEFAULT: + kwargs["copy_to"] = str(copy_to) + if store is not DEFAULT: + kwargs["store"] = store + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) + + class Percolator(Field): """ :arg meta: Metadata about the field. diff --git a/elasticsearch/dsl/query.py b/elasticsearch/dsl/query.py index 6e87f926c..c58d3080a 100644 --- a/elasticsearch/dsl/query.py +++ b/elasticsearch/dsl/query.py @@ -1083,6 +1083,8 @@ class Knn(Query): :arg filter: Filters for the kNN search query :arg similarity: The minimum similarity for a vector to be considered a match + :arg rescore_vector: Apply oversampling and rescoring to quantized + vectors * :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. 
A boost value between 0 and 1.0 decreases @@ -1108,6 +1110,9 @@ def __init__( k: Union[int, "DefaultType"] = DEFAULT, filter: Union[Query, Sequence[Query], "DefaultType"] = DEFAULT, similarity: Union[float, "DefaultType"] = DEFAULT, + rescore_vector: Union[ + "types.RescoreVector", Dict[str, Any], "DefaultType" + ] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, @@ -1120,6 +1125,7 @@ def __init__( k=k, filter=filter, similarity=similarity, + rescore_vector=rescore_vector, boost=boost, _name=_name, **kwargs, diff --git a/elasticsearch/dsl/types.py b/elasticsearch/dsl/types.py index 7474769c6..0cd673bda 100644 --- a/elasticsearch/dsl/types.py +++ b/elasticsearch/dsl/types.py @@ -380,15 +380,24 @@ class DenseVectorIndexOptions(AttrDict[Any]): `int4_flat` index types. :arg ef_construction: The number of candidates to track while assembling the list of nearest neighbors for each new node. Only - applicable to `hnsw`, `int8_hnsw`, and `int4_hnsw` index types. - Defaults to `100` if omitted. + applicable to `hnsw`, `int8_hnsw`, `bbq_hnsw`, and `int4_hnsw` + index types. Defaults to `100` if omitted. :arg m: The number of neighbors each node will be connected to in the - HNSW graph. Only applicable to `hnsw`, `int8_hnsw`, and - `int4_hnsw` index types. Defaults to `16` if omitted. + HNSW graph. Only applicable to `hnsw`, `int8_hnsw`, `bbq_hnsw`, + and `int4_hnsw` index types. Defaults to `16` if omitted. """ type: Union[ - Literal["flat", "hnsw", "int4_flat", "int4_hnsw", "int8_flat", "int8_hnsw"], + Literal[ + "bbq_flat", + "bbq_hnsw", + "flat", + "hnsw", + "int4_flat", + "int4_hnsw", + "int8_flat", + "int8_hnsw", + ], DefaultType, ] confidence_interval: Union[float, DefaultType] @@ -399,7 +408,16 @@ def __init__( self, *, type: Union[ - Literal["flat", "hnsw", "int4_flat", "int4_hnsw", "int8_flat", "int8_hnsw"], + Literal[ + "bbq_flat", + "bbq_hnsw", + "flat", + "hnsw", + "int4_flat", + "int4_hnsw", + "int8_flat", + "int8_hnsw", + ], DefaultType, ] = DEFAULT, confidence_interval: Union[float, DefaultType] = DEFAULT, @@ -591,6 +609,7 @@ class FieldSort(AttrDict[Any]): "completion", "nested", "object", + "passthrough", "version", "murmur3", "token_count", @@ -617,6 +636,7 @@ class FieldSort(AttrDict[Any]): "shape", "histogram", "constant_keyword", + "counted_keyword", "aggregate_metric_double", "dense_vector", "semantic_text", @@ -654,6 +674,7 @@ def __init__( "completion", "nested", "object", + "passthrough", "version", "murmur3", "token_count", @@ -680,6 +701,7 @@ def __init__( "shape", "histogram", "constant_keyword", + "counted_keyword", "aggregate_metric_double", "dense_vector", "semantic_text", @@ -2850,6 +2872,22 @@ def __init__( super().__init__(kwargs) +class RescoreVector(AttrDict[Any]): + """ + :arg oversample: (required) Applies the specified oversample factor to + k on the approximate kNN search + """ + + oversample: Union[float, DefaultType] + + def __init__( + self, *, oversample: Union[float, DefaultType] = DEFAULT, **kwargs: Any + ): + if oversample is not DEFAULT: + kwargs["oversample"] = oversample + super().__init__(kwargs) + + class ScoreSort(AttrDict[Any]): """ :arg order: From 82c9d9854c333802d0a80ef912e76e28a1a92bc2 Mon Sep 17 00:00:00 2001 From: Quentin Pradet Date: Tue, 29 Apr 2025 15:38:15 +0400 Subject: [PATCH 54/65] Bring 8.18.1 release to parent (#2950) --- docs/guide/release-notes.asciidoc | 10 ++++++++++ elasticsearch/_version.py | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git 
a/docs/guide/release-notes.asciidoc b/docs/guide/release-notes.asciidoc index f9559db86..6973dc807 100644 --- a/docs/guide/release-notes.asciidoc +++ b/docs/guide/release-notes.asciidoc @@ -1,6 +1,16 @@ [[release-notes]] == Release notes +=== 8.18.1 (2025-04-29) + +- Update APIs + * Add back `inference.inference` APIs that were removed by mistake +- Update DSL + * Add `CountedKeyword` and `Passthrough` fields + * Add `rescore_vector` to Knn query + * Add `bbq_flat` and `bbq_hnsw` vector index types + * Add `script` and `on_script_error` to `Date` and `DateNanos` fields + === 8.18.0 (2025-04-15) - Merge `Elasticsearch-DSL `_ package (https://github.com/elastic/elasticsearch-py/pull/2736[#2736]) diff --git a/elasticsearch/_version.py b/elasticsearch/_version.py index 030a7ff29..29b586d2c 100644 --- a/elasticsearch/_version.py +++ b/elasticsearch/_version.py @@ -15,4 +15,4 @@ # specific language governing permissions and limitations # under the License. -__versionstr__ = "8.18.0" +__versionstr__ = "8.18.1" From 7443d3b59a1f56f0f708db5c4554d253d968f565 Mon Sep 17 00:00:00 2001 From: Quentin Pradet Date: Fri, 30 May 2025 12:48:46 +0400 Subject: [PATCH 55/65] Stop using event_loop fixture (#2969) (#2972) It was removed in pytest-asyncio 1.0. (cherry picked from commit 3c9680a5cf0b67a56356bf73173a7d5eabb2e552) # Conflicts: # docs/reference/async.md --- docs/guide/async.asciidoc | 3 +-- test_elasticsearch/test_async/test_transport.py | 6 ++++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/docs/guide/async.asciidoc b/docs/guide/async.asciidoc index 9f3c04acd..22799a80c 100644 --- a/docs/guide/async.asciidoc +++ b/docs/guide/async.asciidoc @@ -37,8 +37,7 @@ async def main(): ) print(resp) -loop = asyncio.get_event_loop() -loop.run_until_complete(main()) +asyncio.run(main()) ---- All APIs that are available under the sync client are also available diff --git a/test_elasticsearch/test_async/test_transport.py b/test_elasticsearch/test_async/test_transport.py index 76a71f50b..70d67765f 100644 --- a/test_elasticsearch/test_async/test_transport.py +++ b/test_elasticsearch/test_async/test_transport.py @@ -527,7 +527,8 @@ async def test_sniff_on_node_failure_triggers(self, extra_key, extra_value): assert request_failed_in_error assert len(client.transport.node_pool) == 3 - async def test_sniff_after_n_seconds(self, event_loop): + async def test_sniff_after_n_seconds(self): + event_loop = asyncio.get_running_loop() client = AsyncElasticsearch( # noqa: F821 [NodeConfig("http", "localhost", 9200, _extras={"data": CLUSTER_NODES})], node_class=DummyNode, @@ -581,7 +582,8 @@ async def test_sniffing_disabled_on_elastic_cloud(self, kwargs): == "Sniffing should not be enabled when connecting to Elastic Cloud" ) - async def test_sniff_on_start_close_unlocks_async_calls(self, event_loop): + async def test_sniff_on_start_close_unlocks_async_calls(self): + event_loop = asyncio.get_running_loop() client = AsyncElasticsearch( # noqa: F821 [ NodeConfig( From 605cd09f08899bddf6130c3821348e09b631d57f Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 30 May 2025 12:27:21 +0100 Subject: [PATCH 56/65] Fix some new type warnings from mypy (#2974) (#2975) (cherry picked from commit 63efa48aabc353f806ef0a0b07add5130136fc5d) Co-authored-by: Miguel Grinberg --- elasticsearch/dsl/_async/document.py | 2 +- elasticsearch/dsl/_sync/document.py | 2 +- elasticsearch/dsl/field.py | 12 ++++++- elasticsearch/dsl/query.py | 44 ++++++++++++++++++++++- 
elasticsearch/dsl/types.py | 54 ++++++++++++++++++++++------ 5 files changed, 100 insertions(+), 14 deletions(-) diff --git a/elasticsearch/dsl/_async/document.py b/elasticsearch/dsl/_async/document.py index 4b7654761..de6e9eecc 100644 --- a/elasticsearch/dsl/_async/document.py +++ b/elasticsearch/dsl/_async/document.py @@ -96,7 +96,7 @@ class AsyncDocument(DocumentBase, metaclass=AsyncIndexMeta): @classmethod def _get_using(cls, using: Optional[AsyncUsingType] = None) -> AsyncUsingType: - return cast(AsyncUsingType, using or cls._index._using) + return using or cls._index._using @classmethod def _get_connection( diff --git a/elasticsearch/dsl/_sync/document.py b/elasticsearch/dsl/_sync/document.py index 316ece5cb..f68be4aae 100644 --- a/elasticsearch/dsl/_sync/document.py +++ b/elasticsearch/dsl/_sync/document.py @@ -92,7 +92,7 @@ class Document(DocumentBase, metaclass=IndexMeta): @classmethod def _get_using(cls, using: Optional[UsingType] = None) -> UsingType: - return cast(UsingType, using or cls._index._using) + return using or cls._index._using @classmethod def _get_connection(cls, using: Optional[UsingType] = None) -> "Elasticsearch": diff --git a/elasticsearch/dsl/field.py b/elasticsearch/dsl/field.py index 726fbe358..e3ed5dfcd 100644 --- a/elasticsearch/dsl/field.py +++ b/elasticsearch/dsl/field.py @@ -1290,7 +1290,7 @@ def _deserialize(self, data: Any) -> Union[datetime, date]: if isinstance(data, datetime): if self._default_timezone and data.tzinfo is None: data = data.replace(tzinfo=self._default_timezone) - return data + return cast(datetime, data) if isinstance(data, date): return data if isinstance(data, int): @@ -3689,6 +3689,11 @@ class SemanticText(Field): by using the Update mapping API. Use the Create inference API to create the endpoint. If not specified, the inference endpoint defined by inference_id will be used at both index and query time. + :arg chunking_settings: Settings for chunking text into smaller + passages. If specified, these will override the chunking settings + sent in the inference endpoint associated with inference_id. If + chunking settings are updated, they will not be applied to + existing documents until they are reindexed. 
""" name = "semantic_text" @@ -3699,6 +3704,9 @@ def __init__( meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, inference_id: Union[str, "DefaultType"] = DEFAULT, search_inference_id: Union[str, "DefaultType"] = DEFAULT, + chunking_settings: Union[ + "types.ChunkingSettings", Dict[str, Any], "DefaultType" + ] = DEFAULT, **kwargs: Any, ): if meta is not DEFAULT: @@ -3707,6 +3715,8 @@ def __init__( kwargs["inference_id"] = inference_id if search_inference_id is not DEFAULT: kwargs["search_inference_id"] = search_inference_id + if chunking_settings is not DEFAULT: + kwargs["chunking_settings"] = chunking_settings super().__init__(*args, **kwargs) diff --git a/elasticsearch/dsl/query.py b/elasticsearch/dsl/query.py index c58d3080a..720f49f78 100644 --- a/elasticsearch/dsl/query.py +++ b/elasticsearch/dsl/query.py @@ -1382,7 +1382,49 @@ def __init__( min_term_freq: Union[int, "DefaultType"] = DEFAULT, min_word_length: Union[int, "DefaultType"] = DEFAULT, routing: Union[str, "DefaultType"] = DEFAULT, - stop_words: Union[str, Sequence[str], "DefaultType"] = DEFAULT, + stop_words: Union[ + Literal[ + "_arabic_", + "_armenian_", + "_basque_", + "_bengali_", + "_brazilian_", + "_bulgarian_", + "_catalan_", + "_cjk_", + "_czech_", + "_danish_", + "_dutch_", + "_english_", + "_estonian_", + "_finnish_", + "_french_", + "_galician_", + "_german_", + "_greek_", + "_hindi_", + "_hungarian_", + "_indonesian_", + "_irish_", + "_italian_", + "_latvian_", + "_lithuanian_", + "_norwegian_", + "_persian_", + "_portuguese_", + "_romanian_", + "_russian_", + "_serbian_", + "_sorani_", + "_spanish_", + "_swedish_", + "_thai_", + "_turkish_", + "_none_", + ], + Sequence[str], + "DefaultType", + ] = DEFAULT, unlike: Union[ Union[str, "types.LikeDocument"], Sequence[Union[str, "types.LikeDocument"]], diff --git a/elasticsearch/dsl/types.py b/elasticsearch/dsl/types.py index 0cd673bda..d21f70698 100644 --- a/elasticsearch/dsl/types.py +++ b/elasticsearch/dsl/types.py @@ -170,6 +170,48 @@ def __init__( super().__init__(kwargs) +class ChunkingSettings(AttrDict[Any]): + """ + :arg strategy: (required) The chunking strategy: `sentence` or `word`. + Defaults to `sentence` if omitted. + :arg max_chunk_size: (required) The maximum size of a chunk in words. + This value cannot be higher than `300` or lower than `20` (for + `sentence` strategy) or `10` (for `word` strategy). Defaults to + `250` if omitted. + :arg overlap: The number of overlapping words for chunks. It is + applicable only to a `word` chunking strategy. This value cannot + be higher than half the `max_chunk_size` value. Defaults to `100` + if omitted. + :arg sentence_overlap: The number of overlapping sentences for chunks. + It is applicable only for a `sentence` chunking strategy. It can + be either `1` or `0`. Defaults to `1` if omitted. 
+ """ + + strategy: Union[str, DefaultType] + max_chunk_size: Union[int, DefaultType] + overlap: Union[int, DefaultType] + sentence_overlap: Union[int, DefaultType] + + def __init__( + self, + *, + strategy: Union[str, DefaultType] = DEFAULT, + max_chunk_size: Union[int, DefaultType] = DEFAULT, + overlap: Union[int, DefaultType] = DEFAULT, + sentence_overlap: Union[int, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if strategy is not DEFAULT: + kwargs["strategy"] = strategy + if max_chunk_size is not DEFAULT: + kwargs["max_chunk_size"] = max_chunk_size + if overlap is not DEFAULT: + kwargs["overlap"] = overlap + if sentence_overlap is not DEFAULT: + kwargs["sentence_overlap"] = sentence_overlap + super().__init__(kwargs) + + class ClassificationInferenceOptions(AttrDict[Any]): """ :arg num_top_classes: Specifies the number of top class predictions to @@ -1617,11 +1659,7 @@ class InnerHits(AttrDict[Any]): DefaultType, ] seq_no_primary_term: Union[bool, DefaultType] - fields: Union[ - Union[str, InstrumentedField], - Sequence[Union[str, InstrumentedField]], - DefaultType, - ] + fields: Union[Sequence[Union[str, InstrumentedField]], DefaultType] sort: Union[ Union[Union[str, InstrumentedField], "SortOptions"], Sequence[Union[Union[str, InstrumentedField], "SortOptions"]], @@ -1656,11 +1694,7 @@ def __init__( DefaultType, ] = DEFAULT, seq_no_primary_term: Union[bool, DefaultType] = DEFAULT, - fields: Union[ - Union[str, InstrumentedField], - Sequence[Union[str, InstrumentedField]], - DefaultType, - ] = DEFAULT, + fields: Union[Sequence[Union[str, InstrumentedField]], DefaultType] = DEFAULT, sort: Union[ Union[Union[str, InstrumentedField], "SortOptions"], Sequence[Union[Union[str, InstrumentedField], "SortOptions"]], From 178a3d04a68772057d254065e0f05faa2b558c29 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 4 Jun 2025 12:59:25 +0100 Subject: [PATCH 57/65] Add the recent mypy fix to field.py to the template for this file (#2979) (#2980) (cherry picked from commit e616da96335f59849e193716132de8769acc9813) Co-authored-by: Miguel Grinberg --- utils/templates/field.py.tpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/templates/field.py.tpl b/utils/templates/field.py.tpl index 95ee2f391..030060d23 100644 --- a/utils/templates/field.py.tpl +++ b/utils/templates/field.py.tpl @@ -367,7 +367,7 @@ class {{ k.name }}({{ k.parent }}): if isinstance(data, datetime): if self._default_timezone and data.tzinfo is None: data = data.replace(tzinfo=self._default_timezone) - return data + return cast(datetime, data) if isinstance(data, date): return data if isinstance(data, int): From 5d86ccd002852dce223fce59ae8346d306be8f52 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 30 Jun 2025 17:20:07 +0100 Subject: [PATCH 58/65] [Backport 8.19] Handle lists in `copy_to` field option correctly (Fixes #2992) (#2994) * Handle lists in `copy_to` field option correctly (Fixes #2992) (#2993) * Handle lists in `copy_to` field option correctly (Fixes #2992) * add integration test (cherry picked from commit 44cbf67bf9cfc29bf4253cff3e48fb0286e471ff) * update generated files --------- Co-authored-by: Miguel Grinberg --- elasticsearch/dsl/aggs.py | 5 +- elasticsearch/dsl/field.py | 269 +++++++++++++++--- elasticsearch/dsl/query.py | 9 +- elasticsearch/dsl/types.py | 151 ++++++---- .../test_integration/_async/test_document.py | 53 +++- 
.../test_integration/_sync/test_document.py | 49 +++- utils/templates/field.py.tpl | 7 + 7 files changed, 440 insertions(+), 103 deletions(-) diff --git a/elasticsearch/dsl/aggs.py b/elasticsearch/dsl/aggs.py index ba5150803..3b7a8e8ba 100644 --- a/elasticsearch/dsl/aggs.py +++ b/elasticsearch/dsl/aggs.py @@ -678,9 +678,8 @@ class CategorizeText(Bucket[_R]): :arg categorization_analyzer: The categorization analyzer specifies how the text is analyzed and tokenized before being categorized. The syntax is very similar to that used to define the analyzer in - the [Analyze endpoint](https://www.elastic.co/guide/en/elasticsear - ch/reference/8.0/indices-analyze.html). This property cannot be - used at the same time as categorization_filters. + the `_analyze` endpoint. This property cannot be used at the same + time as categorization_filters. :arg shard_size: The number of categorization buckets to return from each shard before merging all the results. :arg size: The number of buckets to return. Defaults to `10` if diff --git a/elasticsearch/dsl/field.py b/elasticsearch/dsl/field.py index e3ed5dfcd..1aa7a4bca 100644 --- a/elasticsearch/dsl/field.py +++ b/elasticsearch/dsl/field.py @@ -280,7 +280,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -387,7 +390,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -463,7 +469,10 @@ def __init__( if subobjects is not DEFAULT: kwargs["subobjects"] = subobjects if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -575,6 +584,7 @@ class AggregateMetricDouble(Field): """ :arg default_metric: (required) :arg metrics: (required) + :arg ignore_malformed: :arg time_series_metric: :arg meta: Metadata about the field. 
:arg properties: @@ -595,6 +605,7 @@ def __init__( *args: Any, default_metric: Union[str, "DefaultType"] = DEFAULT, metrics: Union[Sequence[str], "DefaultType"] = DEFAULT, + ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, time_series_metric: Union[ Literal["gauge", "counter", "summary", "histogram", "position"], "DefaultType", @@ -615,6 +626,8 @@ def __init__( kwargs["default_metric"] = default_metric if metrics is not DEFAULT: kwargs["metrics"] = metrics + if ignore_malformed is not DEFAULT: + kwargs["ignore_malformed"] = ignore_malformed if time_series_metric is not DEFAULT: kwargs["time_series_metric"] = time_series_metric if meta is not DEFAULT: @@ -727,7 +740,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -838,7 +854,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -953,7 +972,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -1043,7 +1065,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -1251,7 +1276,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -1376,7 +1404,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -1455,7 +1486,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -1658,7 +1692,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: 
kwargs["store"] = store if meta is not DEFAULT: @@ -1733,7 +1770,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -1762,6 +1802,7 @@ class Flattened(Field): :arg null_value: :arg similarity: :arg split_queries_on_whitespace: + :arg time_series_dimensions: :arg meta: Metadata about the field. :arg properties: :arg ignore_above: @@ -1790,6 +1831,7 @@ def __init__( null_value: Union[str, "DefaultType"] = DEFAULT, similarity: Union[str, "DefaultType"] = DEFAULT, split_queries_on_whitespace: Union[bool, "DefaultType"] = DEFAULT, + time_series_dimensions: Union[Sequence[str], "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, @@ -1820,6 +1862,8 @@ def __init__( kwargs["similarity"] = similarity if split_queries_on_whitespace is not DEFAULT: kwargs["split_queries_on_whitespace"] = split_queries_on_whitespace + if time_series_dimensions is not DEFAULT: + kwargs["time_series_dimensions"] = time_series_dimensions if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: @@ -1892,7 +1936,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -1918,6 +1965,7 @@ class GeoPoint(Field): :arg index: :arg on_script_error: :arg script: + :arg time_series_metric: :arg doc_values: :arg copy_to: :arg store: @@ -1951,6 +1999,9 @@ def __init__( index: Union[bool, "DefaultType"] = DEFAULT, on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, + time_series_metric: Union[ + Literal["gauge", "counter", "position"], "DefaultType" + ] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], @@ -1982,10 +2033,15 @@ def __init__( kwargs["on_script_error"] = on_script_error if script is not DEFAULT: kwargs["script"] = script + if time_series_metric is not DEFAULT: + kwargs["time_series_metric"] = time_series_metric if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -2074,7 +2130,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -2177,7 +2236,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = 
[str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -2360,7 +2422,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -2435,7 +2500,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -2527,7 +2595,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -2611,7 +2682,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -2781,7 +2855,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -2884,7 +2961,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -2959,7 +3039,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -3016,7 +3099,10 @@ def __init__( if meta is not DEFAULT: kwargs["meta"] = meta if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) super().__init__(*args, **kwargs) @@ -3064,7 +3150,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -3134,7 +3223,10 @@ def __init__( if include_in_root is not DEFAULT: kwargs["include_in_root"] = include_in_root if copy_to is not DEFAULT: - 
kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -3205,7 +3297,10 @@ def __init__( if time_series_dimension is not DEFAULT: kwargs["time_series_dimension"] = time_series_dimension if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -3334,7 +3429,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -3452,6 +3550,62 @@ def __init__( super().__init__(*args, **kwargs) +class RankVectors(Field): + """ + Technical preview + + :arg element_type: + :arg dims: + :arg meta: Metadata about the field. + :arg properties: + :arg ignore_above: + :arg dynamic: + :arg fields: + :arg synthetic_source_keep: + """ + + name = "rank_vectors" + _param_defs = { + "properties": {"type": "field", "hash": True}, + "fields": {"type": "field", "hash": True}, + } + + def __init__( + self, + *args: Any, + element_type: Union[Literal["byte", "float", "bit"], "DefaultType"] = DEFAULT, + dims: Union[int, "DefaultType"] = DEFAULT, + meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, + properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + ignore_above: Union[int, "DefaultType"] = DEFAULT, + dynamic: Union[ + Literal["strict", "runtime", "true", "false"], bool, "DefaultType" + ] = DEFAULT, + fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, + synthetic_source_keep: Union[ + Literal["none", "arrays", "all"], "DefaultType" + ] = DEFAULT, + **kwargs: Any, + ): + if element_type is not DEFAULT: + kwargs["element_type"] = element_type + if dims is not DEFAULT: + kwargs["dims"] = dims + if meta is not DEFAULT: + kwargs["meta"] = meta + if properties is not DEFAULT: + kwargs["properties"] = properties + if ignore_above is not DEFAULT: + kwargs["ignore_above"] = ignore_above + if dynamic is not DEFAULT: + kwargs["dynamic"] = dynamic + if fields is not DEFAULT: + kwargs["fields"] = fields + if synthetic_source_keep is not DEFAULT: + kwargs["synthetic_source_keep"] = synthetic_source_keep + super().__init__(*args, **kwargs) + + class ScaledFloat(Float): """ :arg null_value: @@ -3541,7 +3695,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -3657,7 +3814,10 @@ def __init__( if term_vector is not DEFAULT: kwargs["term_vector"] = term_vector if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -3689,11 +3849,6 @@ class SemanticText(Field): by using the Update mapping API. 
Use the Create inference API to create the endpoint. If not specified, the inference endpoint defined by inference_id will be used at both index and query time. - :arg chunking_settings: Settings for chunking text into smaller - passages. If specified, these will override the chunking settings - sent in the inference endpoint associated with inference_id. If - chunking settings are updated, they will not be applied to - existing documents until they are reindexed. """ name = "semantic_text" @@ -3704,9 +3859,6 @@ def __init__( meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, inference_id: Union[str, "DefaultType"] = DEFAULT, search_inference_id: Union[str, "DefaultType"] = DEFAULT, - chunking_settings: Union[ - "types.ChunkingSettings", Dict[str, Any], "DefaultType" - ] = DEFAULT, **kwargs: Any, ): if meta is not DEFAULT: @@ -3715,8 +3867,6 @@ def __init__( kwargs["inference_id"] = inference_id if search_inference_id is not DEFAULT: kwargs["search_inference_id"] = search_inference_id - if chunking_settings is not DEFAULT: - kwargs["chunking_settings"] = chunking_settings super().__init__(*args, **kwargs) @@ -3783,7 +3933,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -3886,7 +4039,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -3906,6 +4062,7 @@ def __init__( class SparseVector(Field): """ + :arg store: :arg meta: Metadata about the field. 
:arg properties: :arg ignore_above: @@ -3923,6 +4080,7 @@ class SparseVector(Field): def __init__( self, *args: Any, + store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, @@ -3935,6 +4093,8 @@ def __init__( ] = DEFAULT, **kwargs: Any, ): + if store is not DEFAULT: + kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: @@ -4070,7 +4230,10 @@ def __init__( if term_vector is not DEFAULT: kwargs["term_vector"] = term_vector if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -4153,7 +4316,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -4256,7 +4422,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -4318,7 +4487,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: @@ -4384,7 +4556,10 @@ def __init__( if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: - kwargs["copy_to"] = str(copy_to) + if isinstance(copy_to, list): + kwargs["copy_to"] = [str(field) for field in copy_to] + else: + kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: diff --git a/elasticsearch/dsl/query.py b/elasticsearch/dsl/query.py index 720f49f78..f203332e4 100644 --- a/elasticsearch/dsl/query.py +++ b/elasticsearch/dsl/query.py @@ -2034,8 +2034,9 @@ def __init__( class Rule(Query): """ :arg organic: (required) - :arg ruleset_ids: (required) :arg match_criteria: (required) + :arg ruleset_ids: + :arg ruleset_id: :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. 
A boost value between 0 and 1.0 decreases @@ -2053,16 +2054,18 @@ def __init__( self, *, organic: Union[Query, "DefaultType"] = DEFAULT, - ruleset_ids: Union[Sequence[str], "DefaultType"] = DEFAULT, match_criteria: Any = DEFAULT, + ruleset_ids: Union[str, Sequence[str], "DefaultType"] = DEFAULT, + ruleset_id: Union[str, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( organic=organic, - ruleset_ids=ruleset_ids, match_criteria=match_criteria, + ruleset_ids=ruleset_ids, + ruleset_id=ruleset_id, boost=boost, _name=_name, **kwargs, diff --git a/elasticsearch/dsl/types.py b/elasticsearch/dsl/types.py index d21f70698..5d7b88b32 100644 --- a/elasticsearch/dsl/types.py +++ b/elasticsearch/dsl/types.py @@ -170,48 +170,6 @@ def __init__( super().__init__(kwargs) -class ChunkingSettings(AttrDict[Any]): - """ - :arg strategy: (required) The chunking strategy: `sentence` or `word`. - Defaults to `sentence` if omitted. - :arg max_chunk_size: (required) The maximum size of a chunk in words. - This value cannot be higher than `300` or lower than `20` (for - `sentence` strategy) or `10` (for `word` strategy). Defaults to - `250` if omitted. - :arg overlap: The number of overlapping words for chunks. It is - applicable only to a `word` chunking strategy. This value cannot - be higher than half the `max_chunk_size` value. Defaults to `100` - if omitted. - :arg sentence_overlap: The number of overlapping sentences for chunks. - It is applicable only for a `sentence` chunking strategy. It can - be either `1` or `0`. Defaults to `1` if omitted. - """ - - strategy: Union[str, DefaultType] - max_chunk_size: Union[int, DefaultType] - overlap: Union[int, DefaultType] - sentence_overlap: Union[int, DefaultType] - - def __init__( - self, - *, - strategy: Union[str, DefaultType] = DEFAULT, - max_chunk_size: Union[int, DefaultType] = DEFAULT, - overlap: Union[int, DefaultType] = DEFAULT, - sentence_overlap: Union[int, DefaultType] = DEFAULT, - **kwargs: Any, - ): - if strategy is not DEFAULT: - kwargs["strategy"] = strategy - if max_chunk_size is not DEFAULT: - kwargs["max_chunk_size"] = max_chunk_size - if overlap is not DEFAULT: - kwargs["overlap"] = overlap - if sentence_overlap is not DEFAULT: - kwargs["sentence_overlap"] = sentence_overlap - super().__init__(kwargs) - - class ClassificationInferenceOptions(AttrDict[Any]): """ :arg num_top_classes: Specifies the number of top class predictions to @@ -969,7 +927,7 @@ def __init__( class GeoGridQuery(AttrDict[Any]): """ - :arg geogrid: + :arg geotile: :arg geohash: :arg geohex: :arg boost: Floating point number used to decrease or increase the @@ -980,7 +938,7 @@ class GeoGridQuery(AttrDict[Any]): :arg _name: """ - geogrid: Union[str, DefaultType] + geotile: Union[str, DefaultType] geohash: Union[str, DefaultType] geohex: Union[str, DefaultType] boost: Union[float, DefaultType] @@ -989,15 +947,15 @@ class GeoGridQuery(AttrDict[Any]): def __init__( self, *, - geogrid: Union[str, DefaultType] = DEFAULT, + geotile: Union[str, DefaultType] = DEFAULT, geohash: Union[str, DefaultType] = DEFAULT, geohex: Union[str, DefaultType] = DEFAULT, boost: Union[float, DefaultType] = DEFAULT, _name: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): - if geogrid is not DEFAULT: - kwargs["geogrid"] = geogrid + if geotile is not DEFAULT: + kwargs["geotile"] = geotile if geohash is not DEFAULT: kwargs["geohash"] = geohash if geohex is not DEFAULT: @@ -1823,6 +1781,8 @@ class 
IntervalsContainer(AttrDict[Any]): :arg match: Matches analyzed text. :arg prefix: Matches terms that start with a specified set of characters. + :arg range: + :arg regexp: :arg wildcard: Matches terms using a wildcard pattern. """ @@ -1831,6 +1791,8 @@ class IntervalsContainer(AttrDict[Any]): fuzzy: Union["IntervalsFuzzy", Dict[str, Any], DefaultType] match: Union["IntervalsMatch", Dict[str, Any], DefaultType] prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType] + range: Union["IntervalsRange", Dict[str, Any], DefaultType] + regexp: Union["IntervalsRegexp", Dict[str, Any], DefaultType] wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType] def __init__( @@ -1841,6 +1803,8 @@ def __init__( fuzzy: Union["IntervalsFuzzy", Dict[str, Any], DefaultType] = DEFAULT, match: Union["IntervalsMatch", Dict[str, Any], DefaultType] = DEFAULT, prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType] = DEFAULT, + range: Union["IntervalsRange", Dict[str, Any], DefaultType] = DEFAULT, + regexp: Union["IntervalsRegexp", Dict[str, Any], DefaultType] = DEFAULT, wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType] = DEFAULT, **kwargs: Any, ): @@ -1854,6 +1818,10 @@ def __init__( kwargs["match"] = match if prefix is not DEFAULT: kwargs["prefix"] = prefix + if range is not DEFAULT: + kwargs["range"] = range + if regexp is not DEFAULT: + kwargs["regexp"] = regexp if wildcard is not DEFAULT: kwargs["wildcard"] = wildcard super().__init__(kwargs) @@ -2074,6 +2042,8 @@ class IntervalsQuery(AttrDict[Any]): :arg match: Matches analyzed text. :arg prefix: Matches terms that start with a specified set of characters. + :arg range: + :arg regexp: :arg wildcard: Matches terms using a wildcard pattern. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the @@ -2088,6 +2058,8 @@ class IntervalsQuery(AttrDict[Any]): fuzzy: Union["IntervalsFuzzy", Dict[str, Any], DefaultType] match: Union["IntervalsMatch", Dict[str, Any], DefaultType] prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType] + range: Union["IntervalsRange", Dict[str, Any], DefaultType] + regexp: Union["IntervalsRegexp", Dict[str, Any], DefaultType] wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType] boost: Union[float, DefaultType] _name: Union[str, DefaultType] @@ -2100,6 +2072,8 @@ def __init__( fuzzy: Union["IntervalsFuzzy", Dict[str, Any], DefaultType] = DEFAULT, match: Union["IntervalsMatch", Dict[str, Any], DefaultType] = DEFAULT, prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType] = DEFAULT, + range: Union["IntervalsRange", Dict[str, Any], DefaultType] = DEFAULT, + regexp: Union["IntervalsRegexp", Dict[str, Any], DefaultType] = DEFAULT, wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType] = DEFAULT, boost: Union[float, DefaultType] = DEFAULT, _name: Union[str, DefaultType] = DEFAULT, @@ -2115,6 +2089,10 @@ def __init__( kwargs["match"] = match if prefix is not DEFAULT: kwargs["prefix"] = prefix + if range is not DEFAULT: + kwargs["range"] = range + if regexp is not DEFAULT: + kwargs["regexp"] = regexp if wildcard is not DEFAULT: kwargs["wildcard"] = wildcard if boost is not DEFAULT: @@ -2124,6 +2102,83 @@ def __init__( super().__init__(kwargs) +class IntervalsRange(AttrDict[Any]): + """ + :arg analyzer: Analyzer used to analyze the `prefix`. + :arg gte: Lower term, either gte or gt must be provided. + :arg gt: Lower term, either gte or gt must be provided. 
+ :arg lte: Upper term, either lte or lt must be provided. + :arg lt: Upper term, either lte or lt must be provided. + :arg use_field: If specified, match intervals from this field rather + than the top-level field. The `prefix` is normalized using the + search analyzer from this field, unless `analyzer` is specified + separately. + """ + + analyzer: Union[str, DefaultType] + gte: Union[str, DefaultType] + gt: Union[str, DefaultType] + lte: Union[str, DefaultType] + lt: Union[str, DefaultType] + use_field: Union[str, InstrumentedField, DefaultType] + + def __init__( + self, + *, + analyzer: Union[str, DefaultType] = DEFAULT, + gte: Union[str, DefaultType] = DEFAULT, + gt: Union[str, DefaultType] = DEFAULT, + lte: Union[str, DefaultType] = DEFAULT, + lt: Union[str, DefaultType] = DEFAULT, + use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if analyzer is not DEFAULT: + kwargs["analyzer"] = analyzer + if gte is not DEFAULT: + kwargs["gte"] = gte + if gt is not DEFAULT: + kwargs["gt"] = gt + if lte is not DEFAULT: + kwargs["lte"] = lte + if lt is not DEFAULT: + kwargs["lt"] = lt + if use_field is not DEFAULT: + kwargs["use_field"] = str(use_field) + super().__init__(kwargs) + + +class IntervalsRegexp(AttrDict[Any]): + """ + :arg pattern: (required) Regex pattern. + :arg analyzer: Analyzer used to analyze the `prefix`. + :arg use_field: If specified, match intervals from this field rather + than the top-level field. The `prefix` is normalized using the + search analyzer from this field, unless `analyzer` is specified + separately. + """ + + pattern: Union[str, DefaultType] + analyzer: Union[str, DefaultType] + use_field: Union[str, InstrumentedField, DefaultType] + + def __init__( + self, + *, + pattern: Union[str, DefaultType] = DEFAULT, + analyzer: Union[str, DefaultType] = DEFAULT, + use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT, + **kwargs: Any, + ): + if pattern is not DEFAULT: + kwargs["pattern"] = pattern + if analyzer is not DEFAULT: + kwargs["analyzer"] = analyzer + if use_field is not DEFAULT: + kwargs["use_field"] = str(use_field) + super().__init__(kwargs) + + class IntervalsWildcard(AttrDict[Any]): """ :arg pattern: (required) Wildcard pattern used to find matching terms. 
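The `IntervalsRange` and `IntervalsRegexp` types added above back the new `range` and `regexp` rules of the `intervals` query. A minimal sketch follows, assuming a hypothetical `books` index with a `title` text field.

[source, python]
----------------------------
from elasticsearch.dsl import Search
from elasticsearch.dsl.query import Intervals
from elasticsearch.dsl.types import IntervalsQuery, IntervalsRange

# Placeholder index and field names; matches terms between "alpha" and
# "omega" using the new `range` intervals rule.
s = Search(index="books").query(
    Intervals(
        "title",
        IntervalsQuery(range=IntervalsRange(gte="alpha", lte="omega")),
    )
)
print(s.to_dict())
----------------------------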
@@ -4828,7 +4883,7 @@ class ErrorCause(AttrDict[Any]): """ type: str - reason: str + reason: Union[str, None] stack_trace: str caused_by: "ErrorCause" root_cause: Sequence["ErrorCause"] diff --git a/test_elasticsearch/test_dsl/test_integration/_async/test_document.py b/test_elasticsearch/test_dsl/test_integration/_async/test_document.py index e72955a0a..99f475cf1 100644 --- a/test_elasticsearch/test_dsl/test_integration/_async/test_document.py +++ b/test_elasticsearch/test_dsl/test_integration/_async/test_document.py @@ -23,7 +23,7 @@ from datetime import datetime from ipaddress import ip_address -from typing import TYPE_CHECKING, Any, AsyncIterator, Dict, List, Tuple, Union +from typing import TYPE_CHECKING, Any, AsyncIterator, Dict, List, Optional, Tuple, Union import pytest from pytest import raises @@ -42,6 +42,7 @@ Ip, Keyword, Long, + M, Mapping, MetaField, Nested, @@ -52,6 +53,8 @@ analyzer, mapped_field, ) +from elasticsearch.dsl.query import Match +from elasticsearch.dsl.types import MatchQuery from elasticsearch.dsl.utils import AttrList from elasticsearch.helpers.errors import BulkIndexError @@ -850,3 +853,51 @@ class Index: assert docs[0].float_vector == doc.float_vector assert docs[0].byte_vector == doc.byte_vector assert docs[0].bit_vector == doc.bit_vector + + +@pytest.mark.asyncio +async def test_copy_to(async_client: AsyncElasticsearch) -> None: + class Person(AsyncDocument): + first_name: M[str] = mapped_field(Text(copy_to=["full_name", "all"])) + last_name: M[str] = mapped_field(Text(copy_to=["full_name", "all"])) + birth_place: M[str] = mapped_field(Text(copy_to="all")) + full_name: M[Optional[str]] = mapped_field(init=False) + all: M[Optional[str]] = mapped_field(init=False) + + class Index: + name = "people" + + await Person._index.delete(ignore_unavailable=True) + await Person.init() + + person = Person(first_name="Jane", last_name="Doe", birth_place="Springfield") + await person.save() + await Person._index.refresh() + + match = ( + await Person.search() + .query(Match(Person.full_name, MatchQuery(query="Jane"))) + .execute() + ) + assert len(match) == 1 + + match = ( + await Person.search() + .query(Match(Person.all, MatchQuery(query="Doe"))) + .execute() + ) + assert len(match) == 1 + + match = ( + await Person.search() + .query(Match(Person.full_name, MatchQuery(query="Springfield"))) + .execute() + ) + assert len(match) == 0 + + match = ( + await Person.search() + .query(Match(Person.all, MatchQuery(query="Springfield"))) + .execute() + ) + assert len(match) == 1 diff --git a/test_elasticsearch/test_dsl/test_integration/_sync/test_document.py b/test_elasticsearch/test_dsl/test_integration/_sync/test_document.py index 13b60f71b..05dd05fd9 100644 --- a/test_elasticsearch/test_dsl/test_integration/_sync/test_document.py +++ b/test_elasticsearch/test_dsl/test_integration/_sync/test_document.py @@ -23,7 +23,7 @@ from datetime import datetime from ipaddress import ip_address -from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Tuple, Union +from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Tuple, Union import pytest from pytest import raises @@ -41,6 +41,7 @@ Ip, Keyword, Long, + M, Mapping, MetaField, Nested, @@ -52,6 +53,8 @@ analyzer, mapped_field, ) +from elasticsearch.dsl.query import Match +from elasticsearch.dsl.types import MatchQuery from elasticsearch.dsl.utils import AttrList from elasticsearch.helpers.errors import BulkIndexError @@ -842,3 +845,47 @@ class Index: assert docs[0].float_vector == doc.float_vector assert 
docs[0].byte_vector == doc.byte_vector assert docs[0].bit_vector == doc.bit_vector + + +@pytest.mark.sync +def test_copy_to(client: Elasticsearch) -> None: + class Person(Document): + first_name: M[str] = mapped_field(Text(copy_to=["full_name", "all"])) + last_name: M[str] = mapped_field(Text(copy_to=["full_name", "all"])) + birth_place: M[str] = mapped_field(Text(copy_to="all")) + full_name: M[Optional[str]] = mapped_field(init=False) + all: M[Optional[str]] = mapped_field(init=False) + + class Index: + name = "people" + + Person._index.delete(ignore_unavailable=True) + Person.init() + + person = Person(first_name="Jane", last_name="Doe", birth_place="Springfield") + person.save() + Person._index.refresh() + + match = ( + Person.search() + .query(Match(Person.full_name, MatchQuery(query="Jane"))) + .execute() + ) + assert len(match) == 1 + + match = Person.search().query(Match(Person.all, MatchQuery(query="Doe"))).execute() + assert len(match) == 1 + + match = ( + Person.search() + .query(Match(Person.full_name, MatchQuery(query="Springfield"))) + .execute() + ) + assert len(match) == 0 + + match = ( + Person.search() + .query(Match(Person.all, MatchQuery(query="Springfield"))) + .execute() + ) + assert len(match) == 1 diff --git a/utils/templates/field.py.tpl b/utils/templates/field.py.tpl index 030060d23..8a4c73f33 100644 --- a/utils/templates/field.py.tpl +++ b/utils/templates/field.py.tpl @@ -245,7 +245,14 @@ class {{ k.name }}({{ k.parent }}): {% if not arg.positional %} if {{ arg.name }} is not DEFAULT: {% if "InstrumentedField" in arg.type %} + {% if "Sequence" in arg.type %} + if isinstance({{ arg.name }}, list): + kwargs["{{ arg.name }}"] = [str(field) for field in {{ arg.name }}] + else: + kwargs["{{ arg.name }}"] = str({{ arg.name }}) + {% else %} kwargs["{{ arg.name }}"] = str({{ arg.name }}) + {% endif %} {% else %} kwargs["{{ arg.name }}"] = {{ arg.name }} {% endif %} From 218565c3894cf0bc3ef5628e1c4aee28cf6bb0e4 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 28 Jul 2025 17:08:01 +0100 Subject: [PATCH 59/65] Add option to disable accurate reporting of file and line location in warnings (Fixes #3003) (#3006) (#3007) (cherry picked from commit ee3f2d9b5262273da425048ca01cfc7ea0c5c8c4) Co-authored-by: Miguel Grinberg --- elasticsearch/compat.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/elasticsearch/compat.py b/elasticsearch/compat.py index 7639fd2bd..007971306 100644 --- a/elasticsearch/compat.py +++ b/elasticsearch/compat.py @@ -16,12 +16,15 @@ # under the License. 
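For the `DISABLE_WARN_STACKLEVEL` switch introduced by this commit, a minimal usage sketch is shown below; the connection URL is a placeholder, and the variable only needs to be set before the client emits a warning.

[source, python]
----------------------------
import os

# With the variable set, warn_stacklevel() returns 0, so the client's
# warnings no longer try to point at the caller's file and line.
os.environ["DISABLE_WARN_STACKLEVEL"] = "1"

from elasticsearch import Elasticsearch  # noqa: E402

client = Elasticsearch("http://localhost:9200")
----------------------------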
import inspect +import os import sys from pathlib import Path from typing import Tuple, Type, Union string_types: Tuple[Type[str], Type[bytes]] = (str, bytes) +DISABLE_WARN_STACKLEVEL_ENV_VAR = "DISABLE_WARN_STACKLEVEL" + def to_str(x: Union[str, bytes], encoding: str = "ascii") -> str: if not isinstance(x, str): @@ -37,6 +40,8 @@ def to_bytes(x: Union[str, bytes], encoding: str = "ascii") -> bytes: def warn_stacklevel() -> int: """Dynamically determine warning stacklevel for warnings based on the call stack""" + if os.environ.get(DISABLE_WARN_STACKLEVEL_ENV_VAR) in ["1", "true", "True"]: + return 0 try: # Grab the root module from the current module '__name__' module_name = __name__.partition(".")[0] From beb3644c0f0a138213e0fcd0d8c622dddfc9f2aa Mon Sep 17 00:00:00 2001 From: Miguel Grinberg Date: Tue, 29 Jul 2025 10:28:12 +0100 Subject: [PATCH 60/65] [8.19] ES|QL query builder (#2997) (#3010) * ES|QL query builder (#2997) * ES|QL query builder * add missing esql api documentation * add FORK command * initial attempt at generating all functions * unit tests * more operators * documentation * integration tests * add new COMPLETION command * show ES|QL in all docs examples * Docstring fixes * add technical preview warning * docs * md to asciidoc docs translation --- docs/guide/esql-query-builder.asciidoc | 253 +++ docs/guide/index.asciidoc | 6 +- docs/sphinx/esql.rst | 100 + docs/sphinx/index.rst | 1 + elasticsearch/dsl/__init__.py | 3 +- elasticsearch/dsl/document_base.py | 192 +- elasticsearch/dsl/utils.py | 2 +- elasticsearch/esql/__init__.py | 18 + elasticsearch/esql/esql.py | 1105 +++++++++++ elasticsearch/esql/functions.py | 1738 +++++++++++++++++ .../test_dsl/_async/test_esql.py | 93 + .../test_dsl/_sync/test_esql.py | 93 + test_elasticsearch/test_esql.py | 715 +++++++ 13 files changed, 4300 insertions(+), 19 deletions(-) create mode 100644 docs/guide/esql-query-builder.asciidoc create mode 100644 docs/sphinx/esql.rst create mode 100644 elasticsearch/esql/__init__.py create mode 100644 elasticsearch/esql/esql.py create mode 100644 elasticsearch/esql/functions.py create mode 100644 test_elasticsearch/test_dsl/_async/test_esql.py create mode 100644 test_elasticsearch/test_dsl/_sync/test_esql.py create mode 100644 test_elasticsearch/test_esql.py diff --git a/docs/guide/esql-query-builder.asciidoc b/docs/guide/esql-query-builder.asciidoc new file mode 100644 index 000000000..bf254cba1 --- /dev/null +++ b/docs/guide/esql-query-builder.asciidoc @@ -0,0 +1,253 @@ +[[esql-query-builder]] +== ES|QL Query Builder + +WARNING: This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features. + +The ES|QL Query Builder allows you to construct ES|QL queries using Python syntax. 
Consider the following example: + +[source, python] +---------------------------- +>>> from elasticsearch.esql import ESQL +>>> query = ( + ESQL.from_("employees") + .sort("emp_no") + .keep("first_name", "last_name", "height") + .eval(height_feet="height * 3.281", height_cm="height * 100") + .limit(3) +) +---------------------------- + +You can then see the assembled ES|QL query by printing the resulting query object: + +[source, python] +---------------------------- +>>> query +FROM employees +| SORT emp_no +| KEEP first_name, last_name, height +| EVAL height_feet = height * 3.281, height_cm = height * 100 +| LIMIT 3 +---------------------------- + +To execute this query, you can cast it to a string and pass the string to the `client.esql.query()` endpoint: + +[source, python] +---------------------------- +>>> from elasticsearch import Elasticsearch +>>> client = Elasticsearch(hosts=[os.environ['ELASTICSEARCH_URL']]) +>>> response = client.esql.query(query=str(query)) +---------------------------- + +The response body contains a `columns` attribute with the list of columns included in the results, and a `values` attribute with the list of results for the query, each given as a list of column values. Here is a possible response body returned by the example query given above: + +[source, python] +---------------------------- +>>> from pprint import pprint +>>> pprint(response.body) +{'columns': [{'name': 'first_name', 'type': 'text'}, + {'name': 'last_name', 'type': 'text'}, + {'name': 'height', 'type': 'double'}, + {'name': 'height_feet', 'type': 'double'}, + {'name': 'height_cm', 'type': 'double'}], + 'is_partial': False, + 'took': 11, + 'values': [['Adrian', 'Wells', 2.424, 7.953144, 242.4], + ['Aaron', 'Gonzalez', 1.584, 5.1971, 158.4], + ['Miranda', 'Kramer', 1.55, 5.08555, 155]]} +---------------------------- + +=== Creating an ES|QL query + +To construct an ES|QL query you start from one of the ES|QL source commands: + +==== `ESQL.from_` + +The `FROM` command selects the indices, data streams or aliases to be queried. + +Examples: + +[source, python] +---------------------------- +from elasticsearch.esql import ESQL + +# FROM employees +query1 = ESQL.from_("employees") + +# FROM +query2 = ESQL.from_("") + +# FROM employees-00001, other-employees-* +query3 = ESQL.from_("employees-00001", "other-employees-*") + +# FROM cluster_one:employees-00001, cluster_two:other-employees-* +query4 = ESQL.from_("cluster_one:employees-00001", "cluster_two:other-employees-*") + +# FROM employees METADATA _id +query5 = ESQL.from_("employees").metadata("_id") +---------------------------- + +Note how in the last example the optional `METADATA` clause of the `FROM` command is added as a chained method. + +==== `ESQL.row` + +The `ROW` command produces a row with one or more columns, with the values that you specify. + +Examples: + +[source, python] +---------------------------- +from elasticsearch.esql import ESQL, functions + +# ROW a = 1, b = "two", c = null +query1 = ESQL.row(a=1, b="two", c=None) + +# ROW a = [1, 2] +query2 = ESQL.row(a=[1, 2]) + +# ROW a = ROUND(1.23, 0) +query3 = ESQL.row(a=functions.round(1.23, 0)) +---------------------------- + +==== `ESQL.show` + +The `SHOW` command returns information about the deployment and its capabilities. 
+ +Example: + +[source, python] +---------------------------- +from elasticsearch.esql import ESQL + +# SHOW INFO +query = ESQL.show("INFO") +---------------------------- + +=== Adding processing commands + +Once you have a query object, you can add one or more processing commands to it. The following +example shows how to create a query that uses the `WHERE` and `LIMIT` commands to filter the +results: + +[source, python] +---------------------------- +from elasticsearch.esql import ESQL + +# FROM employees +# | WHERE still_hired == true +# | LIMIT 10 +query = ESQL.from_("employees").where("still_hired == true").limit(10) +---------------------------- + +For a complete list of available commands, review the methods of the https://elasticsearch-py.readthedocs.io/en/stable/esql.html[`ESQLBase` class] in the Elasticsearch Python API documentation. + +=== Creating ES|QL Expressions and Conditions + +The ES|QL query builder for Python provides two ways to create expressions and conditions in ES|QL queries. + +The simplest option is to provide all ES|QL expressions and conditionals as strings. The following example uses this approach to add two calculated columns to the results using the `EVAL` command: + +[source, python] +---------------------------- +from elasticsearch.esql import ESQL + +# FROM employees +# | SORT emp_no +# | KEEP first_name, last_name, height +# | EVAL height_feet = height * 3.281, height_cm = height * 100 +query = ( + ESQL.from_("employees") + .sort("emp_no") + .keep("first_name", "last_name", "height") + .eval(height_feet="height * 3.281", height_cm="height * 100") +) +---------------------------- + +A more advanced alternative is to replace the strings with Python expressions, which are automatically translated to ES|QL when the query object is rendered to a string. The following example is functionally equivalent to the one above: + +[source, python] +---------------------------- +from elasticsearch.esql import ESQL, E + +# FROM employees +# | SORT emp_no +# | KEEP first_name, last_name, height +# | EVAL height_feet = height * 3.281, height_cm = height * 100 +query = ( + ESQL.from_("employees") + .sort("emp_no") + .keep("first_name", "last_name", "height") + .eval(height_feet=E("height") * 3.281, height_cm=E("height") * 100) +) +---------------------------- + +Here the `E()` helper function is used as a wrapper to the column name that initiates an ES|QL expression. The `E()` function transforms the given column into an ES|QL expression that can be modified with Python operators. + +Here is a second example, which uses a conditional expression in the `WHERE` command: + +[source, python] +---------------------------- +from elasticsearch.esql import ESQL + +# FROM employees +# | KEEP first_name, last_name, height +# | WHERE first_name == "Larry" +query = ( + ESQL.from_("employees") + .keep("first_name", "last_name", "height") + .where('first_name == "Larry"') +) +---------------------------- + +Using Python syntax, the condition can be rewritten as follows: + +[source, python] +---------------------------- +from elasticsearch.esql import ESQL, E + +# FROM employees +# | KEEP first_name, last_name, height +# | WHERE first_name == "Larry" +query = ( + ESQL.from_("employees") + .keep("first_name", "last_name", "height") + .where(E("first_name") == "Larry") +) +---------------------------- + +=== Using ES|QL functions + +The ES|QL language includes a rich set of functions that can be used in expressions and conditionals. 
These can be included in expressions given as strings, as shown in the example below: + +[source, python] +---------------------------- +from elasticsearch.esql import ESQL + +# FROM employees +# | KEEP first_name, last_name, height +# | WHERE LENGTH(first_name) < 4" +query = ( + ESQL.from_("employees") + .keep("first_name", "last_name", "height") + .where("LENGTH(first_name) < 4") +) +---------------------------- + +All available ES|QL functions have Python wrappers in the `elasticsearch.esql.functions` module, which can be used when building expressions using Python syntax. Below is the example above coded using Python syntax: + +[source, python] +---------------------------- +from elasticsearch.esql import ESQL, functions + +# FROM employees +# | KEEP first_name, last_name, height +# | WHERE LENGTH(first_name) < 4" +query = ( + ESQL.from_("employees") + .keep("first_name", "last_name", "height") + .where(functions.length(E("first_name")) < 4) +) +---------------------------- + +Note that arguments passed to functions are assumed to be literals. When passing field names, it is necessary to wrap them with the `E()` helper function so that they are interpreted correctly. + +You can find the complete list of available functions in the Python client's https://elasticsearch-py.readthedocs.io/en/stable/esql.html#module-elasticsearch.esql.functions[ES|QL API reference documentation]. diff --git a/docs/guide/index.asciidoc b/docs/guide/index.asciidoc index 5607a9f24..8df2f3982 100644 --- a/docs/guide/index.asciidoc +++ b/docs/guide/index.asciidoc @@ -22,8 +22,12 @@ include::integrations.asciidoc[] include::examples.asciidoc[] +include::helpers.asciidoc[] + include::elasticsearch-dsl.asciidoc[] -include::helpers.asciidoc[] +include::esql-query-builder.asciidoc[] + +include::async.asciidoc[] include::release-notes.asciidoc[] diff --git a/docs/sphinx/esql.rst b/docs/sphinx/esql.rst new file mode 100644 index 000000000..1104b5b97 --- /dev/null +++ b/docs/sphinx/esql.rst @@ -0,0 +1,100 @@ +ES|QL Query Builder +=================== + +Commands +-------- + +.. autoclass:: elasticsearch.esql.ESQL + :inherited-members: + :members: + +.. autoclass:: elasticsearch.esql.esql.ESQLBase + :inherited-members: + :members: + :exclude-members: __init__ + +.. autoclass:: elasticsearch.esql.esql.From + :members: + :exclude-members: __init__ + +.. autoclass:: elasticsearch.esql.esql.Row + :members: + :exclude-members: __init__ + +.. autoclass:: elasticsearch.esql.esql.Show + :members: + :exclude-members: __init__ + +.. autoclass:: elasticsearch.esql.esql.ChangePoint + :members: + :exclude-members: __init__ + +.. autoclass:: elasticsearch.esql.esql.Completion + :members: + :exclude-members: __init__ + +.. autoclass:: elasticsearch.esql.esql.Dissect + :members: + :exclude-members: __init__ + +.. autoclass:: elasticsearch.esql.esql.Drop + :members: + :exclude-members: __init__ + +.. autoclass:: elasticsearch.esql.esql.Enrich + :members: + :exclude-members: __init__ + +.. autoclass:: elasticsearch.esql.esql.Eval + :members: + :exclude-members: __init__ + +.. autoclass:: elasticsearch.esql.esql.Fork + :members: + :exclude-members: __init__ + +.. autoclass:: elasticsearch.esql.esql.Grok + :members: + :exclude-members: __init__ + +.. autoclass:: elasticsearch.esql.esql.Keep + :members: + :exclude-members: __init__ + +.. autoclass:: elasticsearch.esql.esql.Limit + :members: + :exclude-members: __init__ + +.. autoclass:: elasticsearch.esql.esql.LookupJoin + :members: + :exclude-members: __init__ + +.. 
autoclass:: elasticsearch.esql.esql.MvExpand + :members: + :exclude-members: __init__ + +.. autoclass:: elasticsearch.esql.esql.Rename + :members: + :exclude-members: __init__ + +.. autoclass:: elasticsearch.esql.esql.Sample + :members: + :exclude-members: __init__ + +.. autoclass:: elasticsearch.esql.esql.Sort + :members: + :exclude-members: __init__ + +.. autoclass:: elasticsearch.esql.esql.Stats + :members: + :exclude-members: __init__ + +.. autoclass:: elasticsearch.esql.esql.Where + :members: + :exclude-members: __init__ + +Functions +--------- + +.. automodule:: elasticsearch.esql.functions + :members: diff --git a/docs/sphinx/index.rst b/docs/sphinx/index.rst index 70ab257d3..290d047a7 100644 --- a/docs/sphinx/index.rst +++ b/docs/sphinx/index.rst @@ -131,6 +131,7 @@ Contents quickstart interactive api + esql exceptions async helpers diff --git a/elasticsearch/dsl/__init__.py b/elasticsearch/dsl/__init__.py index 860e2b761..e109db219 100644 --- a/elasticsearch/dsl/__init__.py +++ b/elasticsearch/dsl/__init__.py @@ -19,7 +19,7 @@ from .aggs import A, Agg from .analysis import analyzer, char_filter, normalizer, token_filter, tokenizer from .document import AsyncDocument, Document -from .document_base import InnerDoc, M, MetaField, mapped_field +from .document_base import E, InnerDoc, M, MetaField, mapped_field from .exceptions import ( ElasticsearchDslException, IllegalOperation, @@ -135,6 +135,7 @@ "Double", "DoubleRange", "DslBase", + "E", "ElasticsearchDslException", "EmptySearch", "Facet", diff --git a/elasticsearch/dsl/document_base.py b/elasticsearch/dsl/document_base.py index b5e373741..d8f9c265a 100644 --- a/elasticsearch/dsl/document_base.py +++ b/elasticsearch/dsl/document_base.py @@ -15,6 +15,7 @@ # specific language governing permissions and limitations # under the License. 
+import json from datetime import date, datetime from fnmatch import fnmatch from typing import ( @@ -56,7 +57,163 @@ def __init__(self, *args: Any, **kwargs: Any): self.args, self.kwargs = args, kwargs -class InstrumentedField: +class InstrumentedExpression: + """Proxy object for a ES|QL expression.""" + + def __init__(self, expr: str): + self._expr = expr + + def _render_value(self, value: Any) -> str: + if isinstance(value, InstrumentedExpression): + return str(value) + return json.dumps(value) + + def __str__(self) -> str: + return self._expr + + def __repr__(self) -> str: + return f"InstrumentedExpression[{self._expr}]" + + def __pos__(self) -> "InstrumentedExpression": + return self + + def __neg__(self) -> "InstrumentedExpression": + return InstrumentedExpression(f"-({self._expr})") + + def __eq__(self, value: Any) -> "InstrumentedExpression": # type: ignore[override] + return InstrumentedExpression(f"{self._expr} == {self._render_value(value)}") + + def __ne__(self, value: Any) -> "InstrumentedExpression": # type: ignore[override] + return InstrumentedExpression(f"{self._expr} != {self._render_value(value)}") + + def __lt__(self, value: Any) -> "InstrumentedExpression": + return InstrumentedExpression(f"{self._expr} < {self._render_value(value)}") + + def __gt__(self, value: Any) -> "InstrumentedExpression": + return InstrumentedExpression(f"{self._expr} > {self._render_value(value)}") + + def __le__(self, value: Any) -> "InstrumentedExpression": + return InstrumentedExpression(f"{self._expr} <= {self._render_value(value)}") + + def __ge__(self, value: Any) -> "InstrumentedExpression": + return InstrumentedExpression(f"{self._expr} >= {self._render_value(value)}") + + def __add__(self, value: Any) -> "InstrumentedExpression": + return InstrumentedExpression(f"{self._expr} + {self._render_value(value)}") + + def __radd__(self, value: Any) -> "InstrumentedExpression": + return InstrumentedExpression(f"{self._render_value(value)} + {self._expr}") + + def __sub__(self, value: Any) -> "InstrumentedExpression": + return InstrumentedExpression(f"{self._expr} - {self._render_value(value)}") + + def __rsub__(self, value: Any) -> "InstrumentedExpression": + return InstrumentedExpression(f"{self._render_value(value)} - {self._expr}") + + def __mul__(self, value: Any) -> "InstrumentedExpression": + return InstrumentedExpression(f"{self._expr} * {self._render_value(value)}") + + def __rmul__(self, value: Any) -> "InstrumentedExpression": + return InstrumentedExpression(f"{self._render_value(value)} * {self._expr}") + + def __truediv__(self, value: Any) -> "InstrumentedExpression": + return InstrumentedExpression(f"{self._expr} / {self._render_value(value)}") + + def __rtruediv__(self, value: Any) -> "InstrumentedExpression": + return InstrumentedExpression(f"{self._render_value(value)} / {self._expr}") + + def __mod__(self, value: Any) -> "InstrumentedExpression": + return InstrumentedExpression(f"{self._expr} % {self._render_value(value)}") + + def __rmod__(self, value: Any) -> "InstrumentedExpression": + return InstrumentedExpression(f"{self._render_value(value)} % {self._expr}") + + def is_null(self) -> "InstrumentedExpression": + """Compare the expression against NULL.""" + return InstrumentedExpression(f"{self._expr} IS NULL") + + def is_not_null(self) -> "InstrumentedExpression": + """Compare the expression against NOT NULL.""" + return InstrumentedExpression(f"{self._expr} IS NOT NULL") + + def in_(self, *values: Any) -> "InstrumentedExpression": + """Test if the expression equals one of 
the given values.""" + rendered_values = ", ".join([f"{value}" for value in values]) + return InstrumentedExpression(f"{self._expr} IN ({rendered_values})") + + def like(self, *patterns: str) -> "InstrumentedExpression": + """Filter the expression using a string pattern.""" + if len(patterns) == 1: + return InstrumentedExpression( + f"{self._expr} LIKE {self._render_value(patterns[0])}" + ) + else: + return InstrumentedExpression( + f'{self._expr} LIKE ({", ".join([self._render_value(p) for p in patterns])})' + ) + + def rlike(self, *patterns: str) -> "InstrumentedExpression": + """Filter the expression using a regular expression.""" + if len(patterns) == 1: + return InstrumentedExpression( + f"{self._expr} RLIKE {self._render_value(patterns[0])}" + ) + else: + return InstrumentedExpression( + f'{self._expr} RLIKE ({", ".join([self._render_value(p) for p in patterns])})' + ) + + def match(self, query: str) -> "InstrumentedExpression": + """Perform a match query on the field.""" + return InstrumentedExpression(f"{self._expr}:{self._render_value(query)}") + + def asc(self) -> "InstrumentedExpression": + """Return the field name representation for ascending sort order. + + For use in ES|QL queries only. + """ + return InstrumentedExpression(f"{self._expr} ASC") + + def desc(self) -> "InstrumentedExpression": + """Return the field name representation for descending sort order. + + For use in ES|QL queries only. + """ + return InstrumentedExpression(f"{self._expr} DESC") + + def nulls_first(self) -> "InstrumentedExpression": + """Return the field name representation for nulls first sort order. + + For use in ES|QL queries only. + """ + return InstrumentedExpression(f"{self._expr} NULLS FIRST") + + def nulls_last(self) -> "InstrumentedExpression": + """Return the field name representation for nulls last sort order. + + For use in ES|QL queries only. + """ + return InstrumentedExpression(f"{self._expr} NULLS LAST") + + def where( + self, *expressions: Union[str, "InstrumentedExpression"] + ) -> "InstrumentedExpression": + """Add a condition to be met for the row to be included. + + Use only in expressions given in the ``STATS`` command. + """ + if len(expressions) == 1: + return InstrumentedExpression(f"{self._expr} WHERE {expressions[0]}") + else: + return InstrumentedExpression( + f'{self._expr} WHERE {" AND ".join([f"({expr})" for expr in expressions])}' + ) + + +E = InstrumentedExpression + + +class InstrumentedField(InstrumentedExpression): """Proxy object for a mapped document field. 
An object of this instance is returned when a field is accessed as a class @@ -71,8 +228,8 @@ class MyDocument(Document): s = s.sort(-MyDocument.name) # sort by name in descending order """ - def __init__(self, name: str, field: Field): - self._name = name + def __init__(self, name: str, field: Optional[Field]): + super().__init__(name) self._field = field # note that the return value type here assumes classes will only be used to @@ -83,26 +240,29 @@ def __getattr__(self, attr: str) -> "InstrumentedField": # first let's see if this is an attribute of this object return super().__getattribute__(attr) # type: ignore[no-any-return] except AttributeError: - try: - # next we see if we have a sub-field with this name - return InstrumentedField(f"{self._name}.{attr}", self._field[attr]) - except KeyError: - # lastly we let the wrapped field resolve this attribute - return getattr(self._field, attr) # type: ignore[no-any-return] - - def __pos__(self) -> str: + if self._field: + try: + # next we see if we have a sub-field with this name + return InstrumentedField(f"{self._expr}.{attr}", self._field[attr]) + except KeyError: + # lastly we let the wrapped field resolve this attribute + return getattr(self._field, attr) # type: ignore[no-any-return] + else: + raise + + def __pos__(self) -> str: # type: ignore[override] """Return the field name representation for ascending sort order""" - return f"{self._name}" + return f"{self._expr}" - def __neg__(self) -> str: + def __neg__(self) -> str: # type: ignore[override] """Return the field name representation for descending sort order""" - return f"-{self._name}" + return f"-{self._expr}" def __str__(self) -> str: - return self._name + return self._expr def __repr__(self) -> str: - return f"InstrumentedField[{self._name}]" + return f"InstrumentedField[{self._expr}]" class DocumentMeta(type): diff --git a/elasticsearch/dsl/utils.py b/elasticsearch/dsl/utils.py index b52ec63a0..127a48cc2 100644 --- a/elasticsearch/dsl/utils.py +++ b/elasticsearch/dsl/utils.py @@ -333,7 +333,7 @@ def __init__(self, _expand__to_dot: Optional[bool] = None, **params: Any) -> Non _expand__to_dot = EXPAND__TO_DOT self._params: Dict[str, Any] = {} for pname, pvalue in params.items(): - if pvalue == DEFAULT: + if pvalue is DEFAULT: continue # expand "__" to dots if "__" in pname and _expand__to_dot: diff --git a/elasticsearch/esql/__init__.py b/elasticsearch/esql/__init__.py new file mode 100644 index 000000000..d872c329a --- /dev/null +++ b/elasticsearch/esql/__init__.py @@ -0,0 +1,18 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+
+from .esql import ESQL, and_, not_, or_  # noqa: F401
diff --git a/elasticsearch/esql/esql.py b/elasticsearch/esql/esql.py
new file mode 100644
index 000000000..07ccdf839
--- /dev/null
+++ b/elasticsearch/esql/esql.py
@@ -0,0 +1,1105 @@
+# Licensed to Elasticsearch B.V. under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch B.V. licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import json
+from abc import ABC, abstractmethod
+from typing import Any, Dict, Optional, Tuple, Type, Union
+
+from ..dsl.document_base import DocumentBase, InstrumentedExpression, InstrumentedField
+
+FieldType = Union[InstrumentedField, str]
+IndexType = Union[Type[DocumentBase], str]
+ExpressionType = Any
+
+
+class ESQL(ABC):
+    """The static methods of the ``ESQL`` class provide access to the ES|QL source
+    commands, used to create ES|QL queries.
+
+    These methods return an instance of class ``ESQLBase``, which provides access to
+    the ES|QL processing commands.
+    """
+
+    @staticmethod
+    def from_(*indices: IndexType) -> "From":
+        """The ``FROM`` source command returns a table with data from a data stream, index, or alias.
+
+        :param indices: A list of indices, data streams or aliases. Supports wildcards and date math.
+
+        Examples::
+
+            query1 = ESQL.from_("employees")
+            query2 = ESQL.from_("<logs-{now/d}>")
+            query3 = ESQL.from_("employees-00001", "other-employees-*")
+            query4 = ESQL.from_("cluster_one:employees-00001", "cluster_two:other-employees-*")
+            query5 = ESQL.from_("employees").metadata("_id")
+        """
+        return From(*indices)
+
+    @staticmethod
+    def row(**params: ExpressionType) -> "Row":
+        """The ``ROW`` source command produces a row with one or more columns with values that you specify.
+        This can be useful for testing.
+
+        :param params: the column values to produce, given as keyword arguments.
+
+        Examples::
+
+            query1 = ESQL.row(a=1, b="two", c=None)
+            query2 = ESQL.row(a=[1, 2])
+            query3 = ESQL.row(a=functions.round(1.23, 0))
+        """
+        return Row(**params)
+
+    @staticmethod
+    def show(item: str) -> "Show":
+        """The ``SHOW`` source command returns information about the deployment and its capabilities.
+
+        :param item: Can only be ``INFO``.
+
+        Examples::
+
+            query = ESQL.show("INFO")
+        """
+        return Show(item)
+
+    @staticmethod
+    def branch() -> "Branch":
+        """This method can only be used inside a ``FORK`` command to create each branch.
+
+        Examples::
+
+            query = ESQL.from_("employees").fork(
+                ESQL.branch().where("emp_no == 10001"),
+                ESQL.branch().where("emp_no == 10002"),
+            )
+        """
+        return Branch()
+
+
+class ESQLBase(ABC):
+    """The methods of the ``ESQLBase`` class provide access to the ES|QL processing
+    commands, used to build ES|QL queries.
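+
+    For example, processing commands can be chained after a source command and the
+    resulting query rendered to an ES|QL string (an illustrative sketch based on the
+    commands defined below)::
+
+        query = (
+            ESQL.from_("employees")
+            .where("still_hired == true")
+            .limit(10)
+        )
+        esql_text = query.render()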
+ """ + + def __init__(self, parent: Optional["ESQLBase"] = None): + self._parent = parent + + def __repr__(self) -> str: + return self.render() + + def render(self) -> str: + return ( + self._parent.render() + "\n| " if self._parent else "" + ) + self._render_internal() + + @abstractmethod + def _render_internal(self) -> str: + pass + + def _is_forked(self) -> bool: + if self.__class__.__name__ == "Fork": + return True + if self._parent: + return self._parent._is_forked() + return False + + def change_point(self, value: FieldType) -> "ChangePoint": + """`CHANGE_POINT` detects spikes, dips, and change points in a metric. + + :param value: The column with the metric in which you want to detect a change point. + + Examples:: + + query = ( + ESQL.row(key=list(range(1, 26))) + .mv_expand("key") + .eval(value=functions.case("key<13", 0, 42)) + .change_point("value") + .on("key") + .where("type IS NOT NULL") + ) + """ + return ChangePoint(self, value) + + def completion( + self, *prompt: ExpressionType, **named_prompt: ExpressionType + ) -> "Completion": + """The `COMPLETION` command allows you to send prompts and context to a Large + Language Model (LLM) directly within your ES|QL queries, to perform text + generation tasks. + + :param prompt: The input text or expression used to prompt the LLM. This can + be a string literal or a reference to a column containing text. + :param named_prompt: The input text or expresion, given as a keyword argument. + The argument name is used for the column name. If not + specified, the results will be stored in a column named + `completion`. If the specified column already exists, it + will be overwritten with the new results. + + Examples:: + + query1 = ( + ESQL.row(question="What is Elasticsearch?") + .completion("question").with_("test_completion_model") + .keep("question", "completion") + ) + query2 = ( + ESQL.row(question="What is Elasticsearch?") + .completion(answer="question").with_("test_completion_model") + .keep("question", "answer") + ) + query3 = ( + ESQL.from_("movies") + .sort("rating DESC") + .limit(10) + .eval(prompt=\"\"\"CONCAT( + "Summarize this movie using the following information: \\n", + "Title: ", title, "\\n", + "Synopsis: ", synopsis, "\\n", + "Actors: ", MV_CONCAT(actors, ", "), "\\n", + )\"\"\") + .completion(summary="prompt").with_("test_completion_model") + .keep("title", "summary", "rating") + ) + """ + return Completion(self, *prompt, **named_prompt) + + def dissect(self, input: FieldType, pattern: str) -> "Dissect": + """``DISSECT`` enables you to extract structured data out of a string. + + :param input: The column that contains the string you want to structure. If + the column has multiple values, ``DISSECT`` will process each value. + :param pattern: A dissect pattern. If a field name conflicts with an existing + column, the existing column is dropped. If a field name is used + more than once, only the rightmost duplicate creates a column. + + Examples:: + + query = ( + ESQL.row(a="2023-01-23T12:15:00.000Z - some text - 127.0.0.1") + .dissect("a", "%{date} - %{msg} - %{ip}") + .keep("date", "msg", "ip") + .eval(date="TO_DATETIME(date)") + ) + """ + return Dissect(self, input, pattern) + + def drop(self, *columns: FieldType) -> "Drop": + """The ``DROP`` processing command removes one or more columns. + + :param columns: The columns to drop, given as positional arguments. Supports wildcards. 
+ + Examples:: + + query1 = ESQL.from_("employees").drop("height") + query2 = ESQL.from_("employees").drop("height*") + """ + return Drop(self, *columns) + + def enrich(self, policy: str) -> "Enrich": + """``ENRICH`` enables you to add data from existing indices as new columns using an + enrich policy. + + :param policy: The name of the enrich policy. You need to create and execute the + enrich policy first. + + Examples:: + + query1 = ( + ESQL.row(a="1") + .enrich("languages_policy").on("a").with_("language_name") + ) + query2 = ( + ESQL.row(a="1") + .enrich("languages_policy").on("a").with_(name="language_name") + ) + """ + return Enrich(self, policy) + + def eval(self, *columns: ExpressionType, **named_columns: ExpressionType) -> "Eval": + """The ``EVAL`` processing command enables you to append new columns with calculated values. + + :param columns: The values for the columns, given as positional arguments. Can be literals, + expressions, or functions. Can use columns defined left of this one. + :param named_columns: The values for the new columns, given as keyword arguments. The name + of the arguments is used as column name. If a column with the same + name already exists, the existing column is dropped. If a column name + is used more than once, only the rightmost duplicate creates a column. + + Examples:: + + query1 = ( + ESQL.from_("employees") + .sort("emp_no") + .keep("first_name", "last_name", "height") + .eval(height_feet="height * 3.281", height_cm="height * 100") + ) + query2 = ( + ESQL.from_("employees") + .eval("height * 3.281") + .stats(avg_height_feet=functions.avg("`height * 3.281`")) + ) + """ + return Eval(self, *columns, **named_columns) + + def fork( + self, + fork1: "ESQLBase", + fork2: Optional["ESQLBase"] = None, + fork3: Optional["ESQLBase"] = None, + fork4: Optional["ESQLBase"] = None, + fork5: Optional["ESQLBase"] = None, + fork6: Optional["ESQLBase"] = None, + fork7: Optional["ESQLBase"] = None, + fork8: Optional["ESQLBase"] = None, + ) -> "Fork": + """The ``FORK`` processing command creates multiple execution branches to operate on the + same input data and combines the results in a single output table. + + :param fork: Up to 8 execution branches, created with the ``ESQL.branch()`` method. + + Examples:: + + query = ( + ESQL.from_("employees") + .fork( + ESQL.branch().where("emp_no == 10001"), + ESQL.branch().where("emp_no == 10002"), + ) + .keep("emp_no", "_fork") + .sort("emp_no") + ) + """ + if self._is_forked(): + raise ValueError("a query can only have one fork") + return Fork(self, fork1, fork2, fork3, fork4, fork5, fork6, fork7, fork8) + + def grok(self, input: FieldType, pattern: str) -> "Grok": + """``GROK`` enables you to extract structured data out of a string. + + :param input: The column that contains the string you want to structure. If the + column has multiple values, ``GROK`` will process each value. + :param pattern: A grok pattern. If a field name conflicts with an existing column, + the existing column is discarded. If a field name is used more than + once, a multi-valued column will be created with one value per each + occurrence of the field name. 
+ + Examples:: + + query1 = ( + ESQL.row(a="2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42") + .grok("a", "%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num}") + .keep("date", "ip", "email", "num") + ) + query2 = ( + ESQL.row(a="2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42") + .grok( + "a", + "%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num:int}", + ) + .keep("date", "ip", "email", "num") + .eval(date=functions.to_datetime("date")) + ) + query3 = ( + ESQL.from_("addresses") + .keep("city.name", "zip_code") + .grok("zip_code", "%{WORD:zip_parts} %{WORD:zip_parts}") + ) + """ + return Grok(self, input, pattern) + + def keep(self, *columns: FieldType) -> "Keep": + """The ``KEEP`` processing command enables you to specify what columns are returned + and the order in which they are returned. + + :param columns: The columns to keep, given as positional arguments. Supports + wildcards. + + Examples:: + + query1 = ESQL.from_("employees").keep("emp_no", "first_name", "last_name", "height") + query2 = ESQL.from_("employees").keep("h*") + query3 = ESQL.from_("employees").keep("h*", "*") + """ + return Keep(self, *columns) + + def limit(self, max_number_of_rows: int) -> "Limit": + """The ``LIMIT`` processing command enables you to limit the number of rows that are + returned. + + :param max_number_of_rows: The maximum number of rows to return. + + Examples:: + + query1 = ESQL.from_("employees").sort("emp_no ASC").limit(5) + query2 = ESQL.from_("index").stats(functions.avg("field1")).by("field2").limit(20000) + """ + return Limit(self, max_number_of_rows) + + def lookup_join(self, lookup_index: IndexType) -> "LookupJoin": + """`LOOKUP JOIN` enables you to add data from another index, AKA a 'lookup' index, + to your ES|QL query results, simplifying data enrichment and analysis workflows. + + :param lookup_index: The name of the lookup index. This must be a specific index + name - wildcards, aliases, and remote cluster references are + not supported. Indices used for lookups must be configured + with the lookup index mode. + + Examples:: + + query1 = ( + ESQL.from_("firewall_logs") + .lookup_join("threat_list").on("source.IP") + .where("threat_level IS NOT NULL") + ) + query2 = ( + ESQL.from_("system_metrics") + .lookup_join("host_inventory").on("host.name") + .lookup_join("ownerships").on("host.name") + ) + query3 = ( + ESQL.from_("app_logs") + .lookup_join("service_owners").on("service_id") + ) + query4 = ( + ESQL.from_("employees") + .eval(language_code="languages") + .where("emp_no >= 10091 AND emp_no < 10094") + .lookup_join("languages_lookup").on("language_code") + ) + """ + return LookupJoin(self, lookup_index) + + def mv_expand(self, column: FieldType) -> "MvExpand": + """The `MV_EXPAND` processing command expands multivalued columns into one row per + value, duplicating other columns. + + :param column: The multivalued column to expand. + + Examples:: + + query = ESQL.row(a=[1, 2, 3], b="b", j=["a", "b"]).mv_expand("a") + """ + return MvExpand(self, column) + + def rename(self, **columns: FieldType) -> "Rename": + """The ``RENAME`` processing command renames one or more columns. + + :param columns: The old and new column name pairs, given as keyword arguments. + If a name conflicts with an existing column name, the existing column + is dropped. If multiple columns are renamed to the same name, all but + the rightmost column with the same new name are dropped. 
+
+        Examples::
+
+            query = (
+                ESQL.from_("employees")
+                .keep("first_name", "last_name", "still_hired")
+                .rename(still_hired="employed")
+            )
+        """
+        return Rename(self, **columns)
+
+    def sample(self, probability: float) -> "Sample":
+        """The ``SAMPLE`` command samples a fraction of the table rows.
+
+        :param probability: The probability that a row is included in the sample. The value
+                            must be between 0 and 1, exclusive.
+
+        Examples::
+
+            query = ESQL.from_("employees").keep("emp_no").sample(0.05)
+        """
+        return Sample(self, probability)
+
+    def sort(self, *columns: FieldType) -> "Sort":
+        """The ``SORT`` processing command sorts a table on one or more columns.
+
+        :param columns: The columns to sort on.
+
+        Examples::
+
+            query1 = (
+                ESQL.from_("employees")
+                .keep("first_name", "last_name", "height")
+                .sort("height")
+            )
+            query2 = (
+                ESQL.from_("employees")
+                .keep("first_name", "last_name", "height")
+                .sort("height DESC")
+            )
+            query3 = (
+                ESQL.from_("employees")
+                .keep("first_name", "last_name", "height")
+                .sort("height DESC", "first_name ASC")
+            )
+            query4 = (
+                ESQL.from_("employees")
+                .keep("first_name", "last_name", "height")
+                .sort("first_name ASC NULLS FIRST")
+            )
+        """
+        return Sort(self, *columns)
+
+    def stats(
+        self, *expressions: ExpressionType, **named_expressions: ExpressionType
+    ) -> "Stats":
+        """The ``STATS`` processing command groups rows according to a common value and
+        calculates one or more aggregated values over the grouped rows.
+
+        :param expressions: A list of expressions, given as positional arguments.
+        :param named_expressions: A list of expressions, given as keyword arguments. The
+                                  argument names are used for the returned aggregated values.
+
+        Note that only one of `expressions` and `named_expressions` must be provided.
+
+        Examples::
+
+            query1 = (
+                ESQL.from_("employees")
+                .stats(count=functions.count("emp_no")).by("languages")
+                .sort("languages")
+            )
+            query2 = (
+                ESQL.from_("employees")
+                .stats(avg_lang=functions.avg("languages"))
+            )
+            query3 = (
+                ESQL.from_("employees")
+                .stats(
+                    avg_lang=functions.avg("languages"),
+                    max_lang=functions.max("languages")
+                )
+            )
+            query4 = (
+                ESQL.from_("employees")
+                .stats(
+                    avg50s=functions.avg("salary").where('birth_date < "1960-01-01"'),
+                    avg60s=functions.avg("salary").where('birth_date >= "1960-01-01"'),
+                ).by("gender")
+                .sort("gender")
+            )
+            query5 = (
+                ESQL.from_("employees")
+                .eval(Ks="salary / 1000")
+                .stats(
+                    under_40K=functions.count("*").where("Ks < 40"),
+                    inbetween=functions.count("*").where("40 <= Ks AND Ks < 60"),
+                    over_60K=functions.count("*").where("60 <= Ks"),
+                    total=functions.count("*")
+                )
+            )
+            query6 = (
+                ESQL.row(i=1, a=["a", "b"])
+                .stats(functions.min("i")).by("a")
+                .sort("a ASC")
+            )
+            query7 = (
+                ESQL.from_("employees")
+                .eval(hired=functions.date_format("hire_date", "yyyy"))
+                .stats(avg_salary=functions.avg("salary")).by("hired", "languages.long")
+                .eval(avg_salary=functions.round("avg_salary"))
+                .sort("hired", "languages.long")
+            )
+        """
+        return Stats(self, *expressions, **named_expressions)
+
+    def where(self, *expressions: ExpressionType) -> "Where":
+        """The ``WHERE`` processing command produces a table that contains all the rows
+        from the input table for which the provided condition evaluates to `true`.
+
+        :param expressions: A list of boolean expressions, given as positional arguments.
+                            These expressions are combined with an ``AND`` logical operator.
+ + Examples:: + + query1 = ( + ESQL.from_("employees") + .keep("first_name", "last_name", "still_hired") + .where("still_hired == true") + ) + query2 = ( + ESQL.from_("sample_data") + .where("@timestamp > NOW() - 1 hour") + ) + query3 = ( + ESQL.from_("employees") + .keep("first_name", "last_name", "height") + .where("LENGTH(first_name) < 4") + ) + """ + return Where(self, *expressions) + + +class From(ESQLBase): + """Implementation of the ``FROM`` source command. + + This class inherits from :class:`ESQLBase `, + to make it possible to chain all the commands that belong to an ES|QL query + in a single expression. + """ + + def __init__(self, *indices: IndexType): + super().__init__() + self._indices = indices + self._metadata_fields: Tuple[FieldType, ...] = tuple() + + def metadata(self, *fields: FieldType) -> "From": + """Continuation of the ``FROM`` source command. + + :param fields: metadata fields to retrieve, given as positional arguments. + """ + self._metadata_fields = fields + return self + + def _render_internal(self) -> str: + indices = [ + index if isinstance(index, str) else index._index._name + for index in self._indices + ] + s = f'{self.__class__.__name__.upper()} {", ".join(indices)}' + if self._metadata_fields: + s = ( + s + + f' METADATA {", ".join([str(field) for field in self._metadata_fields])}' + ) + return s + + +class Row(ESQLBase): + """Implementation of the ``ROW`` source command. + + This class inherits from :class:`ESQLBase `, + to make it possible to chain all the commands that belong to an ES|QL query + in a single expression. + """ + + def __init__(self, **params: ExpressionType): + super().__init__() + self._params = { + k: json.dumps(v) if not isinstance(v, InstrumentedExpression) else v + for k, v in params.items() + } + + def _render_internal(self) -> str: + return "ROW " + ", ".join([f"{k} = {v}" for k, v in self._params.items()]) + + +class Show(ESQLBase): + """Implementation of the ``SHOW`` source command. + + This class inherits from :class:`ESQLBase `, + which makes it possible to chain all the commands that belong to an ES|QL query + in a single expression. + """ + + def __init__(self, item: str): + super().__init__() + self._item = item + + def _render_internal(self) -> str: + return f"SHOW {self._item}" + + +class Branch(ESQLBase): + """Implementation of a branch inside a ``FORK`` processing command. + + This class inherits from :class:`ESQLBase `, + which makes it possible to chain all the commands that belong to the branch + in a single expression. + """ + + def _render_internal(self) -> str: + return "" + + +class ChangePoint(ESQLBase): + """Implementation of the ``CHANGE POINT`` processing command. + + This class inherits from :class:`ESQLBase `, + to make it possible to chain all the commands that belong to an ES|QL query + in a single expression. + """ + + def __init__(self, parent: ESQLBase, value: FieldType): + super().__init__(parent) + self._value = value + self._key: Optional[FieldType] = None + self._type_name: Optional[str] = None + self._pvalue_name: Optional[str] = None + + def on(self, key: FieldType) -> "ChangePoint": + """Continuation of the `CHANGE_POINT` command. + + :param key: The column with the key to order the values by. If not specified, + `@timestamp` is used. + """ + self._key = key + return self + + def as_(self, type_name: str, pvalue_name: str) -> "ChangePoint": + """Continuation of the `CHANGE_POINT` command. + + :param type_name: The name of the output column with the change point type. 
+ If not specified, `type` is used. + :param pvalue_name: The name of the output column with the p-value that indicates + how extreme the change point is. If not specified, `pvalue` is used. + """ + self._type_name = type_name + self._pvalue_name = pvalue_name + return self + + def _render_internal(self) -> str: + key = "" if not self._key else f" ON {self._key}" + names = ( + "" + if not self._type_name and not self._pvalue_name + else f' AS {self._type_name or "type"}, {self._pvalue_name or "pvalue"}' + ) + return f"CHANGE_POINT {self._value}{key}{names}" + + +class Completion(ESQLBase): + """Implementation of the ``COMPLETION`` processing command. + + This class inherits from :class:`ESQLBase `, + to make it possible to chain all the commands that belong to an ES|QL query + in a single expression. + """ + + def __init__( + self, parent: ESQLBase, *prompt: ExpressionType, **named_prompt: ExpressionType + ): + if len(prompt) + len(named_prompt) > 1: + raise ValueError( + "this method requires either one positional or one keyword argument only" + ) + super().__init__(parent) + self._prompt = prompt + self._named_prompt = named_prompt + self._inference_id: Optional[str] = None + + def with_(self, inference_id: str) -> "Completion": + """Continuation of the `COMPLETION` command. + + :param inference_id: The ID of the inference endpoint to use for the task. The + inference endpoint must be configured with the completion + task type. + """ + self._inference_id = inference_id + return self + + def _render_internal(self) -> str: + if self._inference_id is None: + raise ValueError("The completion command requires an inference ID") + if self._named_prompt: + column = list(self._named_prompt.keys())[0] + prompt = list(self._named_prompt.values())[0] + return f"COMPLETION {column} = {prompt} WITH {self._inference_id}" + else: + return f"COMPLETION {self._prompt[0]} WITH {self._inference_id}" + + +class Dissect(ESQLBase): + """Implementation of the ``DISSECT`` processing command. + + This class inherits from :class:`ESQLBase `, + to make it possible to chain all the commands that belong to an ES|QL query + in a single expression. + """ + + def __init__(self, parent: ESQLBase, input: FieldType, pattern: str): + super().__init__(parent) + self._input = input + self._pattern = pattern + self._separator: Optional[str] = None + + def append_separator(self, separator: str) -> "Dissect": + """Continuation of the ``DISSECT`` command. + + :param separator: A string used as the separator between appended values, + when using the append modifier. + """ + self._separator = separator + return self + + def _render_internal(self) -> str: + sep = ( + "" if self._separator is None else f' APPEND_SEPARATOR="{self._separator}"' + ) + return f"DISSECT {self._input} {json.dumps(self._pattern)}{sep}" + + +class Drop(ESQLBase): + """Implementation of the ``DROP`` processing command. + + This class inherits from :class:`ESQLBase `, + to make it possible to chain all the commands that belong to an ES|QL query + in a single expression. + """ + + def __init__(self, parent: ESQLBase, *columns: FieldType): + super().__init__(parent) + self._columns = columns + + def _render_internal(self) -> str: + return f'DROP {", ".join([str(col) for col in self._columns])}' + + +class Enrich(ESQLBase): + """Implementation of the ``ENRICH`` processing command. + + This class inherits from :class:`ESQLBase `, + to make it possible to chain all the commands that belong to an ES|QL query + in a single expression. 
+ """ + + def __init__(self, parent: ESQLBase, policy: str): + super().__init__(parent) + self._policy = policy + self._match_field: Optional[FieldType] = None + self._fields: Optional[Tuple[FieldType, ...]] = None + self._named_fields: Optional[Dict[str, FieldType]] = None + + def on(self, match_field: FieldType) -> "Enrich": + """Continuation of the ``ENRICH`` command. + + :param match_field: The match field. ``ENRICH`` uses its value to look for records + in the enrich index. If not specified, the match will be + performed on the column with the same name as the + `match_field` defined in the enrich policy. + """ + self._match_field = match_field + return self + + def with_(self, *fields: FieldType, **named_fields: FieldType) -> "Enrich": + """Continuation of the ``ENRICH`` command. + + :param fields: The enrich fields from the enrich index that are added to the result + as new columns, given as positional arguments. If a column with the + same name as the enrich field already exists, the existing column will + be replaced by the new column. If not specified, each of the enrich + fields defined in the policy is added. A column with the same name as + the enrich field will be dropped unless the enrich field is renamed. + :param named_fields: The enrich fields from the enrich index that are added to the + result as new columns, given as keyword arguments. The name of + the keyword arguments are used as column names. If a column has + the same name as the new name, it will be discarded. If a name + (new or original) occurs more than once, only the rightmost + duplicate creates a new column. + """ + if fields and named_fields: + raise ValueError( + "this method supports positional or keyword arguments but not both" + ) + self._fields = fields + self._named_fields = named_fields + return self + + def _render_internal(self) -> str: + on = "" if self._match_field is None else f" ON {self._match_field}" + with_ = "" + if self._named_fields: + with_ = f' WITH {", ".join([f"{name} = {field}" for name, field in self._named_fields.items()])}' + elif self._fields is not None: + with_ = f' WITH {", ".join([str(field) for field in self._fields])}' + return f"ENRICH {self._policy}{on}{with_}" + + +class Eval(ESQLBase): + """Implementation of the ``EVAL`` processing command. + + This class inherits from :class:`ESQLBase `, + to make it possible to chain all the commands that belong to an ES|QL query + in a single expression. + """ + + def __init__( + self, parent: ESQLBase, *columns: FieldType, **named_columns: FieldType + ): + if columns and named_columns: + raise ValueError( + "this method supports positional or keyword arguments but not both" + ) + super().__init__(parent) + self._columns = columns or named_columns + + def _render_internal(self) -> str: + if isinstance(self._columns, dict): + cols = ", ".join( + [f"{name} = {value}" for name, value in self._columns.items()] + ) + else: + cols = ", ".join([f"{col}" for col in self._columns]) + return f"EVAL {cols}" + + +class Fork(ESQLBase): + """Implementation of the ``FORK`` processing command. + + This class inherits from :class:`ESQLBase `, + to make it possible to chain all the commands that belong to an ES|QL query + in a single expression. 
+ """ + + def __init__( + self, + parent: ESQLBase, + fork1: ESQLBase, + fork2: Optional[ESQLBase] = None, + fork3: Optional[ESQLBase] = None, + fork4: Optional[ESQLBase] = None, + fork5: Optional[ESQLBase] = None, + fork6: Optional[ESQLBase] = None, + fork7: Optional[ESQLBase] = None, + fork8: Optional[ESQLBase] = None, + ): + super().__init__(parent) + self._branches = [fork1, fork2, fork3, fork4, fork5, fork6, fork7, fork8] + + def _render_internal(self) -> str: + cmds = "" + for branch in self._branches: + if branch: + cmd = branch.render()[3:].replace("\n", " ") + if cmds == "": + cmds = f"( {cmd} )" + else: + cmds += f"\n ( {cmd} )" + return f"FORK {cmds}" + + +class Grok(ESQLBase): + """Implementation of the ``GROK`` processing command. + + This class inherits from :class:`ESQLBase `, + to make it possible to chain all the commands that belong to an ES|QL query + in a single expression. + """ + + def __init__(self, parent: ESQLBase, input: FieldType, pattern: str): + super().__init__(parent) + self._input = input + self._pattern = pattern + + def _render_internal(self) -> str: + return f"GROK {self._input} {json.dumps(self._pattern)}" + + +class Keep(ESQLBase): + """Implementation of the ``KEEP`` processing command. + + This class inherits from :class:`ESQLBase `, + to make it possible to chain all the commands that belong to an ES|QL query + in a single expression. + """ + + def __init__(self, parent: ESQLBase, *columns: FieldType): + super().__init__(parent) + self._columns = columns + + def _render_internal(self) -> str: + return f'KEEP {", ".join([f"{col}" for col in self._columns])}' + + +class Limit(ESQLBase): + """Implementation of the ``LIMIT`` processing command. + + This class inherits from :class:`ESQLBase `, + to make it possible to chain all the commands that belong to an ES|QL query + in a single expression. + """ + + def __init__(self, parent: ESQLBase, max_number_of_rows: int): + super().__init__(parent) + self._max_number_of_rows = max_number_of_rows + + def _render_internal(self) -> str: + return f"LIMIT {self._max_number_of_rows}" + + +class LookupJoin(ESQLBase): + """Implementation of the ``LOOKUP JOIN`` processing command. + + This class inherits from :class:`ESQLBase `, + to make it possible to chain all the commands that belong to an ES|QL query + in a single expression. + """ + + def __init__(self, parent: ESQLBase, lookup_index: IndexType): + super().__init__(parent) + self._lookup_index = lookup_index + self._field: Optional[FieldType] = None + + def on(self, field: FieldType) -> "LookupJoin": + """Continuation of the `LOOKUP_JOIN` command. + + :param field: The field to join on. This field must exist in both your current query + results and in the lookup index. If the field contains multi-valued + entries, those entries will not match anything (the added fields will + contain null for those rows). + """ + self._field = field + return self + + def _render_internal(self) -> str: + if self._field is None: + raise ValueError("Joins require a field to join on.") + index = ( + self._lookup_index + if isinstance(self._lookup_index, str) + else self._lookup_index._index._name + ) + return f"LOOKUP JOIN {index} ON {self._field}" + + +class MvExpand(ESQLBase): + """Implementation of the ``MV_EXPAND`` processing command. + + This class inherits from :class:`ESQLBase `, + to make it possible to chain all the commands that belong to an ES|QL query + in a single expression. 
+ """ + + def __init__(self, parent: ESQLBase, column: FieldType): + super().__init__(parent) + self._column = column + + def _render_internal(self) -> str: + return f"MV_EXPAND {self._column}" + + +class Rename(ESQLBase): + """Implementation of the ``RENAME`` processing command. + + This class inherits from :class:`ESQLBase `, + to make it possible to chain all the commands that belong to an ES|QL query + in a single expression. + """ + + def __init__(self, parent: ESQLBase, **columns: FieldType): + super().__init__(parent) + self._columns = columns + + def _render_internal(self) -> str: + return f'RENAME {", ".join([f"{old_name} AS {new_name}" for old_name, new_name in self._columns.items()])}' + + +class Sample(ESQLBase): + """Implementation of the ``SAMPLE`` processing command. + + This class inherits from :class:`ESQLBase `, + to make it possible to chain all the commands that belong to an ES|QL query + in a single expression. + """ + + def __init__(self, parent: ESQLBase, probability: float): + super().__init__(parent) + self._probability = probability + + def _render_internal(self) -> str: + return f"SAMPLE {self._probability}" + + +class Sort(ESQLBase): + """Implementation of the ``SORT`` processing command. + + This class inherits from :class:`ESQLBase `, + to make it possible to chain all the commands that belong to an ES|QL query + in a single expression. + """ + + def __init__(self, parent: ESQLBase, *columns: FieldType): + super().__init__(parent) + self._columns = columns + + def _render_internal(self) -> str: + return f'SORT {", ".join([f"{col}" for col in self._columns])}' + + +class Stats(ESQLBase): + """Implementation of the ``STATS`` processing command. + + This class inherits from :class:`ESQLBase `, + to make it possible to chain all the commands that belong to an ES|QL query + in a single expression. + """ + + def __init__( + self, + parent: ESQLBase, + *expressions: ExpressionType, + **named_expressions: ExpressionType, + ): + if expressions and named_expressions: + raise ValueError( + "this method supports positional or keyword arguments but not both" + ) + super().__init__(parent) + self._expressions = expressions or named_expressions + self._grouping_expressions: Optional[Tuple[ExpressionType, ...]] = None + + def by(self, *grouping_expressions: ExpressionType) -> "Stats": + self._grouping_expressions = grouping_expressions + return self + + def _render_internal(self) -> str: + if isinstance(self._expressions, dict): + exprs = [f"{key} = {value}" for key, value in self._expressions.items()] + else: + exprs = [f"{expr}" for expr in self._expressions] + expression_separator = ",\n " + by = ( + "" + if self._grouping_expressions is None + else f'\n BY {", ".join([f"{expr}" for expr in self._grouping_expressions])}' + ) + return f'STATS {expression_separator.join([f"{expr}" for expr in exprs])}{by}' + + +class Where(ESQLBase): + """Implementation of the ``WHERE`` processing command. + + This class inherits from :class:`ESQLBase `, + to make it possible to chain all the commands that belong to an ES|QL query + in a single expression. 
+ """ + + def __init__(self, parent: ESQLBase, *expressions: ExpressionType): + super().__init__(parent) + self._expressions = expressions + + def _render_internal(self) -> str: + return f'WHERE {" AND ".join([f"{expr}" for expr in self._expressions])}' + + +def and_(*expressions: InstrumentedExpression) -> "InstrumentedExpression": + """Combine two or more expressions with the AND operator.""" + return InstrumentedExpression(" AND ".join([f"({expr})" for expr in expressions])) + + +def or_(*expressions: InstrumentedExpression) -> "InstrumentedExpression": + """Combine two or more expressions with the OR operator.""" + return InstrumentedExpression(" OR ".join([f"({expr})" for expr in expressions])) + + +def not_(expression: InstrumentedExpression) -> "InstrumentedExpression": + """Negate an expression.""" + return InstrumentedExpression(f"NOT ({expression})") diff --git a/elasticsearch/esql/functions.py b/elasticsearch/esql/functions.py new file mode 100644 index 000000000..515e3ddfc --- /dev/null +++ b/elasticsearch/esql/functions.py @@ -0,0 +1,1738 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import json +from typing import Any + +from elasticsearch.dsl.document_base import InstrumentedExpression +from elasticsearch.esql.esql import ExpressionType + + +def _render(v: Any) -> str: + return json.dumps(v) if not isinstance(v, InstrumentedExpression) else str(v) + + +def abs(number: ExpressionType) -> InstrumentedExpression: + """Returns the absolute value. + + :param number: Numeric expression. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"ABS({_render(number)})") + + +def acos(number: ExpressionType) -> InstrumentedExpression: + """Returns the arccosine of `n` as an angle, expressed in radians. + + :param number: Number between -1 and 1. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"ACOS({_render(number)})") + + +def asin(number: ExpressionType) -> InstrumentedExpression: + """Returns the arcsine of the input numeric expression as an angle, + expressed in radians. + + :param number: Number between -1 and 1. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"ASIN({_render(number)})") + + +def atan(number: ExpressionType) -> InstrumentedExpression: + """Returns the arctangent of the input numeric expression as an angle, + expressed in radians. + + :param number: Numeric expression. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"ATAN({_render(number)})") + + +def atan2( + y_coordinate: ExpressionType, x_coordinate: ExpressionType +) -> InstrumentedExpression: + """The angle between the positive x-axis and the ray from the origin to the + point (x , y) in the Cartesian plane, expressed in radians. 
+ + :param y_coordinate: y coordinate. If `null`, the function returns `null`. + :param x_coordinate: x coordinate. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"ATAN2({y_coordinate}, {x_coordinate})") + + +def avg(number: ExpressionType) -> InstrumentedExpression: + """The average of a numeric field. + + :param number: Expression that outputs values to average. + """ + return InstrumentedExpression(f"AVG({_render(number)})") + + +def avg_over_time(number: ExpressionType) -> InstrumentedExpression: + """The average over time of a numeric field. + + :param number: Expression that outputs values to average. + """ + return InstrumentedExpression(f"AVG_OVER_TIME({_render(number)})") + + +def bit_length(string: ExpressionType) -> InstrumentedExpression: + """Returns the bit length of a string. + + :param string: String expression. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"BIT_LENGTH({_render(string)})") + + +def bucket( + field: ExpressionType, + buckets: ExpressionType, + from_: ExpressionType, + to: ExpressionType, +) -> InstrumentedExpression: + """Creates groups of values - buckets - out of a datetime or numeric input. + The size of the buckets can either be provided directly, or chosen based on + a recommended count and values range. + + :param field: Numeric or date expression from which to derive buckets. + :param buckets: Target number of buckets, or desired bucket size if `from` + and `to` parameters are omitted. + :param from_: Start of the range. Can be a number, a date or a date expressed + as a string. + :param to: End of the range. Can be a number, a date or a date expressed as a string. + """ + return InstrumentedExpression( + f"BUCKET({_render(field)}, {_render(buckets)}, {from_}, {_render(to)})" + ) + + +def byte_length(string: ExpressionType) -> InstrumentedExpression: + """Returns the byte length of a string. + + :param string: String expression. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"BYTE_LENGTH({_render(string)})") + + +def case(*conditions: ExpressionType) -> InstrumentedExpression: + """Accepts pairs of conditions and values. The function returns the value + that belongs to the first condition that evaluates to `true`. If the + number of arguments is odd, the last argument is the default value which is + returned when no condition matches. If the number of arguments is even, and + no condition matches, the function returns `null`. + """ + return InstrumentedExpression( + f'CASE({", ".join([_render(c) for c in conditions])})' + ) + + +def categorize(field: ExpressionType) -> InstrumentedExpression: + """Groups text messages into categories of similarly formatted text values. + + :param field: Expression to categorize + """ + return InstrumentedExpression(f"CATEGORIZE({_render(field)})") + + +def cbrt(number: ExpressionType) -> InstrumentedExpression: + """Returns the cube root of a number. The input can be any numeric value, + the return value is always a double. Cube roots of infinities are null. + + :param number: Numeric expression. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"CBRT({_render(number)})") + + +def ceil(number: ExpressionType) -> InstrumentedExpression: + """Round a number up to the nearest integer. + + :param number: Numeric expression. If `null`, the function returns `null`. 
+ """ + return InstrumentedExpression(f"CEIL({_render(number)})") + + +def cidr_match(ip: ExpressionType, block_x: ExpressionType) -> InstrumentedExpression: + """Returns true if the provided IP is contained in one of the provided CIDR blocks. + + :param ip: IP address of type `ip` (both IPv4 and IPv6 are supported). + :param block_x: CIDR block to test the IP against. + """ + return InstrumentedExpression(f"CIDR_MATCH({_render(ip)}, {block_x})") + + +def coalesce(first: ExpressionType, rest: ExpressionType) -> InstrumentedExpression: + """Returns the first of its arguments that is not null. If all arguments + are null, it returns `null`. + + :param first: Expression to evaluate. + :param rest: Other expression to evaluate. + """ + return InstrumentedExpression(f"COALESCE({_render(first)}, {_render(rest)})") + + +def concat(*strings: ExpressionType) -> InstrumentedExpression: + """Concatenates two or more strings.""" + return InstrumentedExpression( + f'CONCAT({", ".join([f"{_render(s)}" for s in strings])})' + ) + + +def cos(angle: ExpressionType) -> InstrumentedExpression: + """Returns the cosine of an angle. + + :param angle: An angle, in radians. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"COS({_render(angle)})") + + +def cosh(number: ExpressionType) -> InstrumentedExpression: + """Returns the hyperbolic cosine of a number. + + :param number: Numeric expression. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"COSH({_render(number)})") + + +def count(field: ExpressionType) -> InstrumentedExpression: + """Returns the total number (count) of input values. + + :param field: Expression that outputs values to be counted. If omitted, + equivalent to `COUNT(*)` (the number of rows). + """ + return InstrumentedExpression(f"COUNT({_render(field)})") + + +def count_distinct( + field: ExpressionType, precision: ExpressionType +) -> InstrumentedExpression: + """Returns the approximate number of distinct values. + + :param field: Column or literal for which to count the number of distinct values. + :param precision: Precision threshold. The maximum supported value is 40000. Thresholds + above this number will have the same effect as a threshold of 40000. + The default value is 3000. + """ + return InstrumentedExpression( + f"COUNT_DISTINCT({_render(field)}, {_render(precision)})" + ) + + +def count_distinct_over_time( + field: ExpressionType, precision: ExpressionType +) -> InstrumentedExpression: + """The count of distinct values over time for a field. + + :param field: + :param precision: Precision threshold. The maximum supported value is 40000. Thresholds + above this number will have the same effect as a threshold of 40000. The + default value is 3000. + """ + return InstrumentedExpression( + f"COUNT_DISTINCT_OVER_TIME({_render(field)}, {_render(precision)})" + ) + + +def count_over_time(field: ExpressionType) -> InstrumentedExpression: + """The count over time value of a field. + + :param field: + """ + return InstrumentedExpression(f"COUNT_OVER_TIME({_render(field)})") + + +def date_diff( + unit: ExpressionType, start_timestamp: ExpressionType, end_timestamp: ExpressionType +) -> InstrumentedExpression: + """Subtracts the `startTimestamp` from the `endTimestamp` and returns the + difference in multiples of `unit`. If `startTimestamp` is later than the + `endTimestamp`, negative values are returned. 
+ + :param unit: Time difference unit + :param start_timestamp: A string representing a start timestamp + :param end_timestamp: A string representing an end timestamp + """ + return InstrumentedExpression( + f"DATE_DIFF({_render(unit)}, {start_timestamp}, {end_timestamp})" + ) + + +def date_extract( + date_part: ExpressionType, date: ExpressionType +) -> InstrumentedExpression: + """Extracts parts of a date, like year, month, day, hour. + + :param date_part: Part of the date to extract. Can be: + `aligned_day_of_week_in_month`, `aligned_day_of_week_in_year`, + `aligned_week_of_month`, `aligned_week_of_year`, `ampm_of_day`, + `clock_hour_of_ampm`, `clock_hour_of_day`, `day_of_month`, `day_of_week`, + `day_of_year`, `epoch_day`, `era`, `hour_of_ampm`, `hour_of_day`, + `instant_seconds`, `micro_of_day`, `micro_of_second`, `milli_of_day`, + `milli_of_second`, `minute_of_day`, `minute_of_hour`, `month_of_year`, + `nano_of_day`, `nano_of_second`, `offset_seconds`, `proleptic_month`, + `second_of_day`, `second_of_minute`, `year`, or `year_of_era`. If `null`, + the function returns `null`. + :param date: Date expression. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"DATE_EXTRACT({date_part}, {_render(date)})") + + +def date_format( + date: ExpressionType, + date_format: ExpressionType = None, +) -> InstrumentedExpression: + """Returns a string representation of a date, in the provided format. + + :param date_format: Date format (optional). If no format is specified, the + `yyyy-MM-dd'T'HH:mm:ss.SSSZ` format is used. If `null`, the + function returns `null`. + :param date: Date expression. If `null`, the function returns `null`. + """ + if date_format is not None: + return InstrumentedExpression( + f"DATE_FORMAT({json.dumps(date_format)}, {_render(date)})" + ) + else: + return InstrumentedExpression(f"DATE_FORMAT({_render(date)})") + + +def date_parse( + date_pattern: ExpressionType, date_string: ExpressionType +) -> InstrumentedExpression: + """Returns a date by parsing the second argument using the format specified + in the first argument. + + :param date_pattern: The date format. If `null`, the function returns `null`. + :param date_string: Date expression as a string. If `null` or an empty + string, the function returns `null`. + """ + return InstrumentedExpression(f"DATE_PARSE({date_pattern}, {date_string})") + + +def date_trunc( + interval: ExpressionType, date: ExpressionType +) -> InstrumentedExpression: + """Rounds down a date to the closest interval since epoch, which starts at `0001-01-01T00:00:00Z`. + + :param interval: Interval; expressed using the timespan literal syntax. + :param date: Date expression + """ + return InstrumentedExpression(f"DATE_TRUNC({_render(interval)}, {_render(date)})") + + +def e() -> InstrumentedExpression: + """Returns Euler’s number).""" + return InstrumentedExpression("E()") + + +def ends_with(str: ExpressionType, suffix: ExpressionType) -> InstrumentedExpression: + """Returns a boolean that indicates whether a keyword string ends with + another string. + + :param str: String expression. If `null`, the function returns `null`. + :param suffix: String expression. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"ENDS_WITH({_render(str)}, {_render(suffix)})") + + +def exp(number: ExpressionType) -> InstrumentedExpression: + """Returns the value of e raised to the power of the given number. + + :param number: Numeric expression. If `null`, the function returns `null`. 
+ """ + return InstrumentedExpression(f"EXP({_render(number)})") + + +def first_over_time(field: ExpressionType) -> InstrumentedExpression: + """The earliest value of a field, where recency determined by the + `@timestamp` field. + + :param field: + """ + return InstrumentedExpression(f"FIRST_OVER_TIME({_render(field)})") + + +def floor(number: ExpressionType) -> InstrumentedExpression: + """Round a number down to the nearest integer. + + :param number: Numeric expression. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"FLOOR({_render(number)})") + + +def from_base64(string: ExpressionType) -> InstrumentedExpression: + """Decode a base64 string. + + :param string: A base64 string. + """ + return InstrumentedExpression(f"FROM_BASE64({_render(string)})") + + +def greatest(first: ExpressionType, rest: ExpressionType) -> InstrumentedExpression: + """Returns the maximum value from multiple columns. This is similar to + `MV_MAX` except it is intended to run on multiple columns at once. + + :param first: First of the columns to evaluate. + :param rest: The rest of the columns to evaluate. + """ + return InstrumentedExpression(f"GREATEST({_render(first)}, {_render(rest)})") + + +def hash(algorithm: ExpressionType, input: ExpressionType) -> InstrumentedExpression: + """Computes the hash of the input using various algorithms such as MD5, + SHA, SHA-224, SHA-256, SHA-384, SHA-512. + + :param algorithm: Hash algorithm to use. + :param input: Input to hash. + """ + return InstrumentedExpression(f"HASH({_render(algorithm)}, {_render(input)})") + + +def hypot(number1: ExpressionType, number2: ExpressionType) -> InstrumentedExpression: + """Returns the hypotenuse of two numbers. The input can be any numeric + values, the return value is always a double. Hypotenuses of infinities are null. + + :param number1: Numeric expression. If `null`, the function returns `null`. + :param number2: Numeric expression. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"HYPOT({number1}, {number2})") + + +def ip_prefix( + ip: ExpressionType, + prefix_length_v4: ExpressionType, + prefix_length_v6: ExpressionType, +) -> InstrumentedExpression: + """Truncates an IP to a given prefix length. + + :param ip: IP address of type `ip` (both IPv4 and IPv6 are supported). + :param prefix_length_v4: Prefix length for IPv4 addresses. + :param prefix_length_v6: Prefix length for IPv6 addresses. + """ + return InstrumentedExpression( + f"IP_PREFIX({_render(ip)}, {prefix_length_v4}, {prefix_length_v6})" + ) + + +def knn( + field: ExpressionType, query: ExpressionType, options: ExpressionType = None +) -> InstrumentedExpression: + """Finds the k nearest vectors to a query vector, as measured by a + similarity metric. knn function finds nearest vectors through approximate + search on indexed dense_vectors. + + :param field: Field that the query will target. + :param query: Vector value to find top nearest neighbours for. + :param options: (Optional) kNN additional options as function named parameters. + """ + if options is not None: + return InstrumentedExpression( + f"KNN({_render(field)}, {_render(query)}, {_render(options)})" + ) + else: + return InstrumentedExpression(f"KNN({_render(field)}, {_render(query)})") + + +def kql(query: ExpressionType) -> InstrumentedExpression: + """Performs a KQL query. Returns true if the provided KQL query string + matches the row. + + :param query: Query string in KQL query string format. 
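+
+    For illustration (assuming string arguments are rendered as double-quoted
+    literals, as they are elsewhere in this module)::
+
+        kql("still_hired: true")  # -> KQL("still_hired: true")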
+ """ + return InstrumentedExpression(f"KQL({_render(query)})") + + +def last_over_time(field: ExpressionType) -> InstrumentedExpression: + """The latest value of a field, where recency determined by the + `@timestamp` field. + + :param field: + """ + return InstrumentedExpression(f"LAST_OVER_TIME({_render(field)})") + + +def least(first: ExpressionType, rest: ExpressionType) -> InstrumentedExpression: + """Returns the minimum value from multiple columns. This is similar to + `MV_MIN` except it is intended to run on multiple columns at once. + + :param first: First of the columns to evaluate. + :param rest: The rest of the columns to evaluate. + """ + return InstrumentedExpression(f"LEAST({_render(first)}, {_render(rest)})") + + +def left(string: ExpressionType, length: ExpressionType) -> InstrumentedExpression: + """Returns the substring that extracts *length* chars from *string* + starting from the left. + + :param string: The string from which to return a substring. + :param length: The number of characters to return. + """ + return InstrumentedExpression(f"LEFT({_render(string)}, {_render(length)})") + + +def length(string: ExpressionType) -> InstrumentedExpression: + """Returns the character length of a string. + + :param string: String expression. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"LENGTH({_render(string)})") + + +def locate( + string: ExpressionType, substring: ExpressionType, start: ExpressionType +) -> InstrumentedExpression: + """Returns an integer that indicates the position of a keyword substring + within another string. Returns `0` if the substring cannot be found. Note + that string positions start from `1`. + + :param string: An input string + :param substring: A substring to locate in the input string + :param start: The start index + """ + return InstrumentedExpression( + f"LOCATE({_render(string)}, {_render(substring)}, {_render(start)})" + ) + + +def log(base: ExpressionType, number: ExpressionType) -> InstrumentedExpression: + """Returns the logarithm of a value to a base. The input can be any numeric + value, the return value is always a double. Logs of zero, negative + numbers, and base of one return `null` as well as a warning. + + :param base: Base of logarithm. If `null`, the function returns `null`. If + not provided, this function returns the natural logarithm (base e) of a value. + :param number: Numeric expression. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"LOG({_render(base)}, {_render(number)})") + + +def log10(number: ExpressionType) -> InstrumentedExpression: + """Returns the logarithm of a value to base 10. The input can be any + numeric value, the return value is always a double. Logs of 0 and negative + numbers return `null` as well as a warning. + + :param number: Numeric expression. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"LOG10({_render(number)})") + + +def ltrim(string: ExpressionType) -> InstrumentedExpression: + """Removes leading whitespaces from a string. + + :param string: String expression. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"LTRIM({_render(string)})") + + +def match( + field: ExpressionType, query: ExpressionType, options: ExpressionType = None +) -> InstrumentedExpression: + """Use `MATCH` to perform a match query on the specified field. Using + `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL. + + :param field: Field that the query will target. 
+ :param query: Value to find in the provided field. + :param options: (Optional) Match additional options as function named parameters. + """ + if options is not None: + return InstrumentedExpression( + f"MATCH({_render(field)}, {_render(query)}, {_render(options)})" + ) + else: + return InstrumentedExpression(f"MATCH({_render(field)}, {_render(query)})") + + +def match_phrase( + field: ExpressionType, query: ExpressionType, options: ExpressionType = None +) -> InstrumentedExpression: + """Use `MATCH_PHRASE` to perform a `match_phrase` on the specified field. + Using `MATCH_PHRASE` is equivalent to using the `match_phrase` query in the + Elasticsearch Query DSL. + + :param field: Field that the query will target. + :param query: Value to find in the provided field. + :param options: (Optional) MatchPhrase additional options as function named parameters. + """ + if options is not None: + return InstrumentedExpression( + f"MATCH_PHRASE({_render(field)}, {_render(query)}, {_render(options)})" + ) + else: + return InstrumentedExpression( + f"MATCH_PHRASE({_render(field)}, {_render(query)})" + ) + + +def max(field: ExpressionType) -> InstrumentedExpression: + """The maximum value of a field. + + :param field: + """ + return InstrumentedExpression(f"MAX({_render(field)})") + + +def max_over_time(field: ExpressionType) -> InstrumentedExpression: + """The maximum over time value of a field. + + :param field: + """ + return InstrumentedExpression(f"MAX_OVER_TIME({_render(field)})") + + +def md5(input: ExpressionType) -> InstrumentedExpression: + """Computes the MD5 hash of the input. + + :param input: Input to hash. + """ + return InstrumentedExpression(f"MD5({_render(input)})") + + +def median(number: ExpressionType) -> InstrumentedExpression: + """The value that is greater than half of all values and less than half of + all values, also known as the 50% `PERCENTILE`. + + :param number: Expression that outputs values to calculate the median of. + """ + return InstrumentedExpression(f"MEDIAN({_render(number)})") + + +def median_absolute_deviation(number: ExpressionType) -> InstrumentedExpression: + """Returns the median absolute deviation, a measure of variability. It is a + robust statistic, meaning that it is useful for describing data that may + have outliers, or may not be normally distributed. For such data it can be + more descriptive than standard deviation. It is calculated as the median + of each data point’s deviation from the median of the entire sample. That + is, for a random variable `X`, the median absolute deviation is + `median(|median(X) - X|)`. + + :param number: + """ + return InstrumentedExpression(f"MEDIAN_ABSOLUTE_DEVIATION({_render(number)})") + + +def min(field: ExpressionType) -> InstrumentedExpression: + """The minimum value of a field. + + :param field: + """ + return InstrumentedExpression(f"MIN({_render(field)})") + + +def min_over_time(field: ExpressionType) -> InstrumentedExpression: + """The minimum over time value of a field. + + :param field: + """ + return InstrumentedExpression(f"MIN_OVER_TIME({_render(field)})") + + +def multi_match( + query: ExpressionType, fields: ExpressionType, options: ExpressionType = None +) -> InstrumentedExpression: + """Use `MULTI_MATCH` to perform a multi-match query on the specified field. + The multi_match query builds on the match query to allow multi-field queries. + + :param query: Value to find in the provided fields. 
+ :param fields: Fields to use for matching + :param options: (Optional) Additional options for MultiMatch, passed as function + named parameters + """ + if options is not None: + return InstrumentedExpression( + f"MULTI_MATCH({_render(query)}, {_render(fields)}, {_render(options)})" + ) + else: + return InstrumentedExpression( + f"MULTI_MATCH({_render(query)}, {_render(fields)})" + ) + + +def mv_append(field1: ExpressionType, field2: ExpressionType) -> InstrumentedExpression: + """Concatenates values of two multi-value fields. + + :param field1: + :param field2: + """ + return InstrumentedExpression(f"MV_APPEND({field1}, {field2})") + + +def mv_avg(number: ExpressionType) -> InstrumentedExpression: + """Converts a multivalued field into a single valued field containing the + average of all of the values. + + :param number: Multivalue expression. + """ + return InstrumentedExpression(f"MV_AVG({_render(number)})") + + +def mv_concat(string: ExpressionType, delim: ExpressionType) -> InstrumentedExpression: + """Converts a multivalued string expression into a single valued column + containing the concatenation of all values separated by a delimiter. + + :param string: Multivalue expression. + :param delim: Delimiter. + """ + return InstrumentedExpression(f"MV_CONCAT({_render(string)}, {_render(delim)})") + + +def mv_count(field: ExpressionType) -> InstrumentedExpression: + """Converts a multivalued expression into a single valued column containing + a count of the number of values. + + :param field: Multivalue expression. + """ + return InstrumentedExpression(f"MV_COUNT({_render(field)})") + + +def mv_dedupe(field: ExpressionType) -> InstrumentedExpression: + """Remove duplicate values from a multivalued field. + + :param field: Multivalue expression. + """ + return InstrumentedExpression(f"MV_DEDUPE({_render(field)})") + + +def mv_first(field: ExpressionType) -> InstrumentedExpression: + """Converts a multivalued expression into a single valued column containing + the first value. This is most useful when reading from a function that + emits multivalued columns in a known order like `SPLIT`. + + :param field: Multivalue expression. + """ + return InstrumentedExpression(f"MV_FIRST({_render(field)})") + + +def mv_last(field: ExpressionType) -> InstrumentedExpression: + """Converts a multivalue expression into a single valued column containing + the last value. This is most useful when reading from a function that emits + multivalued columns in a known order like `SPLIT`. + + :param field: Multivalue expression. + """ + return InstrumentedExpression(f"MV_LAST({_render(field)})") + + +def mv_max(field: ExpressionType) -> InstrumentedExpression: + """Converts a multivalued expression into a single valued column containing + the maximum value. + + :param field: Multivalue expression. + """ + return InstrumentedExpression(f"MV_MAX({_render(field)})") + + +def mv_median(number: ExpressionType) -> InstrumentedExpression: + """Converts a multivalued field into a single valued field containing the + median value. + + :param number: Multivalue expression. + """ + return InstrumentedExpression(f"MV_MEDIAN({_render(number)})") + + +def mv_median_absolute_deviation(number: ExpressionType) -> InstrumentedExpression: + """Converts a multivalued field into a single valued field containing the + median absolute deviation. It is calculated as the median of each data + point’s deviation from the median of the entire sample. 
That is, for a + random variable `X`, the median absolute deviation is `median(|median(X) - X|)`. + + :param number: Multivalue expression. + """ + return InstrumentedExpression(f"MV_MEDIAN_ABSOLUTE_DEVIATION({_render(number)})") + + +def mv_min(field: ExpressionType) -> InstrumentedExpression: + """Converts a multivalued expression into a single valued column containing + the minimum value. + + :param field: Multivalue expression. + """ + return InstrumentedExpression(f"MV_MIN({_render(field)})") + + +def mv_percentile( + number: ExpressionType, percentile: ExpressionType +) -> InstrumentedExpression: + """Converts a multivalued field into a single valued field containing the + value at which a certain percentage of observed values occur. + + :param number: Multivalue expression. + :param percentile: The percentile to calculate. Must be a number between 0 + and 100. Numbers out of range will return a null instead. + """ + return InstrumentedExpression( + f"MV_PERCENTILE({_render(number)}, {_render(percentile)})" + ) + + +def mv_pseries_weighted_sum( + number: ExpressionType, p: ExpressionType +) -> InstrumentedExpression: + """Converts a multivalued expression into a single-valued column by + multiplying every element on the input list by its corresponding term in + P-Series and computing the sum. + + :param number: Multivalue expression. + :param p: It is a constant number that represents the *p* parameter in the + P-Series. It impacts every element’s contribution to the weighted sum. + """ + return InstrumentedExpression( + f"MV_PSERIES_WEIGHTED_SUM({_render(number)}, {_render(p)})" + ) + + +def mv_slice( + field: ExpressionType, start: ExpressionType, end: ExpressionType = None +) -> InstrumentedExpression: + """Returns a subset of the multivalued field using the start and end index + values. This is most useful when reading from a function that emits + multivalued columns in a known order like `SPLIT` or `MV_SORT`. + + :param field: Multivalue expression. If `null`, the function returns `null`. + :param start: Start position. If `null`, the function returns `null`. The + start argument can be negative. An index of -1 is used to specify + the last value in the list. + :param end: End position(included). Optional; if omitted, the position at + `start` is returned. The end argument can be negative. An index of -1 + is used to specify the last value in the list. + """ + if end is not None: + return InstrumentedExpression( + f"MV_SLICE({_render(field)}, {_render(start)}, {_render(end)})" + ) + else: + return InstrumentedExpression(f"MV_SLICE({_render(field)}, {_render(start)})") + + +def mv_sort(field: ExpressionType, order: ExpressionType) -> InstrumentedExpression: + """Sorts a multivalued field in lexicographical order. + + :param field: Multivalue expression. If `null`, the function returns `null`. + :param order: Sort order. The valid options are ASC and DESC, the default is ASC. + """ + return InstrumentedExpression(f"MV_SORT({_render(field)}, {_render(order)})") + + +def mv_sum(number: ExpressionType) -> InstrumentedExpression: + """Converts a multivalued field into a single valued field containing the + sum of all of the values. + + :param number: Multivalue expression. + """ + return InstrumentedExpression(f"MV_SUM({_render(number)})") + + +def mv_zip( + string1: ExpressionType, string2: ExpressionType, delim: ExpressionType = None +) -> InstrumentedExpression: + """Combines the values from two multivalued fields with a delimiter that + joins them together. 
+ + :param string1: Multivalue expression. + :param string2: Multivalue expression. + :param delim: Delimiter. Optional; if omitted, `,` is used as a default delimiter. + """ + if delim is not None: + return InstrumentedExpression(f"MV_ZIP({string1}, {string2}, {_render(delim)})") + else: + return InstrumentedExpression(f"MV_ZIP({string1}, {string2})") + + +def now() -> InstrumentedExpression: + """Returns current date and time.""" + return InstrumentedExpression("NOW()") + + +def percentile( + number: ExpressionType, percentile: ExpressionType +) -> InstrumentedExpression: + """Returns the value at which a certain percentage of observed values + occur. For example, the 95th percentile is the value which is greater than + 95% of the observed values and the 50th percentile is the `MEDIAN`. + + :param number: + :param percentile: + """ + return InstrumentedExpression( + f"PERCENTILE({_render(number)}, {_render(percentile)})" + ) + + +def pi() -> InstrumentedExpression: + """Returns Pi, the ratio of a circle’s circumference to its diameter.""" + return InstrumentedExpression("PI()") + + +def pow(base: ExpressionType, exponent: ExpressionType) -> InstrumentedExpression: + """Returns the value of `base` raised to the power of `exponent`. + + :param base: Numeric expression for the base. If `null`, the function returns `null`. + :param exponent: Numeric expression for the exponent. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"POW({_render(base)}, {_render(exponent)})") + + +def qstr( + query: ExpressionType, options: ExpressionType = None +) -> InstrumentedExpression: + """Performs a query string query. Returns true if the provided query string + matches the row. + + :param query: Query string in Lucene query string format. + :param options: (Optional) Additional options for Query String as function named + parameters. + """ + if options is not None: + return InstrumentedExpression(f"QSTR({_render(query)}, {_render(options)})") + else: + return InstrumentedExpression(f"QSTR({_render(query)})") + + +def rate(field: ExpressionType) -> InstrumentedExpression: + """The rate of a counter field. + + :param field: + """ + return InstrumentedExpression(f"RATE({_render(field)})") + + +def repeat(string: ExpressionType, number: ExpressionType) -> InstrumentedExpression: + """Returns a string constructed by concatenating `string` with itself the + specified `number` of times. + + :param string: String expression. + :param number: Number times to repeat. + """ + return InstrumentedExpression(f"REPEAT({_render(string)}, {_render(number)})") + + +def replace( + string: ExpressionType, regex: ExpressionType, new_string: ExpressionType +) -> InstrumentedExpression: + """The function substitutes in the string `str` any match of the regular + expression `regex` with the replacement string `newStr`. + + :param string: String expression. + :param regex: Regular expression. + :param new_string: Replacement string. + """ + return InstrumentedExpression( + f"REPLACE({_render(string)}, {_render(regex)}, {new_string})" + ) + + +def reverse(str: ExpressionType) -> InstrumentedExpression: + """Returns a new string representing the input string in reverse order. + + :param str: String expression. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"REVERSE({_render(str)})") + + +def right(string: ExpressionType, length: ExpressionType) -> InstrumentedExpression: + """Return the substring that extracts *length* chars from *str* starting + from the right. 
+ + :param string: The string from which to returns a substring. + :param length: The number of characters to return. + """ + return InstrumentedExpression(f"RIGHT({_render(string)}, {_render(length)})") + + +def round( + number: ExpressionType, decimals: ExpressionType = None +) -> InstrumentedExpression: + """Rounds a number to the specified number of decimal places. Defaults to + 0, which returns the nearest integer. If the precision is a negative + number, rounds to the number of digits left of the decimal point. + + :param number: The numeric value to round. If `null`, the function returns `null`. + :param decimals: The number of decimal places to round to. Defaults to 0. If + `null`, the function returns `null`. + """ + if decimals is not None: + return InstrumentedExpression(f"ROUND({_render(number)}, {_render(decimals)})") + else: + return InstrumentedExpression(f"ROUND({_render(number)})") + + +def round_to(field: ExpressionType, points: ExpressionType) -> InstrumentedExpression: + """Rounds down to one of a list of fixed points. + + :param field: The numeric value to round. If `null`, the function returns `null`. + :param points: Remaining rounding points. Must be constants. + """ + return InstrumentedExpression(f"ROUND_TO({_render(field)}, {_render(points)})") + + +def rtrim(string: ExpressionType) -> InstrumentedExpression: + """Removes trailing whitespaces from a string. + + :param string: String expression. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"RTRIM({_render(string)})") + + +def sample(field: ExpressionType, limit: ExpressionType) -> InstrumentedExpression: + """Collects sample values for a field. + + :param field: The field to collect sample values for. + :param limit: The maximum number of values to collect. + """ + return InstrumentedExpression(f"SAMPLE({_render(field)}, {_render(limit)})") + + +def scalb(d: ExpressionType, scale_factor: ExpressionType) -> InstrumentedExpression: + """Returns the result of `d * 2 ^ scaleFactor`, Similar to Java's `scalb` + function. Result is rounded as if performed by a single correctly rounded + floating-point multiply to a member of the double value set. + + :param d: Numeric expression for the multiplier. If `null`, the function + returns `null`. + :param scale_factor: Numeric expression for the scale factor. If `null`, the + function returns `null`. + """ + return InstrumentedExpression(f"SCALB({_render(d)}, {scale_factor})") + + +def sha1(input: ExpressionType) -> InstrumentedExpression: + """Computes the SHA1 hash of the input. + + :param input: Input to hash. + """ + return InstrumentedExpression(f"SHA1({_render(input)})") + + +def sha256(input: ExpressionType) -> InstrumentedExpression: + """Computes the SHA256 hash of the input. + + :param input: Input to hash. + """ + return InstrumentedExpression(f"SHA256({_render(input)})") + + +def signum(number: ExpressionType) -> InstrumentedExpression: + """Returns the sign of the given number. It returns `-1` for negative + numbers, `0` for `0` and `1` for positive numbers. + + :param number: Numeric expression. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"SIGNUM({_render(number)})") + + +def sin(angle: ExpressionType) -> InstrumentedExpression: + """Returns the sine of an angle. + + :param angle: An angle, in radians. If `null`, the function returns `null`. 
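+
+    Illustrative sketch (rendered text inferred)::
+
+        sin(E("angle"))  # -> SIN(angle)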
+ """ + return InstrumentedExpression(f"SIN({_render(angle)})") + + +def sinh(number: ExpressionType) -> InstrumentedExpression: + """Returns the hyperbolic sine of a number. + + :param number: Numeric expression. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"SINH({_render(number)})") + + +def space(number: ExpressionType) -> InstrumentedExpression: + """Returns a string made of `number` spaces. + + :param number: Number of spaces in result. + """ + return InstrumentedExpression(f"SPACE({_render(number)})") + + +def split(string: ExpressionType, delim: ExpressionType) -> InstrumentedExpression: + """Split a single valued string into multiple strings. + + :param string: String expression. If `null`, the function returns `null`. + :param delim: Delimiter. Only single byte delimiters are currently supported. + """ + return InstrumentedExpression(f"SPLIT({_render(string)}, {_render(delim)})") + + +def sqrt(number: ExpressionType) -> InstrumentedExpression: + """Returns the square root of a number. The input can be any numeric value, + the return value is always a double. Square roots of negative numbers and + infinities are null. + + :param number: Numeric expression. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"SQRT({_render(number)})") + + +def starts_with(str: ExpressionType, prefix: ExpressionType) -> InstrumentedExpression: + """Returns a boolean that indicates whether a keyword string starts with + another string. + + :param str: String expression. If `null`, the function returns `null`. + :param prefix: String expression. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"STARTS_WITH({_render(str)}, {_render(prefix)})") + + +def std_dev(number: ExpressionType) -> InstrumentedExpression: + """The population standard deviation of a numeric field. + + :param number: + """ + return InstrumentedExpression(f"STD_DEV({_render(number)})") + + +def st_centroid_agg(field: ExpressionType) -> InstrumentedExpression: + """Calculate the spatial centroid over a field with spatial point geometry type. + + :param field: + """ + return InstrumentedExpression(f"ST_CENTROID_AGG({_render(field)})") + + +def st_contains( + geom_a: ExpressionType, geom_b: ExpressionType +) -> InstrumentedExpression: + """Returns whether the first geometry contains the second geometry. This is + the inverse of the ST_WITHIN function. + + :param geom_a: Expression of type `geo_point`, `cartesian_point`, + `geo_shape` or `cartesian_shape`. If `null`, the function returns + `null`. + :param geom_b: Expression of type `geo_point`, `cartesian_point`, `geo_shape` + or `cartesian_shape`. If `null`, the function returns `null`. The + second parameter must also have the same coordinate system as the + first. This means it is not possible to combine `geo_*` and + `cartesian_*` parameters. + """ + return InstrumentedExpression(f"ST_CONTAINS({geom_a}, {geom_b})") + + +def st_disjoint( + geom_a: ExpressionType, geom_b: ExpressionType +) -> InstrumentedExpression: + """Returns whether the two geometries or geometry columns are disjoint. + This is the inverse of the ST_INTERSECTS function. In mathematical terms: + ST_Disjoint(A, B) ⇔ A ⋂ B = ∅ + + :param geom_a: Expression of type `geo_point`, `cartesian_point`, + `geo_shape` or `cartesian_shape`. If `null`, the function returns + `null`. + :param geom_b: Expression of type `geo_point`, `cartesian_point`, `geo_shape` + or `cartesian_shape`. If `null`, the function returns `null`. 
The + second parameter must also have the same coordinate system as the + first. This means it is not possible to combine `geo_*` and + `cartesian_*` parameters. + """ + return InstrumentedExpression(f"ST_DISJOINT({geom_a}, {geom_b})") + + +def st_distance( + geom_a: ExpressionType, geom_b: ExpressionType +) -> InstrumentedExpression: + """Computes the distance between two points. For cartesian geometries, this + is the pythagorean distance in the same units as the original coordinates. + For geographic geometries, this is the circular distance along the great + circle in meters. + + :param geom_a: Expression of type `geo_point` or `cartesian_point`. If + `null`, the function returns `null`. + :param geom_b: Expression of type `geo_point` or `cartesian_point`. If + `null`, the function returns `null`. The second parameter must + also have the same coordinate system as the first. This means it + is not possible to combine `geo_point` and `cartesian_point` parameters. + """ + return InstrumentedExpression(f"ST_DISTANCE({geom_a}, {geom_b})") + + +def st_envelope(geometry: ExpressionType) -> InstrumentedExpression: + """Determines the minimum bounding box of the supplied geometry. + + :param geometry: Expression of type `geo_point`, `geo_shape`, + `cartesian_point` or `cartesian_shape`. If `null`, the function + returns `null`. + """ + return InstrumentedExpression(f"ST_ENVELOPE({_render(geometry)})") + + +def st_extent_agg(field: ExpressionType) -> InstrumentedExpression: + """Calculate the spatial extent over a field with geometry type. Returns a + bounding box for all values of the field. + + :param field: + """ + return InstrumentedExpression(f"ST_EXTENT_AGG({_render(field)})") + + +def st_geohash( + geometry: ExpressionType, precision: ExpressionType, bounds: ExpressionType = None +) -> InstrumentedExpression: + """Calculates the `geohash` of the supplied geo_point at the specified + precision. The result is long encoded. Use ST_GEOHASH_TO_STRING to convert + the result to a string. These functions are related to the `geo_grid` + query and the `geohash_grid` aggregation. + + :param geometry: Expression of type `geo_point`. If `null`, the function + returns `null`. + :param precision: Expression of type `integer`. If `null`, the function + returns `null`. Valid values are between 1 and 12. + :param bounds: Optional bounds to filter the grid tiles, a `geo_shape` of + type `BBOX`. Use `ST_ENVELOPE` if the `geo_shape` is of any + other type. + """ + if bounds is not None: + return InstrumentedExpression( + f"ST_GEOHASH({_render(geometry)}, {_render(precision)}, {_render(bounds)})" + ) + else: + return InstrumentedExpression( + f"ST_GEOHASH({_render(geometry)}, {_render(precision)})" + ) + + +def st_geohash_to_long(grid_id: ExpressionType) -> InstrumentedExpression: + """Converts an input value representing a geohash grid-ID in string format + into a long. + + :param grid_id: Input geohash grid-id. The input can be a single- or + multi-valued column or an expression. + """ + return InstrumentedExpression(f"ST_GEOHASH_TO_LONG({grid_id})") + + +def st_geohash_to_string(grid_id: ExpressionType) -> InstrumentedExpression: + """Converts an input value representing a geohash grid-ID in long format + into a string. + + :param grid_id: Input geohash grid-id. The input can be a single- or + multi-valued column or an expression. 
+ """ + return InstrumentedExpression(f"ST_GEOHASH_TO_STRING({grid_id})") + + +def st_geohex( + geometry: ExpressionType, precision: ExpressionType, bounds: ExpressionType = None +) -> InstrumentedExpression: + """Calculates the `geohex`, the H3 cell-id, of the supplied geo_point at + the specified precision. The result is long encoded. Use + ST_GEOHEX_TO_STRING to convert the result to a string. These functions are + related to the `geo_grid` query and the `geohex_grid` aggregation. + + :param geometry: Expression of type `geo_point`. If `null`, the function + returns `null`. + :param precision: Expression of type `integer`. If `null`, the function + returns `null`. Valid values are between 0 and 15. + :param bounds: Optional bounds to filter the grid tiles, a `geo_shape` of + type `BBOX`. Use `ST_ENVELOPE` if the `geo_shape` + is of any other type. + """ + if bounds is not None: + return InstrumentedExpression( + f"ST_GEOHEX({_render(geometry)}, {_render(precision)}, {_render(bounds)})" + ) + else: + return InstrumentedExpression( + f"ST_GEOHEX({_render(geometry)}, {_render(precision)})" + ) + + +def st_geohex_to_long(grid_id: ExpressionType) -> InstrumentedExpression: + """Converts an input value representing a geohex grid-ID in string format + into a long. + + :param grid_id: Input geohex grid-id. The input can be a single- or + multi-valued column or an expression. + """ + return InstrumentedExpression(f"ST_GEOHEX_TO_LONG({grid_id})") + + +def st_geohex_to_string(grid_id: ExpressionType) -> InstrumentedExpression: + """Converts an input value representing a Geohex grid-ID in long format + into a string. + + :param grid_id: Input Geohex grid-id. The input can be a single- or + multi-valued column or an expression. + """ + return InstrumentedExpression(f"ST_GEOHEX_TO_STRING({grid_id})") + + +def st_geotile( + geometry: ExpressionType, precision: ExpressionType, bounds: ExpressionType = None +) -> InstrumentedExpression: + """Calculates the `geotile` of the supplied geo_point at the specified + precision. The result is long encoded. Use ST_GEOTILE_TO_STRING to convert + the result to a string. These functions are related to the `geo_grid` + query and the `geotile_grid` aggregation. + + :param geometry: Expression of type `geo_point`. If `null`, the function + returns `null`. + :param precision: Expression of type `integer`. If `null`, the function + returns `null`. Valid values are between 0 and 29. + :param bounds: Optional bounds to filter the grid tiles, a `geo_shape` of + type `BBOX`. Use `ST_ENVELOPE` if the `geo_shape` is of any + other type. + """ + if bounds is not None: + return InstrumentedExpression( + f"ST_GEOTILE({_render(geometry)}, {_render(precision)}, {_render(bounds)})" + ) + else: + return InstrumentedExpression( + f"ST_GEOTILE({_render(geometry)}, {_render(precision)})" + ) + + +def st_geotile_to_long(grid_id: ExpressionType) -> InstrumentedExpression: + """Converts an input value representing a geotile grid-ID in string format + into a long. + + :param grid_id: Input geotile grid-id. The input can be a single- or + multi-valued column or an expression. + """ + return InstrumentedExpression(f"ST_GEOTILE_TO_LONG({grid_id})") + + +def st_geotile_to_string(grid_id: ExpressionType) -> InstrumentedExpression: + """Converts an input value representing a geotile grid-ID in long format + into a string. + + :param grid_id: Input geotile grid-id. The input can be a single- or + multi-valued column or an expression. 
+ """ + return InstrumentedExpression(f"ST_GEOTILE_TO_STRING({grid_id})") + + +def st_intersects( + geom_a: ExpressionType, geom_b: ExpressionType +) -> InstrumentedExpression: + """Returns true if two geometries intersect. They intersect if they have + any point in common, including their interior points (points along lines or + within polygons). This is the inverse of the ST_DISJOINT function. In + mathematical terms: ST_Intersects(A, B) ⇔ A ⋂ B ≠ ∅ + + :param geom_a: Expression of type `geo_point`, `cartesian_point`, + `geo_shape` or `cartesian_shape`. If `null`, the function returns + `null`. + :param geom_b: Expression of type `geo_point`, `cartesian_point`, `geo_shape` + or `cartesian_shape`. If `null`, the function returns `null`. The + second parameter must also have the same coordinate system as the + first. This means it is not possible to combine `geo_*` and + `cartesian_*` parameters. + """ + return InstrumentedExpression(f"ST_INTERSECTS({geom_a}, {geom_b})") + + +def st_within(geom_a: ExpressionType, geom_b: ExpressionType) -> InstrumentedExpression: + """Returns whether the first geometry is within the second geometry. This + is the inverse of the ST_CONTAINS function. + + :param geom_a: Expression of type `geo_point`, `cartesian_point`, + `geo_shape` or `cartesian_shape`. If `null`, the function returns + `null`. + :param geom_b: Expression of type `geo_point`, `cartesian_point`, `geo_shape` + or `cartesian_shape`. If `null`, the function returns `null`. The + second parameter must also have the same coordinate system as the + first. This means it is not possible to combine `geo_*` and + `cartesian_*` parameters. + """ + return InstrumentedExpression(f"ST_WITHIN({geom_a}, {geom_b})") + + +def st_x(point: ExpressionType) -> InstrumentedExpression: + """Extracts the `x` coordinate from the supplied point. If the points is of + type `geo_point` this is equivalent to extracting the `longitude` value. + + :param point: Expression of type `geo_point` or `cartesian_point`. If + `null`, the function returns `null`. + """ + return InstrumentedExpression(f"ST_X({_render(point)})") + + +def st_xmax(point: ExpressionType) -> InstrumentedExpression: + """Extracts the maximum value of the `x` coordinates from the supplied + geometry. If the geometry is of type `geo_point` or `geo_shape` this is + equivalent to extracting the maximum `longitude` value. + + :param point: Expression of type `geo_point`, `geo_shape`, `cartesian_point` + or `cartesian_shape`. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"ST_XMAX({_render(point)})") + + +def st_xmin(point: ExpressionType) -> InstrumentedExpression: + """Extracts the minimum value of the `x` coordinates from the supplied + geometry. If the geometry is of type `geo_point` or `geo_shape` this is + equivalent to extracting the minimum `longitude` value. + + :param point: Expression of type `geo_point`, `geo_shape`, `cartesian_point` + or `cartesian_shape`. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"ST_XMIN({_render(point)})") + + +def st_y(point: ExpressionType) -> InstrumentedExpression: + """Extracts the `y` coordinate from the supplied point. If the points is of + type `geo_point` this is equivalent to extracting the `latitude` value. + + :param point: Expression of type `geo_point` or `cartesian_point`. If + `null`, the function returns `null`. 
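+
+    Sketch of the expected rendering (not asserted here)::
+
+        st_y(E("location"))  # -> ST_Y(location)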
+ """ + return InstrumentedExpression(f"ST_Y({_render(point)})") + + +def st_ymax(point: ExpressionType) -> InstrumentedExpression: + """Extracts the maximum value of the `y` coordinates from the supplied + geometry. If the geometry is of type `geo_point` or `geo_shape` this is + equivalent to extracting the maximum `latitude` value. + + :param point: Expression of type `geo_point`, `geo_shape`, `cartesian_point` + or `cartesian_shape`. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"ST_YMAX({_render(point)})") + + +def st_ymin(point: ExpressionType) -> InstrumentedExpression: + """Extracts the minimum value of the `y` coordinates from the supplied + geometry. If the geometry is of type `geo_point` or `geo_shape` this is + equivalent to extracting the minimum `latitude` value. + + :param point: Expression of type `geo_point`, `geo_shape`, `cartesian_point` + or `cartesian_shape`. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"ST_YMIN({_render(point)})") + + +def substring( + string: ExpressionType, start: ExpressionType, length: ExpressionType = None +) -> InstrumentedExpression: + """Returns a substring of a string, specified by a start position and an + optional length. + + :param string: String expression. If `null`, the function returns `null`. + :param start: Start position. + :param length: Length of the substring from the start position. Optional; if + omitted, all positions after `start` are returned. + """ + if length is not None: + return InstrumentedExpression( + f"SUBSTRING({_render(string)}, {_render(start)}, {_render(length)})" + ) + else: + return InstrumentedExpression(f"SUBSTRING({_render(string)}, {_render(start)})") + + +def sum(number: ExpressionType) -> InstrumentedExpression: + """The sum of a numeric expression. + + :param number: + """ + return InstrumentedExpression(f"SUM({_render(number)})") + + +def tan(angle: ExpressionType) -> InstrumentedExpression: + """Returns the tangent of an angle. + + :param angle: An angle, in radians. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"TAN({_render(angle)})") + + +def tanh(number: ExpressionType) -> InstrumentedExpression: + """Returns the hyperbolic tangent of a number. + + :param number: Numeric expression. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"TANH({_render(number)})") + + +def tau() -> InstrumentedExpression: + """Returns the ratio of a circle’s circumference to its radius.""" + return InstrumentedExpression("TAU()") + + +def term(field: ExpressionType, query: ExpressionType) -> InstrumentedExpression: + """Performs a Term query on the specified field. Returns true if the + provided term matches the row. + + :param field: Field that the query will target. + :param query: Term you wish to find in the provided field. + """ + return InstrumentedExpression(f"TERM({_render(field)}, {_render(query)})") + + +def top( + field: ExpressionType, limit: ExpressionType, order: ExpressionType +) -> InstrumentedExpression: + """Collects the top values for a field. Includes repeated values. + + :param field: The field to collect the top values for. + :param limit: The maximum number of values to collect. + :param order: The order to calculate the top values. Either `asc` or `desc`. 
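+
+    A hedged usage sketch (rendering inferred from how other functions in this
+    module format their arguments)::
+
+        top(E("salary"), 3, "desc")  # -> TOP(salary, 3, "desc")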
+ """ + return InstrumentedExpression( + f"TOP({_render(field)}, {_render(limit)}, {_render(order)})" + ) + + +def to_aggregate_metric_double(number: ExpressionType) -> InstrumentedExpression: + """Encode a numeric to an aggregate_metric_double. + + :param number: Input value. The input can be a single- or multi-valued + column or an expression. + """ + return InstrumentedExpression(f"TO_AGGREGATE_METRIC_DOUBLE({_render(number)})") + + +def to_base64(string: ExpressionType) -> InstrumentedExpression: + """Encode a string to a base64 string. + + :param string: A string. + """ + return InstrumentedExpression(f"TO_BASE64({_render(string)})") + + +def to_boolean(field: ExpressionType) -> InstrumentedExpression: + """Converts an input value to a boolean value. A string value of `true` + will be case-insensitive converted to the Boolean `true`. For anything + else, including the empty string, the function will return `false`. The + numerical value of `0` will be converted to `false`, anything else will be + converted to `true`. + + :param field: Input value. The input can be a single- or multi-valued column + or an expression. + """ + return InstrumentedExpression(f"TO_BOOLEAN({_render(field)})") + + +def to_cartesianpoint(field: ExpressionType) -> InstrumentedExpression: + """Converts an input value to a `cartesian_point` value. A string will only + be successfully converted if it respects the WKT Point format. + + :param field: Input value. The input can be a single- or multi-valued column + or an expression. + """ + return InstrumentedExpression(f"TO_CARTESIANPOINT({_render(field)})") + + +def to_cartesianshape(field: ExpressionType) -> InstrumentedExpression: + """Converts an input value to a `cartesian_shape` value. A string will only + be successfully converted if it respects the WKT format. + + :param field: Input value. The input can be a single- or multi-valued column + or an expression. + """ + return InstrumentedExpression(f"TO_CARTESIANSHAPE({_render(field)})") + + +def to_dateperiod(field: ExpressionType) -> InstrumentedExpression: + """Converts an input value into a `date_period` value. + + :param field: Input value. The input is a valid constant date period expression. + """ + return InstrumentedExpression(f"TO_DATEPERIOD({_render(field)})") + + +def to_datetime(field: ExpressionType) -> InstrumentedExpression: + """Converts an input value to a date value. A string will only be + successfully converted if it’s respecting the format + `yyyy-MM-dd'T'HH:mm:ss.SSS'Z'`. To convert dates in other formats, use `DATE_PARSE`. + + :param field: Input value. The input can be a single- or multi-valued column + or an expression. + """ + return InstrumentedExpression(f"TO_DATETIME({_render(field)})") + + +def to_date_nanos(field: ExpressionType) -> InstrumentedExpression: + """Converts an input to a nanosecond-resolution date value (aka date_nanos). + + :param field: Input value. The input can be a single- or multi-valued column + or an expression. + """ + return InstrumentedExpression(f"TO_DATE_NANOS({_render(field)})") + + +def to_degrees(number: ExpressionType) -> InstrumentedExpression: + """Converts a number in radians to degrees). + + :param number: Input value. The input can be a single- or multi-valued + column or an expression. + """ + return InstrumentedExpression(f"TO_DEGREES({_render(number)})") + + +def to_double(field: ExpressionType) -> InstrumentedExpression: + """Converts an input value to a double value. 
If the input parameter is of + a date type, its value will be interpreted as milliseconds since the Unix + epoch, converted to double. Boolean `true` will be converted to double + `1.0`, `false` to `0.0`. + + :param field: Input value. The input can be a single- or multi-valued column + or an expression. + """ + return InstrumentedExpression(f"TO_DOUBLE({_render(field)})") + + +def to_geopoint(field: ExpressionType) -> InstrumentedExpression: + """Converts an input value to a `geo_point` value. A string will only be + successfully converted if it respects the WKT Point format. + + :param field: Input value. The input can be a single- or multi-valued column + or an expression. + """ + return InstrumentedExpression(f"TO_GEOPOINT({_render(field)})") + + +def to_geoshape(field: ExpressionType) -> InstrumentedExpression: + """Converts an input value to a `geo_shape` value. A string will only be + successfully converted if it respects the WKT format. + + :param field: Input value. The input can be a single- or multi-valued column + or an expression. + """ + return InstrumentedExpression(f"TO_GEOSHAPE({_render(field)})") + + +def to_integer(field: ExpressionType) -> InstrumentedExpression: + """Converts an input value to an integer value. If the input parameter is + of a date type, its value will be interpreted as milliseconds since the + Unix epoch, converted to integer. Boolean `true` will be converted to + integer `1`, `false` to `0`. + + :param field: Input value. The input can be a single- or multi-valued column + or an expression. + """ + return InstrumentedExpression(f"TO_INTEGER({_render(field)})") + + +def to_ip( + field: ExpressionType, options: ExpressionType = None +) -> InstrumentedExpression: + """Converts an input string to an IP value. + + :param field: Input value. The input can be a single- or multi-valued column + or an expression. + :param options: (Optional) Additional options. + """ + if options is not None: + return InstrumentedExpression(f"TO_IP({_render(field)}, {_render(options)})") + else: + return InstrumentedExpression(f"TO_IP({_render(field)})") + + +def to_long(field: ExpressionType) -> InstrumentedExpression: + """Converts an input value to a long value. If the input parameter is of a + date type, its value will be interpreted as milliseconds since the Unix + epoch, converted to long. Boolean `true` will be converted to long `1`, + `false` to `0`. + + :param field: Input value. The input can be a single- or multi-valued column + or an expression. + """ + return InstrumentedExpression(f"TO_LONG({_render(field)})") + + +def to_lower(str: ExpressionType) -> InstrumentedExpression: + """Returns a new string representing the input string converted to lower case. + + :param str: String expression. If `null`, the function returns `null`. The + input can be a single-valued column or expression, or a multi-valued + column or expression. + """ + return InstrumentedExpression(f"TO_LOWER({_render(str)})") + + +def to_radians(number: ExpressionType) -> InstrumentedExpression: + """Converts a number in degrees) to radians. + + :param number: Input value. The input can be a single- or multi-valued + column or an expression. + """ + return InstrumentedExpression(f"TO_RADIANS({_render(number)})") + + +def to_string(field: ExpressionType) -> InstrumentedExpression: + """Converts an input value into a string. + + :param field: Input value. The input can be a single- or multi-valued column + or an expression. 
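+
+    A brief sketch, with the rendered text inferred rather than asserted::
+
+        to_string(E("emp_no"))  # -> TO_STRING(emp_no)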
+ """ + return InstrumentedExpression(f"TO_STRING({_render(field)})") + + +def to_timeduration(field: ExpressionType) -> InstrumentedExpression: + """Converts an input value into a `time_duration` value. + + :param field: Input value. The input is a valid constant time duration expression. + """ + return InstrumentedExpression(f"TO_TIMEDURATION({_render(field)})") + + +def to_unsigned_long(field: ExpressionType) -> InstrumentedExpression: + """Converts an input value to an unsigned long value. If the input + parameter is of a date type, its value will be interpreted as milliseconds + since the Unix epoch, converted to unsigned long. Boolean `true` will be + converted to unsigned long `1`, `false` to `0`. + + :param field: Input value. The input can be a single- or multi-valued column + or an expression. + """ + return InstrumentedExpression(f"TO_UNSIGNED_LONG({_render(field)})") + + +def to_upper(str: ExpressionType) -> InstrumentedExpression: + """Returns a new string representing the input string converted to upper case. + + :param str: String expression. If `null`, the function returns `null`. The + input can be a single-valued column or expression, or a multi-valued + column or expression. + """ + return InstrumentedExpression(f"TO_UPPER({_render(str)})") + + +def to_version(field: ExpressionType) -> InstrumentedExpression: + """Converts an input string to a version value. + + :param field: Input value. The input can be a single- or multi-valued column + or an expression. + """ + return InstrumentedExpression(f"TO_VERSION({_render(field)})") + + +def trim(string: ExpressionType) -> InstrumentedExpression: + """Removes leading and trailing whitespaces from a string. + + :param string: String expression. If `null`, the function returns `null`. + """ + return InstrumentedExpression(f"TRIM({_render(string)})") + + +def values(field: ExpressionType) -> InstrumentedExpression: + """Returns unique values as a multivalued field. The order of the returned + values isn’t guaranteed. If you need the values returned in order use `MV_SORT`. + + :param field: + """ + return InstrumentedExpression(f"VALUES({_render(field)})") + + +def weighted_avg( + number: ExpressionType, weight: ExpressionType +) -> InstrumentedExpression: + """The weighted average of a numeric expression. + + :param number: A numeric value. + :param weight: A numeric weight. + """ + return InstrumentedExpression(f"WEIGHTED_AVG({_render(number)}, {_render(weight)})") diff --git a/test_elasticsearch/test_dsl/_async/test_esql.py b/test_elasticsearch/test_dsl/_async/test_esql.py new file mode 100644 index 000000000..7aacb833c --- /dev/null +++ b/test_elasticsearch/test_dsl/_async/test_esql.py @@ -0,0 +1,93 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
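+
+# These tests exercise the ES|QL query builder end to end against a live
+# cluster. The rendered query text is not asserted here (only the results
+# are), but as a rough sketch the first query below is expected to render to
+# something along the lines of:
+#
+#   FROM employees
+#   | EVAL name = CONCAT(first_name, " ", last_name)
+#   | KEEP name
+#   | SORT name
+#   | LIMIT 10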
+ +import pytest + +from elasticsearch.dsl import AsyncDocument, M +from elasticsearch.esql import ESQL, functions + + +class Employee(AsyncDocument): + emp_no: M[int] + first_name: M[str] + last_name: M[str] + height: M[float] + still_hired: M[bool] + + class Index: + name = "employees" + + +async def load_db(): + data = [ + [10000, "Joseph", "Wall", 2.2, True], + [10001, "Stephanie", "Ward", 1.749, True], + [10002, "David", "Keller", 1.872, True], + [10003, "Roger", "Hinton", 1.694, False], + [10004, "Joshua", "Garcia", 1.661, False], + [10005, "Matthew", "Richards", 1.633, False], + [10006, "Maria", "Luna", 1.893, True], + [10007, "Angela", "Navarro", 1.604, False], + [10008, "Maria", "Cannon", 2.079, False], + [10009, "Joseph", "Sutton", 2.025, True], + ] + if await Employee._index.exists(): + await Employee._index.delete() + await Employee.init() + + for e in data: + employee = Employee( + emp_no=e[0], first_name=e[1], last_name=e[2], height=e[3], still_hired=e[4] + ) + await employee.save() + await Employee._index.refresh() + + +@pytest.mark.asyncio +async def test_esql(async_client): + await load_db() + + # get the full names of the employees + query = ( + ESQL.from_(Employee) + .eval(name=functions.concat(Employee.first_name, " ", Employee.last_name)) + .keep("name") + .sort("name") + .limit(10) + ) + r = await async_client.esql.query(query=str(query)) + assert r.body["values"] == [ + ["Angela Navarro"], + ["David Keller"], + ["Joseph Sutton"], + ["Joseph Wall"], + ["Joshua Garcia"], + ["Maria Cannon"], + ["Maria Luna"], + ["Matthew Richards"], + ["Roger Hinton"], + ["Stephanie Ward"], + ] + + # get the average height of all hired employees + query = ESQL.from_(Employee).stats( + avg_height=functions.round(functions.avg(Employee.height), 2).where( + Employee.still_hired == True # noqa: E712 + ) + ) + r = await async_client.esql.query(query=str(query)) + assert r.body["values"] == [[1.95]] diff --git a/test_elasticsearch/test_dsl/_sync/test_esql.py b/test_elasticsearch/test_dsl/_sync/test_esql.py new file mode 100644 index 000000000..1c4084fc7 --- /dev/null +++ b/test_elasticsearch/test_dsl/_sync/test_esql.py @@ -0,0 +1,93 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
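+
+# Sync twin of the async ES|QL test. Renderings are likewise not asserted in
+# this file; as a hedged sketch, the aggregation query below is assumed to
+# produce ES|QL roughly of the form:
+#
+#   FROM employees
+#   | STATS avg_height = ROUND(AVG(height), 2) WHERE still_hired == true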
+ +import pytest + +from elasticsearch.dsl import Document, M +from elasticsearch.esql import ESQL, functions + + +class Employee(Document): + emp_no: M[int] + first_name: M[str] + last_name: M[str] + height: M[float] + still_hired: M[bool] + + class Index: + name = "employees" + + +def load_db(): + data = [ + [10000, "Joseph", "Wall", 2.2, True], + [10001, "Stephanie", "Ward", 1.749, True], + [10002, "David", "Keller", 1.872, True], + [10003, "Roger", "Hinton", 1.694, False], + [10004, "Joshua", "Garcia", 1.661, False], + [10005, "Matthew", "Richards", 1.633, False], + [10006, "Maria", "Luna", 1.893, True], + [10007, "Angela", "Navarro", 1.604, False], + [10008, "Maria", "Cannon", 2.079, False], + [10009, "Joseph", "Sutton", 2.025, True], + ] + if Employee._index.exists(): + Employee._index.delete() + Employee.init() + + for e in data: + employee = Employee( + emp_no=e[0], first_name=e[1], last_name=e[2], height=e[3], still_hired=e[4] + ) + employee.save() + Employee._index.refresh() + + +@pytest.mark.sync +def test_esql(client): + load_db() + + # get the full names of the employees + query = ( + ESQL.from_(Employee) + .eval(name=functions.concat(Employee.first_name, " ", Employee.last_name)) + .keep("name") + .sort("name") + .limit(10) + ) + r = client.esql.query(query=str(query)) + assert r.body["values"] == [ + ["Angela Navarro"], + ["David Keller"], + ["Joseph Sutton"], + ["Joseph Wall"], + ["Joshua Garcia"], + ["Maria Cannon"], + ["Maria Luna"], + ["Matthew Richards"], + ["Roger Hinton"], + ["Stephanie Ward"], + ] + + # get the average height of all hired employees + query = ESQL.from_(Employee).stats( + avg_height=functions.round(functions.avg(Employee.height), 2).where( + Employee.still_hired == True # noqa: E712 + ) + ) + r = client.esql.query(query=str(query)) + assert r.body["values"] == [[1.95]] diff --git a/test_elasticsearch/test_esql.py b/test_elasticsearch/test_esql.py new file mode 100644 index 000000000..70c9ec679 --- /dev/null +++ b/test_elasticsearch/test_esql.py @@ -0,0 +1,715 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
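+
+# Rendering-only unit tests for the ES|QL query builder: each case asserts the
+# exact ES|QL text produced by render(). For instance, one of the cases below
+# checks that ESQL.row(a=functions.round(1.23, 0)).render() returns
+# 'ROW a = ROUND(1.23, 0)'.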
+ +from elasticsearch.dsl import E +from elasticsearch.esql import ESQL, and_, functions, not_, or_ + + +def test_from(): + query = ESQL.from_("employees") + assert query.render() == "FROM employees" + + query = ESQL.from_("") + assert query.render() == "FROM " + + query = ESQL.from_("employees-00001", "other-employees-*") + assert query.render() == "FROM employees-00001, other-employees-*" + + query = ESQL.from_("cluster_one:employees-00001", "cluster_two:other-employees-*") + assert ( + query.render() + == "FROM cluster_one:employees-00001, cluster_two:other-employees-*" + ) + + query = ESQL.from_("employees").metadata("_id") + assert query.render() == "FROM employees METADATA _id" + + +def test_row(): + query = ESQL.row(a=1, b="two", c=None) + assert query.render() == 'ROW a = 1, b = "two", c = null' + + query = ESQL.row(a=[2, 1]) + assert query.render() == "ROW a = [2, 1]" + + query = ESQL.row(a=functions.round(1.23, 0)) + assert query.render() == "ROW a = ROUND(1.23, 0)" + + +def test_show(): + query = ESQL.show("INFO") + assert query.render() == "SHOW INFO" + + +def test_change_point(): + query = ( + ESQL.row(key=list(range(1, 26))) + .mv_expand("key") + .eval(value=functions.case(E("key") < 13, 0, 42)) + .change_point("value") + .on("key") + .where("type IS NOT NULL") + ) + assert ( + query.render() + == """ROW key = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] +| MV_EXPAND key +| EVAL value = CASE(key < 13, 0, 42) +| CHANGE_POINT value ON key +| WHERE type IS NOT NULL""" + ) + + +def test_completion(): + query = ( + ESQL.row(question="What is Elasticsearch?") + .completion("question") + .with_("test_completion_model") + .keep("question", "completion") + ) + assert ( + query.render() + == """ROW question = "What is Elasticsearch?" +| COMPLETION question WITH test_completion_model +| KEEP question, completion""" + ) + + query = ( + ESQL.row(question="What is Elasticsearch?") + .completion(answer=E("question")) + .with_("test_completion_model") + .keep("question", "answer") + ) + assert ( + query.render() + == """ROW question = "What is Elasticsearch?" 
+| COMPLETION answer = question WITH test_completion_model +| KEEP question, answer""" + ) + + query = ( + ESQL.from_("movies") + .sort("rating DESC") + .limit(10) + .eval( + prompt="""CONCAT( + "Summarize this movie using the following information: \\n", + "Title: ", title, "\\n", + "Synopsis: ", synopsis, "\\n", + "Actors: ", MV_CONCAT(actors, ", "), "\\n", + )""" + ) + .completion(summary="prompt") + .with_("test_completion_model") + .keep("title", "summary", "rating") + ) + assert ( + query.render() + == """FROM movies +| SORT rating DESC +| LIMIT 10 +| EVAL prompt = CONCAT( + "Summarize this movie using the following information: \\n", + "Title: ", title, "\\n", + "Synopsis: ", synopsis, "\\n", + "Actors: ", MV_CONCAT(actors, ", "), "\\n", + ) +| COMPLETION summary = prompt WITH test_completion_model +| KEEP title, summary, rating""" + ) + + query = ( + ESQL.from_("movies") + .sort("rating DESC") + .limit(10) + .eval( + prompt=functions.concat( + "Summarize this movie using the following information: \n", + "Title: ", + E("title"), + "\n", + "Synopsis: ", + E("synopsis"), + "\n", + "Actors: ", + functions.mv_concat(E("actors"), ", "), + "\n", + ) + ) + .completion(summary="prompt") + .with_("test_completion_model") + .keep("title", "summary", "rating") + ) + assert ( + query.render() + == """FROM movies +| SORT rating DESC +| LIMIT 10 +| EVAL prompt = CONCAT("Summarize this movie using the following information: \\n", "Title: ", title, "\\n", "Synopsis: ", synopsis, "\\n", "Actors: ", MV_CONCAT(actors, ", "), "\\n") +| COMPLETION summary = prompt WITH test_completion_model +| KEEP title, summary, rating""" + ) + + +def test_dissect(): + query = ( + ESQL.row(a="2023-01-23T12:15:00.000Z - some text - 127.0.0.1") + .dissect("a", "%{date} - %{msg} - %{ip}") + .keep("date", "msg", "ip") + ) + assert ( + query.render() + == """ROW a = "2023-01-23T12:15:00.000Z - some text - 127.0.0.1" +| DISSECT a "%{date} - %{msg} - %{ip}" +| KEEP date, msg, ip""" + ) + + +def test_drop(): + query = ESQL.from_("employees").drop("height") + assert query.render() == "FROM employees\n| DROP height" + query = ESQL.from_("employees").drop("height*") + assert query.render() == "FROM employees\n| DROP height*" + + +def test_enrich(): + query = ESQL.row(language_code="1").enrich("languages_policy") + assert ( + query.render() + == """ROW language_code = "1" +| ENRICH languages_policy""" + ) + + query = ESQL.row(language_code="1").enrich("languages_policy").on("a") + assert ( + query.render() + == """ROW language_code = "1" +| ENRICH languages_policy ON a""" + ) + + query = ( + ESQL.row(language_code="1") + .enrich("languages_policy") + .on("a") + .with_(name="language_name") + ) + assert ( + query.render() + == """ROW language_code = "1" +| ENRICH languages_policy ON a WITH name = language_name""" + ) + + +def test_eval(): + query = ( + ESQL.from_("employees") + .sort("emp_no") + .keep("first_name", "last_name", "height") + .eval(height_feet=E("height") * 3.281, height_cm=E("height") * 100) + ) + assert ( + query.render() + == """FROM employees +| SORT emp_no +| KEEP first_name, last_name, height +| EVAL height_feet = height * 3.281, height_cm = height * 100""" + ) + + query = ( + ESQL.from_("employees") + .sort("emp_no") + .keep("first_name", "last_name", "height") + .eval(E("height") * 3.281) + ) + assert ( + query.render() + == """FROM employees +| SORT emp_no +| KEEP first_name, last_name, height +| EVAL height * 3.281""" + ) + + query = ( + ESQL.from_("employees") + .eval("height * 3.281") + 
.stats(avg_height_feet=functions.avg(E("`height * 3.281`"))) + ) + assert ( + query.render() + == """FROM employees +| EVAL height * 3.281 +| STATS avg_height_feet = AVG(`height * 3.281`)""" + ) + + +def test_fork(): + query = ( + ESQL.from_("employees") + .fork( + ESQL.branch().where(E("emp_no") == 10001), + ESQL.branch().where("emp_no == 10002"), + ) + .keep("emp_no", "_fork") + .sort("emp_no") + ) + assert ( + query.render() + == """FROM employees +| FORK ( WHERE emp_no == 10001 ) + ( WHERE emp_no == 10002 ) +| KEEP emp_no, _fork +| SORT emp_no""" + ) + + +def test_grok(): + query = ( + ESQL.row(a="2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42") + .grok( + "a", + "%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num}", + ) + .keep("date", "ip", "email", "num") + ) + assert ( + query.render() + == """ROW a = "2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42" +| GROK a "%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num}" +| KEEP date, ip, email, num""" + ) + + query = ( + ESQL.row(a="2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42") + .grok( + "a", + "%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num:int}", + ) + .keep("date", "ip", "email", "num") + .eval(date=functions.to_datetime(E("date"))) + ) + assert ( + query.render() + == """ROW a = "2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42" +| GROK a "%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num:int}" +| KEEP date, ip, email, num +| EVAL date = TO_DATETIME(date)""" + ) + + query = ( + ESQL.from_("addresses") + .keep("city.name", "zip_code") + .grok("zip_code", "%{WORD:zip_parts} %{WORD:zip_parts}") + ) + assert ( + query.render() + == """FROM addresses +| KEEP city.name, zip_code +| GROK zip_code "%{WORD:zip_parts} %{WORD:zip_parts}\"""" + ) + + +def test_keep(): + query = ESQL.from_("employees").keep("emp_no", "first_name", "last_name", "height") + assert ( + query.render() == "FROM employees\n| KEEP emp_no, first_name, last_name, height" + ) + + query = ESQL.from_("employees").keep("h*") + assert query.render() == "FROM employees\n| KEEP h*" + + query = ESQL.from_("employees").keep("*", "first_name") + assert query.render() == "FROM employees\n| KEEP *, first_name" + + +def test_limit(): + query = ESQL.from_("index").where(E("field") == "value").limit(1000) + assert query.render() == 'FROM index\n| WHERE field == "value"\n| LIMIT 1000' + + query = ( + ESQL.from_("index").stats(functions.avg(E("field1"))).by("field2").limit(20000) + ) + assert ( + query.render() + == "FROM index\n| STATS AVG(field1)\n BY field2\n| LIMIT 20000" + ) + + +def test_lookup_join(): + query = ( + ESQL.from_("firewall_logs") + .lookup_join("threat_list") + .on("source.IP") + .where("threat_level IS NOT NULL") + ) + assert ( + query.render() + == """FROM firewall_logs +| LOOKUP JOIN threat_list ON source.IP +| WHERE threat_level IS NOT NULL""" + ) + + query = ( + ESQL.from_("system_metrics") + .lookup_join("host_inventory") + .on("host.name") + .lookup_join("ownerships") + .on("host.name") + ) + assert ( + query.render() + == """FROM system_metrics +| LOOKUP JOIN host_inventory ON host.name +| LOOKUP JOIN ownerships ON host.name""" + ) + + query = ESQL.from_("app_logs").lookup_join("service_owners").on("service_id") + assert ( + query.render() + == """FROM app_logs +| LOOKUP JOIN service_owners ON service_id""" + ) + + query = ( + ESQL.from_("employees") + .eval(language_code="languages") + .where(E("emp_no") >= 10091, E("emp_no") < 10094) + 
.lookup_join("languages_lookup") + .on("language_code") + ) + assert ( + query.render() + == """FROM employees +| EVAL language_code = languages +| WHERE emp_no >= 10091 AND emp_no < 10094 +| LOOKUP JOIN languages_lookup ON language_code""" + ) + + +def test_mv_expand(): + query = ESQL.row(a=[1, 2, 3], b="b", j=["a", "b"]).mv_expand("a") + assert ( + query.render() + == """ROW a = [1, 2, 3], b = "b", j = ["a", "b"] +| MV_EXPAND a""" + ) + + +def test_rename(): + query = ( + ESQL.from_("employees") + .keep("first_name", "last_name", "still_hired") + .rename(still_hired="employed") + ) + assert ( + query.render() + == """FROM employees +| KEEP first_name, last_name, still_hired +| RENAME still_hired AS employed""" + ) + + +def test_sample(): + query = ESQL.from_("employees").keep("emp_no").sample(0.05) + assert ( + query.render() + == """FROM employees +| KEEP emp_no +| SAMPLE 0.05""" + ) + + +def test_sort(): + query = ( + ESQL.from_("employees").keep("first_name", "last_name", "height").sort("height") + ) + assert ( + query.render() + == """FROM employees +| KEEP first_name, last_name, height +| SORT height""" + ) + + query = ( + ESQL.from_("employees") + .keep("first_name", "last_name", "height") + .sort("height DESC") + ) + assert ( + query.render() + == """FROM employees +| KEEP first_name, last_name, height +| SORT height DESC""" + ) + + query = ( + ESQL.from_("employees") + .keep("first_name", "last_name", "height") + .sort("height DESC", "first_name ASC") + ) + assert ( + query.render() + == """FROM employees +| KEEP first_name, last_name, height +| SORT height DESC, first_name ASC""" + ) + + query = ( + ESQL.from_("employees") + .keep("first_name", "last_name", "height") + .sort("first_name ASC NULLS FIRST") + ) + assert ( + query.render() + == """FROM employees +| KEEP first_name, last_name, height +| SORT first_name ASC NULLS FIRST""" + ) + + +def test_stats(): + query = ( + ESQL.from_("employees") + .stats(count=functions.count(E("emp_no"))) + .by("languages") + .sort("languages") + ) + assert ( + query.render() + == """FROM employees +| STATS count = COUNT(emp_no) + BY languages +| SORT languages""" + ) + + query = ESQL.from_("employees").stats(avg_lang=functions.avg(E("languages"))) + assert ( + query.render() + == """FROM employees +| STATS avg_lang = AVG(languages)""" + ) + + query = ESQL.from_("employees").stats( + avg_lang=functions.avg(E("languages")), max_lang=functions.max(E("languages")) + ) + assert ( + query.render() + == """FROM employees +| STATS avg_lang = AVG(languages), + max_lang = MAX(languages)""" + ) + + query = ( + ESQL.from_("employees") + .stats( + avg50s=functions.avg(E("salary")).where('birth_date < "1960-01-01"'), + avg60s=functions.avg(E("salary")).where('birth_date >= "1960-01-01"'), + ) + .by("gender") + .sort("gender") + ) + assert ( + query.render() + == """FROM employees +| STATS avg50s = AVG(salary) WHERE birth_date < "1960-01-01", + avg60s = AVG(salary) WHERE birth_date >= "1960-01-01" + BY gender +| SORT gender""" + ) + + query = ( + ESQL.from_("employees") + .eval(Ks="salary / 1000") + .stats( + under_40K=functions.count(E("*")).where("Ks < 40"), + inbetween=functions.count(E("*")).where("40 <= Ks", "Ks < 60"), + over_60K=functions.count(E("*")).where("60 <= Ks"), + total=functions.count(E("*")), + ) + ) + assert ( + query.render() + == """FROM employees +| EVAL Ks = salary / 1000 +| STATS under_40K = COUNT(*) WHERE Ks < 40, + inbetween = COUNT(*) WHERE (40 <= Ks) AND (Ks < 60), + over_60K = COUNT(*) WHERE 60 <= Ks, + total = COUNT(*)""" + ) + 
+ query = ( + ESQL.row(i=1, a=["a", "b"]).stats(functions.min(E("i"))).by("a").sort("a ASC") + ) + assert ( + query.render() + == 'ROW i = 1, a = ["a", "b"]\n| STATS MIN(i)\n BY a\n| SORT a ASC' + ) + + query = ( + ESQL.from_("employees") + .eval(hired=functions.date_format(E("hire_date"), "yyyy")) + .stats(avg_salary=functions.avg(E("salary"))) + .by("hired", "languages.long") + .eval(avg_salary=functions.round(E("avg_salary"))) + .sort("hired", "languages.long") + ) + assert ( + query.render() + == """FROM employees +| EVAL hired = DATE_FORMAT("yyyy", hire_date) +| STATS avg_salary = AVG(salary) + BY hired, languages.long +| EVAL avg_salary = ROUND(avg_salary) +| SORT hired, languages.long""" + ) + + +def test_where(): + query = ( + ESQL.from_("employees") + .keep("first_name", "last_name", "still_hired") + .where("still_hired == true") + ) + assert ( + query.render() + == """FROM employees +| KEEP first_name, last_name, still_hired +| WHERE still_hired == true""" + ) + + query = ESQL.from_("sample_data").where("@timestamp > NOW() - 1 hour") + assert ( + query.render() + == """FROM sample_data +| WHERE @timestamp > NOW() - 1 hour""" + ) + + query = ( + ESQL.from_("employees") + .keep("first_name", "last_name", "height") + .where("LENGTH(first_name) < 4") + ) + assert ( + query.render() + == """FROM employees +| KEEP first_name, last_name, height +| WHERE LENGTH(first_name) < 4""" + ) + + +def test_and_operator(): + query = ESQL.from_("index").where( + and_(E("age") > 30, E("age") < 40, E("name").is_not_null()) + ) + assert ( + query.render() + == """FROM index +| WHERE (age > 30) AND (age < 40) AND (name IS NOT NULL)""" + ) + + +def test_or_operator(): + query = ESQL.from_("index").where( + or_(E("age") < 30, E("age") > 40, E("name").is_null()) + ) + assert ( + query.render() + == """FROM index +| WHERE (age < 30) OR (age > 40) OR (name IS NULL)""" + ) + + +def test_not_operator(): + query = ESQL.from_("index").where(not_(E("age") > 40)) + assert ( + query.render() + == """FROM index +| WHERE NOT (age > 40)""" + ) + + +def test_in_operator(): + query = ESQL.row(a=1, b=4, c=3).where((E("c") - E("a")).in_(3, E("b") / 2, "a")) + assert ( + query.render() + == """ROW a = 1, b = 4, c = 3 +| WHERE c - a IN (3, b / 2, a)""" + ) + + +def test_like_operator(): + query = ( + ESQL.from_("employees") + .where(E("first_name").like("?b*")) + .keep("first_name", "last_name") + ) + assert ( + query.render() + == """FROM employees +| WHERE first_name LIKE "?b*" +| KEEP first_name, last_name""" + ) + + query = ESQL.row(message="foo * bar").where(E("message").like("foo \\* bar")) + assert ( + query.render() + == """ROW message = "foo * bar" +| WHERE message LIKE "foo \\\\* bar\"""" + ) + + query = ESQL.row(message="foobar").where(E("message").like("foo*", "bar?")) + assert ( + query.render() + == """ROW message = "foobar" +| WHERE message LIKE ("foo*", "bar?")""" + ) + + +def test_rlike_operator(): + query = ( + ESQL.from_("employees") + .where(E("first_name").rlike(".leja*")) + .keep("first_name", "last_name") + ) + assert ( + query.render() + == """FROM employees +| WHERE first_name RLIKE ".leja*" +| KEEP first_name, last_name""" + ) + + query = ESQL.row(message="foo ( bar").where(E("message").rlike("foo \\( bar")) + assert ( + query.render() + == """ROW message = "foo ( bar" +| WHERE message RLIKE "foo \\\\( bar\"""" + ) + + query = ESQL.row(message="foobar").where(E("message").rlike("foo.*", "bar.")) + assert ( + query.render() + == """ROW message = "foobar" +| WHERE message RLIKE ("foo.*", "bar.")""" 
+ ) + + +def test_match_operator(): + query = ESQL.from_("books").where(E("author").match("Faulkner")) + assert ( + query.render() + == """FROM books +| WHERE author:"Faulkner\"""" + ) From 7abc620788491e4c7e59d6cb8091b728fc9d2ebc Mon Sep 17 00:00:00 2001 From: Elastic Machine Date: Tue, 29 Jul 2025 12:38:20 +0200 Subject: [PATCH 61/65] Auto-generated API code (#2957) Co-authored-by: Miguel Grinberg --- elasticsearch/_async/client/__init__.py | 129 ++-- elasticsearch/_async/client/async_search.py | 12 +- elasticsearch/_async/client/autoscaling.py | 8 +- elasticsearch/_async/client/cat.py | 685 ++++++++++++++++-- elasticsearch/_async/client/ccr.py | 26 +- elasticsearch/_async/client/cluster.py | 45 +- elasticsearch/_async/client/connector.py | 60 +- .../_async/client/dangling_indices.py | 6 +- elasticsearch/_async/client/enrich.py | 10 +- elasticsearch/_async/client/eql.py | 18 +- elasticsearch/_async/client/esql.py | 39 +- elasticsearch/_async/client/features.py | 4 +- elasticsearch/_async/client/fleet.py | 24 +- elasticsearch/_async/client/graph.py | 2 +- elasticsearch/_async/client/ilm.py | 22 +- elasticsearch/_async/client/indices.py | 213 +++--- elasticsearch/_async/client/inference.py | 523 ++++++++++--- elasticsearch/_async/client/ingest.py | 25 +- elasticsearch/_async/client/license.py | 20 +- elasticsearch/_async/client/logstash.py | 6 +- elasticsearch/_async/client/migration.py | 6 +- elasticsearch/_async/client/ml.py | 156 ++-- elasticsearch/_async/client/nodes.py | 17 +- elasticsearch/_async/client/query_rules.py | 16 +- elasticsearch/_async/client/rollup.py | 16 +- .../_async/client/search_application.py | 20 +- .../_async/client/searchable_snapshots.py | 8 +- elasticsearch/_async/client/security.py | 152 ++-- elasticsearch/_async/client/shutdown.py | 6 +- elasticsearch/_async/client/simulate.py | 2 +- elasticsearch/_async/client/slm.py | 18 +- elasticsearch/_async/client/snapshot.py | 32 +- elasticsearch/_async/client/sql.py | 12 +- elasticsearch/_async/client/ssl.py | 2 +- elasticsearch/_async/client/synonyms.py | 14 +- elasticsearch/_async/client/tasks.py | 6 +- elasticsearch/_async/client/text_structure.py | 8 +- elasticsearch/_async/client/transform.py | 18 +- elasticsearch/_async/client/xpack.py | 2 +- elasticsearch/_sync/client/__init__.py | 129 ++-- elasticsearch/_sync/client/async_search.py | 12 +- elasticsearch/_sync/client/autoscaling.py | 8 +- elasticsearch/_sync/client/cat.py | 685 ++++++++++++++++-- elasticsearch/_sync/client/ccr.py | 26 +- elasticsearch/_sync/client/cluster.py | 45 +- elasticsearch/_sync/client/connector.py | 60 +- .../_sync/client/dangling_indices.py | 6 +- elasticsearch/_sync/client/enrich.py | 10 +- elasticsearch/_sync/client/eql.py | 18 +- elasticsearch/_sync/client/esql.py | 39 +- elasticsearch/_sync/client/features.py | 4 +- elasticsearch/_sync/client/fleet.py | 24 +- elasticsearch/_sync/client/graph.py | 2 +- elasticsearch/_sync/client/ilm.py | 22 +- elasticsearch/_sync/client/indices.py | 213 +++--- elasticsearch/_sync/client/inference.py | 523 ++++++++++--- elasticsearch/_sync/client/ingest.py | 25 +- elasticsearch/_sync/client/license.py | 20 +- elasticsearch/_sync/client/logstash.py | 6 +- elasticsearch/_sync/client/migration.py | 6 +- elasticsearch/_sync/client/ml.py | 156 ++-- elasticsearch/_sync/client/nodes.py | 17 +- elasticsearch/_sync/client/query_rules.py | 16 +- elasticsearch/_sync/client/rollup.py | 16 +- .../_sync/client/search_application.py | 20 +- .../_sync/client/searchable_snapshots.py | 8 +- 
elasticsearch/_sync/client/security.py | 152 ++-- elasticsearch/_sync/client/shutdown.py | 6 +- elasticsearch/_sync/client/simulate.py | 2 +- elasticsearch/_sync/client/slm.py | 18 +- elasticsearch/_sync/client/snapshot.py | 32 +- elasticsearch/_sync/client/sql.py | 12 +- elasticsearch/_sync/client/ssl.py | 2 +- elasticsearch/_sync/client/synonyms.py | 14 +- elasticsearch/_sync/client/tasks.py | 6 +- elasticsearch/_sync/client/text_structure.py | 8 +- elasticsearch/_sync/client/transform.py | 18 +- elasticsearch/_sync/client/xpack.py | 2 +- elasticsearch/dsl/types.py | 2 + 79 files changed, 3282 insertions(+), 1496 deletions(-) diff --git a/elasticsearch/_async/client/__init__.py b/elasticsearch/_async/client/__init__.py index cf34c6284..6f6c139a3 100644 --- a/elasticsearch/_async/client/__init__.py +++ b/elasticsearch/_async/client/__init__.py @@ -731,7 +731,7 @@ async def bulk( The other two shards that make up the index do not participate in the _bulk request at all.

        - ``_ + ``_ :param operations: :param index: The name of the data stream, index, or index alias to perform bulk @@ -856,7 +856,7 @@ async def clear_scroll( Clear the search context and results for a scrolling search.

        - ``_ + ``_ :param scroll_id: The scroll IDs to clear. To clear all scroll IDs, use `_all`. """ @@ -913,7 +913,7 @@ async def close_point_in_time( However, keeping points in time has a cost; close them as soon as they are no longer required for search requests.

        - ``_ + ``_ :param id: The ID of the point-in-time. """ @@ -997,7 +997,7 @@ async def count( This means that replicas increase the scalability of the count.

        - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, @@ -1121,10 +1121,7 @@ async def create( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, - if_primary_term: t.Optional[int] = None, - if_seq_no: t.Optional[int] = None, include_source_on_error: t.Optional[bool] = None, - op_type: t.Optional[t.Union[str, t.Literal["create", "index"]]] = None, pipeline: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ @@ -1199,7 +1196,7 @@ async def create( The _shards section of the API response reveals the number of shard copies on which replication succeeded and failed.

        - ``_ + ``_ :param index: The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template @@ -1209,18 +1206,8 @@ async def create( :param id: A unique identifier for the document. To automatically generate a document ID, use the `POST //_doc/` request format. :param document: - :param if_primary_term: Only perform the operation if the document has this primary - term. - :param if_seq_no: Only perform the operation if the document has this sequence - number. :param include_source_on_error: True or false if to include the document source in the error message in case of parsing errors. - :param op_type: Set to `create` to only index the document if it does not already - exist (put if absent). If a document with the specified `_id` already exists, - the indexing operation will fail. The behavior is the same as using the `/_create` - endpoint. If a document ID is specified, this paramater defaults to `index`. - Otherwise, it defaults to `create`. If the request targets a data stream, - an `op_type` of `create` is required. :param pipeline: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final @@ -1272,14 +1259,8 @@ async def create( __query["filter_path"] = filter_path if human is not None: __query["human"] = human - if if_primary_term is not None: - __query["if_primary_term"] = if_primary_term - if if_seq_no is not None: - __query["if_seq_no"] = if_seq_no if include_source_on_error is not None: __query["include_source_on_error"] = include_source_on_error - if op_type is not None: - __query["op_type"] = op_type if pipeline is not None: __query["pipeline"] = pipeline if pretty is not None: @@ -1366,7 +1347,7 @@ async def delete( It then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group.

        - ``_ + ``_ :param index: The name of the target index. :param id: A unique identifier for the document. @@ -1555,7 +1536,7 @@ async def delete_by_query( The get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself.

        - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, @@ -1752,7 +1733,7 @@ async def delete_by_query_rethrottle( Rethrottling that speeds up the query takes effect immediately but rethrotting that slows down the query takes effect after completing the current batch to prevent scroll timeouts.

        - ``_ + ``_ :param task_id: The ID for the task. :param requests_per_second: The throttle for this request in sub-requests per @@ -1802,7 +1783,7 @@ async def delete_script( Deletes a stored script or search template.

        - ``_ + ``_ :param id: The identifier for the stored script or search template. :param master_timeout: The period to wait for a connection to the master node. @@ -1886,7 +1867,7 @@ async def exists( Elasticsearch cleans up deleted documents in the background as you continue to index more data.

        - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases. It supports wildcards (`*`). @@ -2009,7 +1990,7 @@ async def exists_source(

        A document's source is not available if it is disabled in the mapping.

        - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases. It supports wildcards (`*`). @@ -2115,7 +2096,7 @@ async def explain( It computes a score explanation for a query and a specific document.

        - ``_ + ``_ :param index: Index names that are used to limit the request. Only a single index name can be provided to this parameter. @@ -2250,7 +2231,7 @@ async def field_caps( For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the keyword family.

        - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams @@ -2411,7 +2392,7 @@ async def get( Elasticsearch cleans up deleted documents in the background as you continue to index more data.

        - ``_ + ``_ :param index: The name of the index that contains the document. :param id: A unique document identifier. @@ -2518,7 +2499,7 @@ async def get_script( Retrieves a stored script or search template.

        - ``_ + ``_ :param id: The identifier for the stored script or search template. :param master_timeout: The period to wait for the master node. If the master @@ -2567,7 +2548,7 @@ async def get_script_context(

        Get a list of supported script contexts and their methods.

        - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_script_context" @@ -2606,7 +2587,7 @@ async def get_script_languages(

        Get a list of available script types, languages, and contexts.

        - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_script_language" @@ -2652,7 +2633,6 @@ async def get_source( source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None, source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None, source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None, - stored_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, version: t.Optional[int] = None, version_type: t.Optional[ t.Union[str, t.Literal["external", "external_gte", "force", "internal"]] @@ -2671,7 +2651,7 @@ async def get_source( - ``_ + ``_ :param index: The name of the index that contains the document. :param id: A unique document identifier. @@ -2689,8 +2669,6 @@ async def get_source( the response. :param source_includes: A comma-separated list of source fields to include in the response. - :param stored_fields: A comma-separated list of stored fields to return as part - of a hit. :param version: The version number for concurrency control. It must match the current version of the document for the request to succeed. :param version_type: The version type. @@ -2724,8 +2702,6 @@ async def get_source( __query["_source_excludes"] = source_excludes if source_includes is not None: __query["_source_includes"] = source_includes - if stored_fields is not None: - __query["stored_fields"] = stored_fields if version is not None: __query["version"] = version if version_type is not None: @@ -2771,7 +2747,7 @@ async def health_report( When setting up automated polling of the API for health status, set verbose to false to disable the more expensive analysis logic.

        - ``_ + ``_ :param feature: A feature of the cluster, as returned by the top-level health report API. @@ -2834,6 +2810,7 @@ async def index( t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, require_alias: t.Optional[bool] = None, + require_data_stream: t.Optional[bool] = None, routing: t.Optional[str] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, version: t.Optional[int] = None, @@ -2936,7 +2913,7 @@ async def index( - ``_ + ``_ :param index: The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template @@ -2969,6 +2946,8 @@ async def index( this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. :param require_alias: If `true`, the destination must be an index alias. + :param require_data_stream: If `true`, the request's actions must target a data + stream (existing or to be created). :param routing: A custom value that is used to route operations to a specific shard. :param timeout: The period the request waits for the following operations: automatic @@ -3030,6 +3009,8 @@ async def index( __query["refresh"] = refresh if require_alias is not None: __query["require_alias"] = require_alias + if require_data_stream is not None: + __query["require_data_stream"] = require_data_stream if routing is not None: __query["routing"] = routing if timeout is not None: @@ -3065,10 +3046,11 @@ async def info( .. raw:: html

        Get cluster info. - Get basic build, version, and cluster information.

        + Get basic build, version, and cluster information. + ::: In Serverless, this API is retained for backward compatibility only. Some response fields, such as the version number, should be ignored.
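A small illustration of the note added above, assuming an `AsyncElasticsearch` client named `client`: the root endpoint keeps answering on Serverless, but fields such as the version number are advisory there and should not drive feature detection.

    info = await client.info()
    print(info["cluster_name"])
    print(info["version"]["number"])  # per the note above, not meaningful on Serverless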

        - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/" @@ -3142,7 +3124,7 @@ async def knn_search(
      - ``_ + ``_ :param index: A comma-separated list of index names to search; use `_all` or to perform the operation on all indices. @@ -3258,7 +3240,7 @@ async def mget( You can include the stored_fields query parameter in the request URI to specify the defaults to use when there are no per-document instructions.

      - ``_ + ``_ :param index: Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index. @@ -3393,7 +3375,7 @@ async def msearch( When sending requests to this endpoint the Content-Type header should be set to application/x-ndjson.

      - ``_ + ``_ :param searches: :param index: Comma-separated list of data streams, indices, and index aliases @@ -3539,7 +3521,7 @@ async def msearch_template( - ``_ + ``_ :param search_templates: :param index: A comma-separated list of data streams, indices, and aliases to @@ -3644,7 +3626,7 @@ async def mtermvectors( The mapping used is determined by the specified _index.

      - ``_ + ``_ :param index: The name of the index that contains the documents. :param docs: An array of existing or artificial documents. @@ -3785,7 +3767,7 @@ async def open_point_in_time( You can check how many point-in-times (that is, search contexts) are open with the nodes stats API.

      - ``_ + ``_ :param index: A comma-separated list of index names to open point in time; use `_all` or empty string to perform the operation on all indices @@ -3798,8 +3780,7 @@ async def open_point_in_time( :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated - values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, - `hidden`, `none`. + values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param index_filter: Filter indices if the provided query rewrites to `match_none` @@ -3883,7 +3864,7 @@ async def put_script( Creates or updates a stored script or search template.

      - ``_ + ``_ :param id: The identifier for the stored script or search template. It must be unique within the cluster. @@ -3973,7 +3954,7 @@ async def rank_eval(

      Evaluate the quality of ranked search results over a set of typical search queries.

      - ``_ + ``_ :param requests: A set of typical search requests, together with their provided ratings. @@ -4205,7 +4186,7 @@ async def reindex( It is not possible to configure SSL in the body of the reindex request.

      - ``_ + ``_ :param dest: The destination you are copying to. :param source: The source you are copying from. @@ -4329,7 +4310,7 @@ async def reindex_rethrottle( This behavior prevents scroll timeouts.

      - ``_ + ``_ :param task_id: The task identifier, which can be found by using the tasks API. :param requests_per_second: The throttle for this request in sub-requests per @@ -4385,7 +4366,7 @@ async def render_search_template(

      Render a search template as a search request body.

      - ``_ + ``_ :param id: The ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required. @@ -4479,7 +4460,7 @@ async def scripts_painless_execute(

      Each context requires a script, but additional parameters depend on the context you're using for that script.
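A minimal sketch of that point, assuming an `AsyncElasticsearch` client named `client`: with no explicit context the script runs in the default test context and only needs its `params`, while other contexts additionally take `context` and `context_setup`.

    resp = await client.scripts_painless_execute(
        script={
            "source": "params.count / params.total",
            "params": {"count": 100.0, "total": 1000.0},
        }
    )
    print(resp["result"])  # expected to be "0.1" in the default context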

      - ``_ + ``_ :param context: The context that the script should run in. NOTE: Result ordering in the field contexts is not guaranteed. @@ -4552,7 +4533,7 @@ async def scroll(

      IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.
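A minimal sketch of the scroll loop this warning is about, assuming an `AsyncElasticsearch` client named `client` and an illustrative index name; every page reflects the index as it was when the first request ran.

    resp = await client.search(index="my-index", scroll="2m", size=1000, query={"match_all": {}})
    scroll_id = resp["_scroll_id"]
    try:
        while resp["hits"]["hits"]:
            # handle this page of hits, then fetch the next page with the same scroll ID
            resp = await client.scroll(scroll_id=scroll_id, scroll="2m")
            scroll_id = resp["_scroll_id"]
    finally:
        await client.clear_scroll(scroll_id=scroll_id)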

      - ``_ + ``_ :param scroll_id: The scroll ID of the search. :param rest_total_hits_as_int: If true, the API response’s hit.total property @@ -4758,7 +4739,7 @@ async def search( This situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.

      - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, @@ -5509,7 +5490,7 @@ async def search_mvt( Elasticsearch uses the H3 resolution that is closest to the corresponding geotile density.

      - ``_ + ``_ :param index: Comma-separated list of data streams, indices, or aliases to search :param field: Field containing geospatial data to return @@ -5683,7 +5664,7 @@ async def search_shards(

      If the Elasticsearch security features are enabled, you must have the view_index_metadata or manage index privilege for the target data stream, index, or alias.

      - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, @@ -5696,7 +5677,7 @@ async def search_shards( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param local: If `true`, the request retrieves information from the local node @@ -5794,7 +5775,7 @@ async def search_template(

      Run a search with a search template.

      - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). @@ -5808,8 +5789,7 @@ async def search_template( :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated - values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, - `hidden`, `none`. + values, such as `open,hidden`. :param explain: If `true`, returns detailed information about score calculation as part of each hit. If you specify both this and the `explain` query parameter, the API uses only the query parameter. @@ -5937,7 +5917,7 @@ async def terms_enum( - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and index aliases to search. Wildcard (`*`) expressions are supported. To search all data streams @@ -6086,7 +6066,7 @@ async def termvectors( Use routing only to hit a particular shard.

      - ``_ + ``_ :param index: The name of the index that contains the document. :param id: A unique identifier for the document. @@ -6257,7 +6237,7 @@ async def update( In addition to _source, you can access the following variables through the ctx map: _index, _type, _id, _version, _routing, and _now (the current timestamp).

      - ``_ + ``_ :param index: The name of the target index. By default, the index is created automatically if it doesn't exist. @@ -6495,7 +6475,7 @@ async def update_by_query( This API enables you to only modify the source of matching documents; you cannot move them.

      - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, @@ -6520,8 +6500,7 @@ async def update_by_query( :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated - values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, - `hidden`, `none`. + values, such as `open,hidden`. :param from_: Skips the specified number of documents. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. @@ -6715,7 +6694,7 @@ async def update_by_query_rethrottle( Rethrottling that speeds up the query takes effect immediately but rethrotting that slows down the query takes effect after completing the current batch to prevent scroll timeouts.

      - ``_ + ``_ :param task_id: The ID for the task. :param requests_per_second: The throttle for this request in sub-requests per diff --git a/elasticsearch/_async/client/async_search.py b/elasticsearch/_async/client/async_search.py index b480e199b..c34530efc 100644 --- a/elasticsearch/_async/client/async_search.py +++ b/elasticsearch/_async/client/async_search.py @@ -44,7 +44,7 @@ async def delete( If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the cancel_task cluster privilege.

      - ``_ + ``_ :param id: A unique identifier for the async search. """ @@ -94,7 +94,7 @@ async def get( If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it.

      - ``_ + ``_ :param id: A unique identifier for the async search. :param keep_alive: The length of time that the async search should be available @@ -164,7 +164,7 @@ async def status(
    - ``_ + ``_ :param id: A unique identifier for the async search. :param keep_alive: The length of time that the async search needs to be available. @@ -281,7 +281,6 @@ async def submit( ] = None, lenient: t.Optional[bool] = None, max_concurrent_shard_requests: t.Optional[int] = None, - min_compatible_shard_node: t.Optional[str] = None, min_score: t.Optional[float] = None, pit: t.Optional[t.Mapping[str, t.Any]] = None, post_filter: t.Optional[t.Mapping[str, t.Any]] = None, @@ -346,7 +345,7 @@ async def submit( The maximum allowed size for a stored async search response can be set by changing the search.max_async_search_response_size cluster level setting.

    - ``_ + ``_ :param index: A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices @@ -401,7 +400,6 @@ async def submit( per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests - :param min_compatible_shard_node: :param min_score: Minimum _score for matching documents. Documents with a lower _score are not included in search results and results collected by aggregations. :param pit: Limits the search to a point in time (PIT). If you provide a PIT, @@ -526,8 +524,6 @@ async def submit( __query["lenient"] = lenient if max_concurrent_shard_requests is not None: __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests - if min_compatible_shard_node is not None: - __query["min_compatible_shard_node"] = min_compatible_shard_node if preference is not None: __query["preference"] = preference if pretty is not None: diff --git a/elasticsearch/_async/client/autoscaling.py b/elasticsearch/_async/client/autoscaling.py index 9999f82e8..8de430f83 100644 --- a/elasticsearch/_async/client/autoscaling.py +++ b/elasticsearch/_async/client/autoscaling.py @@ -44,7 +44,7 @@ async def delete_autoscaling_policy(

    NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

    - ``_ + ``_ :param name: the name of the autoscaling policy :param master_timeout: Period to wait for a connection to the master node. If @@ -104,7 +104,7 @@ async def get_autoscaling_capacity( Do not use this information to make autoscaling decisions.

    - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and @@ -151,7 +151,7 @@ async def get_autoscaling_policy(

    NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

    - ``_ + ``_ :param name: the name of the autoscaling policy :param master_timeout: Period to wait for a connection to the master node. If @@ -206,7 +206,7 @@ async def put_autoscaling_policy(

    NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

    - ``_ + ``_ :param name: the name of the autoscaling policy :param policy: diff --git a/elasticsearch/_async/client/cat.py b/elasticsearch/_async/client/cat.py index 2bd625661..681b51e59 100644 --- a/elasticsearch/_async/client/cat.py +++ b/elasticsearch/_async/client/cat.py @@ -51,7 +51,6 @@ async def aliases( help: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, @@ -65,7 +64,7 @@ async def aliases(

    IMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.

    - ``_ + ``_ :param name: A comma-separated list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. @@ -82,10 +81,6 @@ async def aliases( the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - :param master_timeout: The period to wait for a connection to the master node. - If the master node is not available before the timeout expires, the request - fails and returns an error. To indicated that the request should never timeout, - you can set it to `-1`. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -115,8 +110,6 @@ async def aliases( __query["human"] = human if local is not None: __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -161,7 +154,7 @@ async def allocation(

    IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.

    - ``_ + ``_ :param node_id: A comma-separated list of node identifiers or names used to limit the returned information. @@ -250,7 +243,7 @@ async def component_templates( They are not intended for use by applications. For application consumption, use the get component template API.

    - ``_ + ``_ :param name: The name of the component template. It accepts wildcard expressions. If it is omitted, all component templates are returned. @@ -334,7 +327,7 @@ async def count( They are not intended for use by applications. For application consumption, use the count API.

    - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. It supports wildcards (`*`). To target all data streams @@ -412,7 +405,7 @@ async def fielddata( They are not intended for use by applications. For application consumption, use the nodes stats API.

    - ``_ + ``_ :param fields: Comma-separated list of fields used to limit returned information. To retrieve all fields, omit this parameter. @@ -498,7 +491,7 @@ async def health( You also can use the API to track the recovery of a large cluster over a longer period of time.

    - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -556,7 +549,7 @@ async def help(self) -> TextApiResponse:

    Get help for the CAT APIs.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_cat" @@ -591,7 +584,9 @@ async def indices( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[t.Union[str, t.Sequence[str]]] = None, - health: t.Optional[t.Union[str, t.Literal["green", "red", "yellow"]]] = None, + health: t.Optional[ + t.Union[str, t.Literal["green", "red", "unavailable", "unknown", "yellow"]] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, include_unloaded_segments: t.Optional[bool] = None, @@ -623,7 +618,7 @@ async def indices( They are not intended for use by applications. For application consumption, use an index endpoint.

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -721,7 +716,7 @@ async def master(

    IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.

    - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -899,7 +894,7 @@ async def ml_data_frame_analytics( application consumption, use the get data frame analytics jobs statistics API.

    - ``_ + ``_ :param id: The ID of the data frame analytics to fetch :param allow_no_match: Whether to ignore if a wildcard expression matches no @@ -1067,7 +1062,7 @@ async def ml_datafeeds( application consumption, use the get datafeed statistics API.

    - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. @@ -1433,7 +1428,7 @@ async def ml_jobs( application consumption, use the get anomaly detection job statistics API.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param allow_no_match: Specifies what to do when the request: * Contains wildcard @@ -1618,7 +1613,7 @@ async def ml_trained_models( application consumption, use the get trained models statistics API.

    - ``_ + ``_ :param model_id: A unique identifier for the trained model. :param allow_no_match: Specifies what to do when the request: contains wildcard @@ -1711,7 +1706,7 @@ async def nodeattrs( IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.

    - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -1774,7 +1769,200 @@ async def nodes( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, full_id: t.Optional[t.Union[bool, str]] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, + h: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[ + str, + t.Literal[ + "build", + "completion.size", + "cpu", + "disk.avail", + "disk.total", + "disk.used", + "disk.used_percent", + "fielddata.evictions", + "fielddata.memory_size", + "file_desc.current", + "file_desc.max", + "file_desc.percent", + "flush.total", + "flush.total_time", + "get.current", + "get.exists_time", + "get.exists_total", + "get.missing_time", + "get.missing_total", + "get.time", + "get.total", + "heap.current", + "heap.max", + "heap.percent", + "http_address", + "id", + "indexing.delete_current", + "indexing.delete_time", + "indexing.delete_total", + "indexing.index_current", + "indexing.index_failed", + "indexing.index_failed_due_to_version_conflict", + "indexing.index_time", + "indexing.index_total", + "ip", + "jdk", + "load_15m", + "load_1m", + "load_5m", + "mappings.total_count", + "mappings.total_estimated_overhead_in_bytes", + "master", + "merges.current", + "merges.current_docs", + "merges.current_size", + "merges.total", + "merges.total_docs", + "merges.total_size", + "merges.total_time", + "name", + "node.role", + "pid", + "port", + "query_cache.evictions", + "query_cache.hit_count", + "query_cache.memory_size", + "query_cache.miss_count", + "ram.current", + "ram.max", + "ram.percent", + "refresh.time", + "refresh.total", + "request_cache.evictions", + "request_cache.hit_count", + "request_cache.memory_size", + "request_cache.miss_count", + "script.cache_evictions", + "script.compilations", + "search.fetch_current", + "search.fetch_time", + "search.fetch_total", + "search.open_contexts", + "search.query_current", + "search.query_time", + "search.query_total", + "search.scroll_current", + "search.scroll_time", + "search.scroll_total", + "segments.count", + "segments.fixed_bitset_memory", + "segments.index_writer_memory", + "segments.memory", + "segments.version_map_memory", + "shard_stats.total_count", + "suggest.current", + "suggest.time", + "suggest.total", + "uptime", + "version", + ], + ] + ], + t.Union[ + str, + t.Literal[ + "build", + "completion.size", + "cpu", + "disk.avail", + "disk.total", + "disk.used", + "disk.used_percent", + "fielddata.evictions", + "fielddata.memory_size", + "file_desc.current", + "file_desc.max", + "file_desc.percent", + "flush.total", + "flush.total_time", + "get.current", + "get.exists_time", + "get.exists_total", + "get.missing_time", + "get.missing_total", + "get.time", + "get.total", + "heap.current", + "heap.max", + "heap.percent", + "http_address", + "id", + "indexing.delete_current", + "indexing.delete_time", + "indexing.delete_total", + "indexing.index_current", + "indexing.index_failed", + "indexing.index_failed_due_to_version_conflict", + "indexing.index_time", + "indexing.index_total", + "ip", + "jdk", + "load_15m", + "load_1m", + "load_5m", + "mappings.total_count", + "mappings.total_estimated_overhead_in_bytes", + "master", + "merges.current", + "merges.current_docs", + "merges.current_size", + "merges.total", + "merges.total_docs", + "merges.total_size", + "merges.total_time", + "name", + "node.role", + "pid", + "port", + "query_cache.evictions", + "query_cache.hit_count", + 
"query_cache.memory_size", + "query_cache.miss_count", + "ram.current", + "ram.max", + "ram.percent", + "refresh.time", + "refresh.total", + "request_cache.evictions", + "request_cache.hit_count", + "request_cache.memory_size", + "request_cache.miss_count", + "script.cache_evictions", + "script.compilations", + "search.fetch_current", + "search.fetch_time", + "search.fetch_total", + "search.open_contexts", + "search.query_current", + "search.query_time", + "search.query_total", + "search.scroll_current", + "search.scroll_time", + "search.scroll_total", + "segments.count", + "segments.fixed_bitset_memory", + "segments.index_writer_memory", + "segments.memory", + "segments.version_map_memory", + "shard_stats.total_count", + "suggest.current", + "suggest.time", + "suggest.total", + "uptime", + "version", + ], + ], + ] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, include_unloaded_segments: t.Optional[bool] = None, @@ -1794,23 +1982,24 @@ async def nodes( IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.

    - ``_ + ``_ :param bytes: The unit used to display byte values. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param full_id: If `true`, return the full node ID. If `false`, return the shortened node ID. - :param h: List of columns to appear in the response. Supports simple wildcards. + :param h: A comma-separated list of columns names to display. It supports simple + wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param include_unloaded_segments: If true, the response includes information from segments that are not loaded into memory. - :param master_timeout: Period to wait for a connection to the master node. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. - :param time: Unit used to display time values. + :param master_timeout: The period to wait for a connection to the master node. + :param s: A comma-separated list of column names or aliases that determines the + sort order. Sorting defaults to ascending and can be changed by setting `:asc` + or `:desc` as a suffix to the column name. + :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} @@ -1881,7 +2070,7 @@ async def pending_tasks( IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API.

    - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -1961,7 +2150,7 @@ async def plugins( IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.

    - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -2029,7 +2218,74 @@ async def recovery( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, + h: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[ + str, + t.Literal[ + "bytes", + "bytes_percent", + "bytes_recovered", + "bytes_total", + "files", + "files_percent", + "files_recovered", + "files_total", + "index", + "repository", + "shard", + "snapshot", + "source_host", + "source_node", + "stage", + "start_time", + "start_time_millis", + "stop_time", + "stop_time_millis", + "target_host", + "target_node", + "time", + "translog_ops", + "translog_ops_percent", + "translog_ops_recovered", + "type", + ], + ] + ], + t.Union[ + str, + t.Literal[ + "bytes", + "bytes_percent", + "bytes_recovered", + "bytes_total", + "files", + "files_percent", + "files_recovered", + "files_total", + "index", + "repository", + "shard", + "snapshot", + "source_host", + "source_node", + "stage", + "start_time", + "start_time_millis", + "stop_time", + "stop_time_millis", + "target_host", + "target_node", + "time", + "translog_ops", + "translog_ops_percent", + "translog_ops_recovered", + "type", + ], + ], + ] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, @@ -2049,7 +2305,7 @@ async def recovery( IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API.

    - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -2060,13 +2316,14 @@ async def recovery( shard recoveries. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. - :param h: List of columns to appear in the response. Supports simple wildcards. + :param h: A comma-separated list of columns names to display. It supports simple + wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. - :param time: Unit used to display time values. + :param s: A comma-separated list of column names or aliases that determines the + sort order. Sorting defaults to ascending and can be changed by setting `:asc` + or `:desc` as a suffix to the column name. + :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] @@ -2137,7 +2394,7 @@ async def repositories( IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API.
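A brief sketch of the typed recovery columns in use; the index name and endpoint are placeholders, and the column names are taken from the literal list above:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

    # Show only ongoing recoveries for one index, with progress columns,
    # returned as JSON rather than plain text.
    recoveries = client.cat.recovery(
        index="my-index",
        active_only=True,
        detailed=True,
        h=["index", "shard", "stage", "source_node", "target_node",
           "files_percent", "bytes_percent"],
        s=["index"],
        format="json",
    )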

    - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -2200,7 +2457,52 @@ async def segments( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, + h: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[ + str, + t.Literal[ + "committed", + "compound", + "docs.count", + "docs.deleted", + "generation", + "id", + "index", + "ip", + "prirep", + "searchable", + "segment", + "shard", + "size", + "size.memory", + "version", + ], + ] + ], + t.Union[ + str, + t.Literal[ + "committed", + "compound", + "docs.count", + "docs.deleted", + "generation", + "id", + "index", + "ip", + "prirep", + "searchable", + "segment", + "shard", + "size", + "size.memory", + "version", + ], + ], + ] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, @@ -2218,7 +2520,7 @@ async def segments( IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API.

    - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -2226,7 +2528,8 @@ async def segments( :param bytes: The unit used to display byte values. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. - :param h: List of columns to appear in the response. Supports simple wildcards. + :param h: A comma-separated list of columns names to display. It supports simple + wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param local: If `true`, the request computes the list of selected nodes from @@ -2234,9 +2537,9 @@ async def segments( from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. + :param s: A comma-separated list of column names or aliases that determines the + sort order. Sorting defaults to ascending and can be changed by setting `:asc` + or `:desc` as a suffix to the column name. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] @@ -2292,7 +2595,162 @@ async def shards( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, + h: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[ + str, + t.Literal[ + "completion.size", + "dataset.size", + "dense_vector.value_count", + "docs", + "dsparse_vector.value_count", + "fielddata.evictions", + "fielddata.memory_size", + "flush.total", + "flush.total_time", + "get.current", + "get.exists_time", + "get.exists_total", + "get.missing_time", + "get.missing_total", + "get.time", + "get.total", + "id", + "index", + "indexing.delete_current", + "indexing.delete_time", + "indexing.delete_total", + "indexing.index_current", + "indexing.index_failed", + "indexing.index_failed_due_to_version_conflict", + "indexing.index_time", + "indexing.index_total", + "ip", + "merges.current", + "merges.current_docs", + "merges.current_size", + "merges.total", + "merges.total_docs", + "merges.total_size", + "merges.total_time", + "node", + "prirep", + "query_cache.evictions", + "query_cache.memory_size", + "recoverysource.type", + "refresh.time", + "refresh.total", + "search.fetch_current", + "search.fetch_time", + "search.fetch_total", + "search.open_contexts", + "search.query_current", + "search.query_time", + "search.query_total", + "search.scroll_current", + "search.scroll_time", + "search.scroll_total", + "segments.count", + "segments.fixed_bitset_memory", + "segments.index_writer_memory", + "segments.memory", + "segments.version_map_memory", + "seq_no.global_checkpoint", + "seq_no.local_checkpoint", + "seq_no.max", + "shard", + "state", + "store", + "suggest.current", + "suggest.time", + "suggest.total", + "sync_id", + "unassigned.at", + "unassigned.details", + "unassigned.for", + "unassigned.reason", + ], + ] + ], + t.Union[ + str, + t.Literal[ + "completion.size", + "dataset.size", + "dense_vector.value_count", + "docs", + "dsparse_vector.value_count", + 
"fielddata.evictions", + "fielddata.memory_size", + "flush.total", + "flush.total_time", + "get.current", + "get.exists_time", + "get.exists_total", + "get.missing_time", + "get.missing_total", + "get.time", + "get.total", + "id", + "index", + "indexing.delete_current", + "indexing.delete_time", + "indexing.delete_total", + "indexing.index_current", + "indexing.index_failed", + "indexing.index_failed_due_to_version_conflict", + "indexing.index_time", + "indexing.index_total", + "ip", + "merges.current", + "merges.current_docs", + "merges.current_size", + "merges.total", + "merges.total_docs", + "merges.total_size", + "merges.total_time", + "node", + "prirep", + "query_cache.evictions", + "query_cache.memory_size", + "recoverysource.type", + "refresh.time", + "refresh.total", + "search.fetch_current", + "search.fetch_time", + "search.fetch_total", + "search.open_contexts", + "search.query_current", + "search.query_time", + "search.query_total", + "search.scroll_current", + "search.scroll_time", + "search.scroll_total", + "segments.count", + "segments.fixed_bitset_memory", + "segments.index_writer_memory", + "segments.memory", + "segments.version_map_memory", + "seq_no.global_checkpoint", + "seq_no.local_checkpoint", + "seq_no.max", + "shard", + "state", + "store", + "suggest.current", + "suggest.time", + "suggest.total", + "sync_id", + "unassigned.at", + "unassigned.details", + "unassigned.for", + "unassigned.reason", + ], + ], + ] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, @@ -2312,7 +2770,7 @@ async def shards( IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.

    - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -2323,11 +2781,11 @@ async def shards( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param master_timeout: Period to wait for a connection to the master node. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. - :param time: Unit used to display time values. + :param master_timeout: The period to wait for a connection to the master node. + :param s: A comma-separated list of column names or aliases that determines the + sort order. Sorting defaults to ascending and can be changed by setting `:asc` + or `:desc` as a suffix to the column name. + :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] @@ -2380,7 +2838,48 @@ async def snapshots( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, + h: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[ + str, + t.Literal[ + "duration", + "end_epoch", + "end_time", + "failed_shards", + "id", + "indices", + "reason", + "repository", + "start_epoch", + "start_time", + "status", + "successful_shards", + "total_shards", + ], + ] + ], + t.Union[ + str, + t.Literal[ + "duration", + "end_epoch", + "end_time", + "failed_shards", + "id", + "indices", + "reason", + "repository", + "start_epoch", + "start_time", + "status", + "successful_shards", + "total_shards", + ], + ], + ] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, @@ -2401,14 +2900,15 @@ async def snapshots( IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API.
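As an illustration only, the typed shard columns above could be requested like this (the index pattern and endpoint are placeholders):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

    # List shards for matching indices, largest stores first.
    shards = client.cat.shards(
        index="logs-*",
        h=["index", "shard", "prirep", "state", "docs", "store", "node"],
        s=["store:desc"],
        format="json",
    )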

    - ``_ + ``_ :param repository: A comma-separated list of snapshot repositories used to limit the request. Accepts wildcard expressions. `_all` returns all repositories. If any repository fails during the request, Elasticsearch returns an error. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. - :param h: List of columns to appear in the response. Supports simple wildcards. + :param h: A comma-separated list of columns names to display. It supports simple + wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param ignore_unavailable: If `true`, the response does not include information @@ -2494,7 +2994,7 @@ async def tasks( IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API.
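A hedged example of the snapshot columns and the `ignore_unavailable` flag documented above; the repository name and endpoint are placeholders:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

    # Summarise snapshots in one repository, newest first; skip repositories
    # that cannot be reached instead of failing the whole request.
    snapshots = client.cat.snapshots(
        repository="my-repo",
        ignore_unavailable=True,
        h=["id", "repository", "status", "start_time", "duration",
           "total_shards", "failed_shards"],
        s=["start_epoch:desc"],
        v=True,
    )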

    - ``_ + ``_ :param actions: The task action names, which are used to limit the response. :param detailed: If `true`, the response includes detailed information about @@ -2588,7 +3088,7 @@ async def templates( IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API.

    - ``_ + ``_ :param name: The name of the template to return. Accepts wildcard expressions. If omitted, all templates are returned. @@ -2655,7 +3155,62 @@ async def thread_pool( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, + h: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[ + str, + t.Literal[ + "active", + "completed", + "core", + "ephemeral_id", + "host", + "ip", + "keep_alive", + "largest", + "max", + "name", + "node_id", + "node_name", + "pid", + "pool_size", + "port", + "queue", + "queue_size", + "rejected", + "size", + "type", + ], + ] + ], + t.Union[ + str, + t.Literal[ + "active", + "completed", + "core", + "ephemeral_id", + "host", + "ip", + "keep_alive", + "largest", + "max", + "name", + "node_id", + "node_name", + "pid", + "pool_size", + "port", + "queue", + "queue_size", + "rejected", + "size", + "type", + ], + ], + ] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, @@ -2676,7 +3231,7 @@ async def thread_pool( IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.

    - ``_ + ``_ :param thread_pool_patterns: A comma-separated list of thread pool names used to limit the request. Accepts wildcard expressions. @@ -2689,10 +3244,10 @@ async def thread_pool( the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. + :param master_timeout: The period to wait for a connection to the master node. + :param s: A comma-separated list of column names or aliases that determines the + sort order. Sorting defaults to ascending and can be changed by setting `:asc` + or `:desc` as a suffix to the column name. :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ @@ -2933,7 +3488,7 @@ async def transforms( application consumption, use the get transform statistics API.
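For reference, a sketch of the typed thread pool columns; the pool name patterns and endpoint are placeholders:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

    # Inspect only the search and write pools, highlighting queueing and rejections.
    pools = client.cat.thread_pool(
        thread_pool_patterns="search,write",
        h=["node_name", "name", "active", "queue", "rejected", "completed"],
        s=["node_name"],
        time="ms",
        v=True,
    )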

    - ``_ + ``_ :param transform_id: A transform identifier or a wildcard expression. If you do not specify one of these options, the API returns information for all diff --git a/elasticsearch/_async/client/ccr.py b/elasticsearch/_async/client/ccr.py index cfa80673b..eeef7c7ab 100644 --- a/elasticsearch/_async/client/ccr.py +++ b/elasticsearch/_async/client/ccr.py @@ -43,7 +43,7 @@ async def delete_auto_follow_pattern(

    Delete a collection of cross-cluster replication auto-follow patterns.

    - ``_ + ``_ :param name: The auto-follow pattern collection to delete. :param master_timeout: The period to wait for a connection to the master node. @@ -130,7 +130,7 @@ async def follow( When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index.

    - ``_ + ``_ :param index: The name of the follower index. :param leader_index: The name of the index in the leader cluster to follow. @@ -259,7 +259,7 @@ async def follow_info( For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused.

    - ``_ + ``_ :param index: A comma-delimited list of follower index patterns. :param master_timeout: The period to wait for a connection to the master node. @@ -311,7 +311,7 @@ async def follow_stats( The API returns shard-level stats about the "following tasks" associated with each shard for the specified indices.

    - ``_ + ``_ :param index: A comma-delimited list of index patterns. :param timeout: The period to wait for a response. If no response is received @@ -380,7 +380,7 @@ async def forget_follower( The only purpose of this API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked.

    - ``_ + ``_ :param index: the name of the leader index for which specified follower retention leases should be removed @@ -445,7 +445,7 @@ async def get_auto_follow_pattern(

    Get cross-cluster replication auto-follow patterns.

    - ``_ + ``_ :param name: The auto-follow pattern collection that you want to retrieve. If you do not specify a name, the API returns information for all collections. @@ -505,7 +505,7 @@ async def pause_auto_follow_pattern( Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim.

    - ``_ + ``_ :param name: The name of the auto-follow pattern to pause. :param master_timeout: The period to wait for a connection to the master node. @@ -559,7 +559,7 @@ async def pause_follow( You can pause and resume a follower index to change the configuration of the following task.

    - ``_ + ``_ :param index: The name of the follower index. :param master_timeout: The period to wait for a connection to the master node. @@ -648,7 +648,7 @@ async def put_auto_follow_pattern( NOTE: Follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns.

    - ``_ + ``_ :param name: The name of the collection of auto-follow patterns. :param remote_cluster: The remote cluster containing the leader indices to match @@ -782,7 +782,7 @@ async def resume_auto_follow_pattern( Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim.

    - ``_ + ``_ :param name: The name of the auto-follow pattern to resume. :param master_timeout: The period to wait for a connection to the master node. @@ -860,7 +860,7 @@ async def resume_follow( When this API returns, the follower index will resume fetching operations from the leader index.

    - ``_ + ``_ :param index: The name of the follow index to resume following. :param master_timeout: Period to wait for a connection to the master node. @@ -951,7 +951,7 @@ async def stats(

    This API returns stats about auto-following and the same shard-level stats as the get follower stats API.

    - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request @@ -1009,7 +1009,7 @@ async def unfollow( - ``_ + ``_ :param index: The name of the follower index. :param master_timeout: The period to wait for a connection to the master node. diff --git a/elasticsearch/_async/client/cluster.py b/elasticsearch/_async/client/cluster.py index 7847bbdb6..8b393a265 100644 --- a/elasticsearch/_async/client/cluster.py +++ b/elasticsearch/_async/client/cluster.py @@ -54,7 +54,7 @@ async def allocation_explain( This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise.

    - ``_ + ``_ :param current_node: Specifies the node ID or the name of the node to only explain a shard that is currently located on the specified node. @@ -130,7 +130,7 @@ async def delete_component_template( Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.

    - ``_ + ``_ :param name: Comma-separated list or wildcard expression of component template names used to limit the request. @@ -239,7 +239,7 @@ async def exists_component_template( Returns information about whether a particular component template exists.

    - ``_ + ``_ :param name: Comma-separated list of component template names used to limit the request. Wildcard (*) expressions are supported. @@ -290,6 +290,7 @@ async def get_component_template( local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + settings_filter: t.Optional[t.Union[str, t.Sequence[str]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html @@ -298,7 +299,7 @@ async def get_component_template( Get information about component templates.

    - ``_ + ``_ :param name: Comma-separated list of component template names used to limit the request. Wildcard (`*`) expressions are supported. @@ -310,6 +311,8 @@ async def get_component_template( :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + :param settings_filter: Filter out results, for example to filter out sensitive + information. Supports wildcards or full settings keys """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: @@ -335,6 +338,8 @@ async def get_component_template( __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if settings_filter is not None: + __query["settings_filter"] = settings_filter __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", @@ -365,7 +370,7 @@ async def get_settings( By default, it returns only settings that have been explicitly defined.
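The new `settings_filter` query parameter added above can be exercised like this; the template name and filter pattern are placeholders, not values from the patch:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

    # Fetch a component template but filter settings out of the response,
    # for example to hide sensitive keys.
    template = client.cluster.get_component_template(
        name="logs-settings",
        settings_filter="index.routing.allocation.*",  # wildcard or full settings keys
    )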

    - ``_ + ``_ :param flat_settings: If `true`, returns settings in flat format. :param include_defaults: If `true`, returns default cluster settings from the @@ -441,7 +446,7 @@ async def health( wait_for_no_relocating_shards: t.Optional[bool] = None, wait_for_nodes: t.Optional[t.Union[int, str]] = None, wait_for_status: t.Optional[ - t.Union[str, t.Literal["green", "red", "yellow"]] + t.Union[str, t.Literal["green", "red", "unavailable", "unknown", "yellow"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -457,7 +462,7 @@ async def health( The cluster status is controlled by the worst index status.
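For context, a health check using the widened `wait_for_status` literals; the timeout and target status are arbitrary choices, not from the patch:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

    # Block for up to 30 seconds until the cluster is at least yellow and
    # no shards are relocating.
    health = client.cluster.health(
        wait_for_status="yellow",
        wait_for_no_relocating_shards=True,
        timeout="30s",
    )
    print(health["status"])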

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported. To target @@ -565,7 +570,7 @@ async def info( Returns basic information about the cluster.

    - ``_ + ``_ :param target: Limits the information returned to the specific target. Supports a comma-separated list, such as http,ingest. @@ -614,7 +619,7 @@ async def pending_tasks( However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API.

    - ``_ + ``_ :param local: If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. @@ -731,6 +736,7 @@ async def put_component_template( *, name: str, template: t.Optional[t.Mapping[str, t.Any]] = None, + cause: t.Optional[str] = None, create: t.Optional[bool] = None, deprecated: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, @@ -761,7 +767,7 @@ async def put_component_template( To be applied, a component template must be included in an index template's composed_of list.

    - ``_ + ``_ :param name: Name of the component template to create. Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; @@ -774,6 +780,7 @@ async def put_component_template( update settings API. :param template: The template to be applied which includes mappings, settings, or aliases configuration. + :param cause: User defined reason for create the component template. :param create: If `true`, this request cannot replace or update existing component templates. :param deprecated: Marks this index template as deprecated. When creating or @@ -798,6 +805,8 @@ async def put_component_template( __path = f'/_component_template/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} + if cause is not None: + __query["cause"] = cause if create is not None: __query["create"] = create if error_trace is not None: @@ -866,13 +875,13 @@ async def put_settings( If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration.
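A minimal sketch of the new `cause` parameter; the template name, settings, and reason string are placeholders:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

    # Create a small component template and record why it was created.
    client.cluster.put_component_template(
        name="my-replicas",
        template={"settings": {"index": {"number_of_replicas": 1}}},
        cause="initial rollout of shared replica settings",  # user-defined reason
        create=True,  # fail instead of overwriting an existing template
    )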

    - ``_ + ``_ :param flat_settings: Return settings in flat format (default: false) :param master_timeout: Explicit operation timeout for connection to master node - :param persistent: + :param persistent: The settings that persist after the cluster restarts. :param timeout: Explicit operation timeout - :param transient: + :param transient: The settings that do not persist after the cluster restarts. """ __path_parts: t.Dict[str, str] = {} __path = "/_cluster/settings" @@ -928,11 +937,11 @@ async def remote_info( This API returns information that reflects current state on the local cluster. The connected field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it. Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster. - To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the resolve cluster endpoint.
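A small illustration of the documented `persistent` versus `transient` distinction; the setting shown is arbitrary:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

    # Persistent settings survive a full cluster restart; transient settings
    # do not and are generally discouraged.
    client.cluster.put_settings(
        persistent={"cluster.routing.allocation.enable": "all"},
    )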

    + To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the /_resolve/cluster endpoint.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_remote/info" @@ -989,7 +998,7 @@ async def reroute(

    Once the problem has been corrected, allocation can be manually retried by calling the reroute API with the ?retry_failed URI query parameter, which will attempt a single retry round for these shards.

    - ``_ + ``_ :param commands: Defines the commands to perform. :param dry_run: If true, then the request simulates the operation. It will calculate @@ -1094,7 +1103,7 @@ async def state( Instead, obtain the information you require using other more stable cluster APIs.

    - ``_ + ``_ :param metric: Limit the information returned to the specified metrics :param index: A comma-separated list of index names; use `_all` or empty string @@ -1182,7 +1191,7 @@ async def stats( Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins).

    - ``_ + ``_ :param node_id: Comma-separated list of node filters used to limit returned information. Defaults to all nodes in the cluster. diff --git a/elasticsearch/_async/client/connector.py b/elasticsearch/_async/client/connector.py index 2f5080821..9f4b91c32 100644 --- a/elasticsearch/_async/client/connector.py +++ b/elasticsearch/_async/client/connector.py @@ -49,7 +49,7 @@ async def check_in(

    Update the last_seen field in the connector and set it to the current timestamp.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be checked in """ @@ -98,7 +98,7 @@ async def delete( These need to be removed manually.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be deleted :param delete_sync_jobs: A flag indicating if associated sync jobs should be @@ -147,7 +147,7 @@ async def get(

    Get the details about a connector.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector """ @@ -247,7 +247,7 @@ async def last_sync( This action is used for analytics and monitoring.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param last_access_control_sync_error: @@ -346,7 +346,7 @@ async def list(

    Get information about all connectors.

    - ``_ + ``_ :param connector_name: A comma-separated list of connector names to fetch connector documents for @@ -427,7 +427,7 @@ async def post( Self-managed connectors (Connector clients) are self-managed on your infrastructure.

    - ``_ + ``_ :param description: :param index_name: @@ -509,7 +509,7 @@ async def put(

    Create or update a connector.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be created or updated. ID is auto-generated if not provided. @@ -584,7 +584,7 @@ async def sync_job_cancel( The connector service is then responsible for setting the status of connector sync jobs to cancelled.

    - ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job """ @@ -635,7 +635,7 @@ async def sync_job_check_in( This service runs automatically on Elastic Cloud for Elastic managed connectors.

    - ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job to be checked in. @@ -695,7 +695,7 @@ async def sync_job_claim( This service runs automatically on Elastic Cloud for Elastic managed connectors.

    - ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job. :param worker_hostname: The host name of the current system that will run the @@ -757,7 +757,7 @@ async def sync_job_delete( This is a destructive action that is not recoverable.

    - ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job to be deleted @@ -811,7 +811,7 @@ async def sync_job_error( This service runs automatically on Elastic Cloud for Elastic managed connectors.

    - ``_ + ``_ :param connector_sync_job_id: The unique identifier for the connector sync job. :param error: The error for the connector sync job error field. @@ -865,7 +865,7 @@ async def sync_job_get(

    Get a connector sync job.

    - ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job """ @@ -938,7 +938,7 @@ async def sync_job_list(

    Get information about all stored connector sync jobs listed by their creation date in ascending order.

    - ``_ + ``_ :param connector_id: A connector id to fetch connector sync jobs for :param from_: Starting offset (default: 0) @@ -1004,7 +1004,7 @@ async def sync_job_post(

    Create a connector sync job document in the internal index and initialize its counters and timestamps with default values.

    - ``_ + ``_ :param id: The id of the associated connector :param job_type: @@ -1080,7 +1080,7 @@ async def sync_job_update_stats( This service runs automatically on Elastic Cloud for Elastic managed connectors.

    - ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job. :param deleted_document_count: The number of documents the sync job deleted. @@ -1163,7 +1163,7 @@ async def update_active_filtering(

    Activates the valid draft filtering for a connector.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated """ @@ -1216,7 +1216,7 @@ async def update_api_key_id( Self-managed connectors (connector clients) do not use this field.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param api_key_id: @@ -1275,7 +1275,7 @@ async def update_configuration(

    Update the configuration field in the connector document.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param configuration: @@ -1335,7 +1335,7 @@ async def update_error( Otherwise, if the error is reset to null, the connector status is updated to connected.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param error: @@ -1403,7 +1403,7 @@ async def update_features( This service runs automatically on Elastic Cloud for Elastic managed connectors.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated. :param features: @@ -1464,7 +1464,7 @@ async def update_filtering( The filtering property is used to configure sync rules (both basic and advanced) for a connector.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param advanced_snippet: @@ -1525,7 +1525,7 @@ async def update_filtering_validation(

    Update the draft filtering validation info for a connector.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param validation: @@ -1582,7 +1582,7 @@ async def update_index_name(

    Update the index_name field of a connector, specifying the index where the data ingested by the connector is stored.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param index_name: @@ -1639,7 +1639,7 @@ async def update_name(

    Update the connector name and description.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param description: @@ -1696,7 +1696,7 @@ async def update_native(

    Update the connector is_native flag.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param is_native: @@ -1753,7 +1753,7 @@ async def update_pipeline(

    When you create a new connector, the configuration of an ingest pipeline is populated with default settings.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param pipeline: @@ -1809,7 +1809,7 @@ async def update_scheduling(

    Update the connector scheduling.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param scheduling: @@ -1865,7 +1865,7 @@ async def update_service_type(

    Update the connector service type.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param service_type: @@ -1928,7 +1928,7 @@ async def update_status(

    Update the connector status.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param status: diff --git a/elasticsearch/_async/client/dangling_indices.py b/elasticsearch/_async/client/dangling_indices.py index d7b5d5597..b792b815b 100644 --- a/elasticsearch/_async/client/dangling_indices.py +++ b/elasticsearch/_async/client/dangling_indices.py @@ -46,7 +46,7 @@ async def delete_dangling_index( For example, this can happen if you delete more than cluster.indices.tombstones.size indices while an Elasticsearch node is offline.

    - ``_ + ``_ :param index_uuid: The UUID of the index to delete. Use the get dangling indices API to find the UUID. @@ -107,7 +107,7 @@ async def import_dangling_index( For example, this can happen if you delete more than cluster.indices.tombstones.size indices while an Elasticsearch node is offline.

    - ``_ + ``_ :param index_uuid: The UUID of the index to import. Use the get dangling indices API to locate the UUID. @@ -168,7 +168,7 @@ async def list_dangling_indices(

    Use this API to list dangling indices, which you can then import or delete.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_dangling" diff --git a/elasticsearch/_async/client/enrich.py b/elasticsearch/_async/client/enrich.py index 83161c9dd..e05204a04 100644 --- a/elasticsearch/_async/client/enrich.py +++ b/elasticsearch/_async/client/enrich.py @@ -43,7 +43,7 @@ async def delete_policy( Deletes an existing enrich policy and its enrich index.

    - ``_ + ``_ :param name: Enrich policy to delete. :param master_timeout: Period to wait for a connection to the master node. @@ -92,7 +92,7 @@ async def execute_policy( Create the enrich index for an existing enrich policy.

    - ``_ + ``_ :param name: Enrich policy to execute. :param master_timeout: Period to wait for a connection to the master node. @@ -144,7 +144,7 @@ async def get_policy( Returns information about an enrich policy.

    - ``_ + ``_ :param name: Comma-separated list of enrich policy names used to limit the request. To return information for all enrich policies, omit this parameter. @@ -202,7 +202,7 @@ async def put_policy( Creates an enrich policy.

    - ``_ + ``_ :param name: Name of the enrich policy to create or update. :param geo_match: Matches enrich data to incoming documents based on a `geo_shape` @@ -263,7 +263,7 @@ async def stats( Returns enrich coordinator statistics and information about enrich policies that are currently executing.

    - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. """ diff --git a/elasticsearch/_async/client/eql.py b/elasticsearch/_async/client/eql.py index 8985e91ec..a1725df33 100644 --- a/elasticsearch/_async/client/eql.py +++ b/elasticsearch/_async/client/eql.py @@ -93,7 +93,7 @@ async def get( Get the current status and available results for an async EQL search or a stored synchronous EQL search.

    - ``_ + ``_ :param id: Identifier for the search. :param keep_alive: Period for which the search and its results are stored on @@ -147,7 +147,7 @@ async def get_status( Get the current status for an async EQL search or a stored synchronous EQL search without returning results.

    - ``_ + ``_ :param id: Identifier for the search. """ @@ -204,6 +204,7 @@ async def search( allow_partial_search_results: t.Optional[bool] = None, allow_partial_sequence_results: t.Optional[bool] = None, case_sensitive: t.Optional[bool] = None, + ccs_minimize_roundtrips: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, event_category_field: t.Optional[str] = None, expand_wildcards: t.Optional[ @@ -246,11 +247,13 @@ async def search( EQL assumes each document in a data stream or index corresponds to an event.

    - ``_ + ``_ :param index: The name of the index to scope the operation :param query: EQL query you wish to run. - :param allow_no_indices: + :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves + into no concrete indices. (This includes `_all` string or when no indices + have been specified) :param allow_partial_search_results: Allow query execution also in case of shard failures. If true, the query will keep running and will return results based on the available shards. For sequences, the behavior can be further refined @@ -261,9 +264,12 @@ async def search( If false, the sequence query will return successfully, but will always have empty results. :param case_sensitive: + :param ccs_minimize_roundtrips: Indicates whether network round-trips should + be minimized as part of cross-cluster search requests execution :param event_category_field: Field containing the event classification, such as process, file, or network. - :param expand_wildcards: + :param expand_wildcards: Whether to expand wildcard expression to concrete indices + that are open, closed or both. :param fetch_size: Maximum number of events to search at a time for sequence queries. :param fields: Array of wildcard (*) patterns. The response returns values for @@ -298,6 +304,8 @@ async def search( __body: t.Dict[str, t.Any] = body if body is not None else {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices + if ccs_minimize_roundtrips is not None: + __query["ccs_minimize_roundtrips"] = ccs_minimize_roundtrips if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: diff --git a/elasticsearch/_async/client/esql.py b/elasticsearch/_async/client/esql.py index a1dc79cd1..bca1e4255 100644 --- a/elasticsearch/_async/client/esql.py +++ b/elasticsearch/_async/client/esql.py @@ -31,6 +31,8 @@ class EsqlClient(NamespacedClient): "columnar", "filter", "include_ccs_metadata", + "keep_alive", + "keep_on_completion", "locale", "params", "profile", @@ -84,13 +86,15 @@ async def async_query(
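A hedged sketch of the new `ccs_minimize_roundtrips` flag on EQL search; the remote cluster alias, index pattern, and query string are placeholders:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

    # Run an EQL query against a remote cluster while minimising network
    # round-trips between the coordinating node and the remote cluster.
    resp = client.eql.search(
        index="my_remote:logs-endpoint-*",
        query='process where process.name == "regsvr32.exe"',
        ccs_minimize_roundtrips=True,
        fetch_size=100,
    )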

    The API accepts the same parameters and request body as the synchronous query API, along with additional async related properties.

    - ``_ + ``_ :param query: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. :param allow_partial_results: If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other - clusters and shards. + clusters and shards. If `false`, the query will fail if there are any failures. + To override the default behavior, you can set the `esql.query.allow_partial_results` + cluster setting to `false`. :param columnar: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one @@ -153,10 +157,6 @@ async def async_query( __query["format"] = format if human is not None: __query["human"] = human - if keep_alive is not None: - __query["keep_alive"] = keep_alive - if keep_on_completion is not None: - __query["keep_on_completion"] = keep_on_completion if pretty is not None: __query["pretty"] = pretty if not __body: @@ -168,6 +168,10 @@ async def async_query( __body["filter"] = filter if include_ccs_metadata is not None: __body["include_ccs_metadata"] = include_ccs_metadata + if keep_alive is not None: + __body["keep_alive"] = keep_alive + if keep_on_completion is not None: + __body["keep_on_completion"] = keep_on_completion if locale is not None: __body["locale"] = locale if params is not None: @@ -212,7 +216,7 @@ async def async_query_delete( - ``_ + ``_ :param id: The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the @@ -250,6 +254,14 @@ async def async_query_get( drop_null_columns: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + format: t.Optional[ + t.Union[ + str, + t.Literal[ + "arrow", "cbor", "csv", "json", "smile", "tsv", "txt", "yaml" + ], + ] + ] = None, human: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, @@ -265,7 +277,7 @@ async def async_query_get( If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can retrieve the results using this API.
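Because `keep_alive` and `keep_on_completion` now travel in the request body, the client-side keyword arguments are unchanged; a sketch under that assumption, where the query text, timings, and `wait_for_completion_timeout` usage are illustrative rather than taken from the patch:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

    # Start an async ES|QL query; keep the stored results for two days even
    # if the query completes within the initial wait.
    resp = client.esql.async_query(
        query="FROM logs-* | STATS events = COUNT(*) BY host.name | LIMIT 10",
        wait_for_completion_timeout="2s",
        keep_on_completion=True,
        keep_alive="2d",
    )
    query_id = resp.body.get("id")  # present while the query is still running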

    - ``_ + ``_ :param id: The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the @@ -275,6 +287,7 @@ async def async_query_get( will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. + :param format: A short version of the Accept header, for example `json` or `yaml`. :param keep_alive: The period for which the query and its results are stored in the cluster. When this period expires, the query and its results are deleted, even if the query is still ongoing. @@ -295,6 +308,8 @@ async def async_query_get( __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path + if format is not None: + __query["format"] = format if human is not None: __query["human"] = human if keep_alive is not None: @@ -332,7 +347,7 @@ async def async_query_stop( If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can stop it.
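A short sketch of the new `format` parameter on the async query results endpoint; the query ID is a placeholder for the value returned by `esql.async_query`:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

    # Poll a previously started async ES|QL query; `format` mirrors the
    # Accept header and is the parameter added in this change.
    results = client.esql.async_query_get(
        id="my-query-id",        # placeholder: the ID returned by esql.async_query
        format="json",
        drop_null_columns=True,
    )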

    - ``_ + ``_ :param id: The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the @@ -420,13 +435,15 @@ async def query( Get search results for an ES|QL (Elasticsearch query language) query.

    - ``_ + ``_ :param query: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. :param allow_partial_results: If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other - clusters and shards. + clusters and shards. If `false`, the query will fail if there are any failures. + To override the default behavior, you can set the `esql.query.allow_partial_results` + cluster setting to `false`. :param columnar: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one diff --git a/elasticsearch/_async/client/features.py b/elasticsearch/_async/client/features.py index 8cd9d7b9e..4b2b787ae 100644 --- a/elasticsearch/_async/client/features.py +++ b/elasticsearch/_async/client/features.py @@ -48,7 +48,7 @@ async def get_features( In order for a feature state to be listed in this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node.
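For reference, the synchronous variant with `allow_partial_results` disabled; the index pattern, field name, and query are placeholders:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

    # Synchronous ES|QL query that fails outright rather than returning
    # partial data when shards are unavailable.
    resp = client.esql.query(
        query="FROM logs-* | WHERE status >= 500 | LIMIT 100",
        allow_partial_results=False,
    )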

    - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. """ @@ -102,7 +102,7 @@ async def reset_features(

    IMPORTANT: The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes.

    - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. """ diff --git a/elasticsearch/_async/client/fleet.py b/elasticsearch/_async/client/fleet.py index ff8449659..672c4de81 100644 --- a/elasticsearch/_async/client/fleet.py +++ b/elasticsearch/_async/client/fleet.py @@ -53,7 +53,7 @@ async def global_checkpoints( This API is designed for internal use by the Fleet server project.

    - ``_ + ``_ :param index: A single index or index alias that resolves to a single index. :param checkpoints: A comma separated list of previous global checkpoints. When @@ -138,12 +138,12 @@ async def msearch( """ .. raw:: html -

    Executes several fleet searches with a single API request. - The API follows the same structure as the multi search API. However, similar to the fleet search API, it - supports the wait_for_checkpoints parameter.

    +

    Executes several fleet searches with a single API request.

    +

    The API follows the same structure as the multi search (_msearch) API. + However, similar to the fleet search API, it supports the wait_for_checkpoints parameter.

    - ``_ + ``_ :param searches: :param index: A single target to search. If the target is an index alias, it @@ -154,9 +154,9 @@ async def msearch( example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. :param allow_partial_search_results: If true, returns partial results if there - are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). - If false, returns an error with no partial results. Defaults to the configured - cluster setting `search.default_allow_partial_results` which is true by default. + are shard request timeouts or shard failures. If false, returns an error + with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` + which is true by default. :param ccs_minimize_roundtrips: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. :param expand_wildcards: Type of index that wildcard expressions can match. If @@ -392,7 +392,7 @@ async def search( after provided checkpoint has been processed and is visible for searches inside of Elasticsearch.
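A tentative sketch of `wait_for_checkpoints` with partial results disabled; the index name, query, and checkpoint value are placeholders, and the list-of-integers form of `wait_for_checkpoints` is an assumption about the client signature rather than something shown in this patch:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # placeholder endpoint

    # Search a single index, but only after the given checkpoint (sequence
    # number) on its shard has been processed and is visible to search.
    resp = client.fleet.search(
        index=".fleet-agents",                     # must resolve to a single index
        query={"term": {"policy_id": "default"}},
        wait_for_checkpoints=[42],                 # assumed: one checkpoint per shard
        allow_partial_search_results=False,
    )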

    - ``_ + ``_ :param index: A single target to search. If the target is an index alias, it must resolve to a single index. @@ -400,9 +400,9 @@ async def search( :param aggs: :param allow_no_indices: :param allow_partial_search_results: If true, returns partial results if there - are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). - If false, returns an error with no partial results. Defaults to the configured - cluster setting `search.default_allow_partial_results` which is true by default. + are shard request timeouts or shard failures. If false, returns an error + with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` + which is true by default. :param analyze_wildcard: :param analyzer: :param batched_reduce_size: diff --git a/elasticsearch/_async/client/graph.py b/elasticsearch/_async/client/graph.py index 5b86970b1..8b0933f19 100644 --- a/elasticsearch/_async/client/graph.py +++ b/elasticsearch/_async/client/graph.py @@ -55,7 +55,7 @@ async def explore( You can exclude vertices that have already been returned.

    - ``_ + ``_ :param index: Name of the index. :param connections: Specifies or more fields from which you want to extract terms diff --git a/elasticsearch/_async/client/ilm.py b/elasticsearch/_async/client/ilm.py index d483797d0..a5c047e78 100644 --- a/elasticsearch/_async/client/ilm.py +++ b/elasticsearch/_async/client/ilm.py @@ -44,7 +44,7 @@ async def delete_lifecycle( You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error.

    - ``_ + ``_ :param name: Identifier for the policy. :param master_timeout: Period to wait for a connection to the master node. If @@ -102,7 +102,7 @@ async def explain_lifecycle(

    The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and information about any failures.

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to target. Supports wildcards (`*`). To target all data streams and indices, use `*` @@ -163,7 +163,7 @@ async def get_lifecycle(

    Get lifecycle policies.

    - ``_ + ``_ :param name: Identifier for the policy. :param master_timeout: Period to wait for a connection to the master node. If @@ -218,7 +218,7 @@ async def get_status(

    Get the current index lifecycle management status.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ilm/status" @@ -274,7 +274,7 @@ async def migrate_to_data_tiers( Use the stop ILM and get ILM status APIs to wait until the reported operation mode is STOPPED.

    - ``_ + ``_ :param dry_run: If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration. This provides @@ -347,7 +347,7 @@ async def move_to_step( An index cannot move to a step that is not part of its policy.

    - ``_ + ``_ :param index: The name of the index whose lifecycle step is to change :param current_step: The step that the index is expected to be in. @@ -415,7 +415,7 @@ async def put_lifecycle(

    NOTE: Only the latest version of the policy is stored, you cannot revert to previous versions.

    - ``_ + ``_ :param name: Identifier for the policy. :param master_timeout: Period to wait for a connection to the master node. If @@ -479,7 +479,7 @@ async def remove_policy( It also stops managing the indices.

    - ``_ + ``_ :param index: The name of the index to remove policy on """ @@ -525,7 +525,7 @@ async def retry( Use the explain lifecycle state API to determine whether an index is in the ERROR step.

    - ``_ + ``_ :param index: The name of the indices (comma-separated) whose failed lifecycle step is to be retry @@ -573,7 +573,7 @@ async def start( Restarting ILM is necessary only when it has been stopped using the stop ILM API.

    - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and @@ -627,7 +627,7 @@ async def stop( Use the get ILM status API to check whether ILM is running.

    - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and diff --git a/elasticsearch/_async/client/indices.py b/elasticsearch/_async/client/indices.py index 79cac1395..77621b695 100644 --- a/elasticsearch/_async/client/indices.py +++ b/elasticsearch/_async/client/indices.py @@ -62,7 +62,7 @@ async def add_block( Index blocks limit the operations allowed on an index by blocking specific operation types.

    - ``_ + ``_ :param index: A comma-separated list or wildcard expression of index names used to limit the request. By default, you must explicitly name the indices you @@ -173,7 +173,7 @@ async def analyze( The _analyze endpoint without a specified index will always use 10000 as its limit.

    - ``_ + ``_ :param index: Index used to derive the analyzer. If specified, the `analyzer` or field parameter overrides this value. If no index is specified or the @@ -265,7 +265,7 @@ async def cancel_migrate_reindex(

    Cancel a migration reindex attempt for a data stream or index.

    - ``_ + ``_ :param index: The index or data stream name """ @@ -327,7 +327,7 @@ async def clear_cache( To clear the cache only of specific fields, use the fields parameter.

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -338,7 +338,7 @@ async def clear_cache( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param fielddata: If `true`, clears the fields cache. Use the `fields` parameter to clear the cache of specific fields only. :param fields: Comma-separated list of field names used to limit the `fielddata` @@ -449,7 +449,7 @@ async def clone(

    Because the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well.

    - ``_ + ``_ :param index: Name of the source index to clone. :param target: Name of the target index to create. @@ -553,7 +553,7 @@ async def close( Closing indices can be turned off with the cluster settings API by setting cluster.indices.close.enable to false.

    - ``_ + ``_ :param index: Comma-separated list or wildcard expression of index names used to limit the request. @@ -563,7 +563,7 @@ async def close( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param master_timeout: Period to wait for a connection to the master node. If @@ -654,7 +654,7 @@ async def create( Note that changing this setting will also affect the wait_for_active_shards value on all subsequent write operations.

    - ``_ + ``_ :param index: Name of the index you wish to create. :param aliases: Aliases for the index. @@ -731,7 +731,7 @@ async def create_data_stream(

    You must have a matching index template with data stream enabled.

    - ``_ + ``_ :param name: Name of the data stream, which must meet the following criteria: Lowercase only; Cannot include `\\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, @@ -794,7 +794,7 @@ async def create_from(

    Copy the mappings and settings from the source index to a destination index while allowing request settings and mappings to override the source values.

    - ``_ + ``_ :param source: The source index or data stream name :param dest: The destination index or data stream name @@ -861,7 +861,7 @@ async def data_streams_stats(

    Get statistics for one or more data streams.

    - ``_ + ``_ :param name: Comma-separated list of data streams used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams in a @@ -930,7 +930,7 @@ async def delete( You can then use the delete index API to delete the previous write index.

    - ``_ + ``_ :param index: Comma-separated list of indices to delete. You cannot specify index aliases. By default, this parameter does not support wildcards (`*`) or `_all`. @@ -942,7 +942,7 @@ async def delete( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param master_timeout: Period to wait for a connection to the master node. If @@ -1004,7 +1004,7 @@ async def delete_alias( Removes a data stream or index from an alias.

    - ``_ + ``_ :param index: Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). @@ -1072,7 +1072,7 @@ async def delete_data_lifecycle( Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle.

    - ``_ + ``_ :param name: A comma-separated list of data streams of which the data stream lifecycle will be deleted; use `*` to get all data streams @@ -1136,7 +1136,7 @@ async def delete_data_stream( Deletes one or more data streams and their backing indices.

    - ``_ + ``_ :param name: Comma-separated list of data streams to delete. Wildcard (`*`) expressions are supported. @@ -1194,7 +1194,7 @@ async def delete_index_template( existing templates.

    - ``_ + ``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. @@ -1246,10 +1246,11 @@ async def delete_template( """ .. raw:: html -

    Delete a legacy index template.

    +

    Delete a legacy index template. + IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.

    - ``_ + ``_ :param name: The name of the legacy index template to delete. Wildcard (`*`) expressions are supported. @@ -1321,7 +1322,7 @@ async def disk_usage( The stored size of the _id field is likely underestimated while the _source field is overestimated.

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. It’s recommended to execute this API with a single @@ -1404,7 +1405,7 @@ async def downsample( The source index must be read only (index.blocks.write: true).

    - ``_ + ``_ :param index: Name of the time series index to downsample. :param target_index: Name of the index to create. @@ -1476,7 +1477,7 @@ async def exists( Check if one or more indices, index aliases, or data streams exist.

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases. Supports wildcards (`*`). @@ -1486,7 +1487,7 @@ async def exists( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param flat_settings: If `true`, returns settings in flat format. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. @@ -1570,7 +1571,7 @@ async def exists_alias( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. :param local: If `true`, the request retrieves information from the local node @@ -1697,7 +1698,7 @@ async def exists_template(

    IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.

    - ``_ + ``_ :param name: A comma-separated list of index template names used to limit the request. Wildcard (`*`) expressions are supported. @@ -1755,7 +1756,7 @@ async def explain_data_lifecycle( Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution.

    - ``_ + ``_ :param index: The name of the index to explain :param include_defaults: indicates if the API should return the default values @@ -1822,7 +1823,7 @@ async def field_usage_stats( A given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times.

    - ``_ + ``_ :param index: Comma-separated list or wildcard expression of index names used to limit the request. @@ -1907,7 +1908,7 @@ async def flush( If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called.

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to flush. Supports wildcards (`*`). To flush all data streams and indices, omit this @@ -1918,7 +1919,7 @@ async def flush( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param force: If `true`, the request forces a flush even if there are no changes to commit to the index. :param ignore_unavailable: If `false`, the request returns an error if it targets @@ -2032,7 +2033,7 @@ async def forcemerge( - ``_ + ``_ :param index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices @@ -2130,7 +2131,7 @@ async def get( stream’s backing indices.

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard expressions (*) are supported. @@ -2223,7 +2224,7 @@ async def get_alias( Retrieves information for one or more data stream or index aliases.

    - ``_ + ``_ :param index: Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). To target all data streams and indices, @@ -2236,7 +2237,7 @@ async def get_alias( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param local: If `true`, the request retrieves information from the local node @@ -2309,14 +2310,13 @@ async def get_data_lifecycle(

    Get the data stream lifecycle configuration of one or more data streams.
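A minimal sketch, assuming the same client and a hypothetical data stream pattern:

    # Read the lifecycle configuration of every matching data stream, including defaults.
    resp = await client.indices.get_data_lifecycle(
        name="logs-*",
        include_defaults=True,
    )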

    - ``_ + ``_ :param name: Comma-separated list of data streams to limit the request. Supports wildcards (`*`). To target all data streams, omit this parameter or use `*` or `_all`. :param expand_wildcards: Type of data stream that wildcard patterns can match. - Supports comma-separated values, such as `open,hidden`. Valid values are: - `all`, `open`, `closed`, `hidden`, `none`. + Supports comma-separated values, such as `open,hidden`. :param include_defaults: If `true`, return all default settings in the response. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and @@ -2367,7 +2367,7 @@ async def get_data_lifecycle_stats( Get statistics about the data streams that are managed by a data stream lifecycle.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_lifecycle/stats" @@ -2418,7 +2418,7 @@ async def get_data_stream(

    Get information about one or more data streams.

    - ``_ + ``_ :param name: Comma-separated list of data stream names used to limit the request. Wildcard (`*`) expressions are supported. If omitted, all data streams are @@ -2499,7 +2499,7 @@ async def get_field_mapping(

    This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields.
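A rough sketch of limiting the response to a few fields (hypothetical field and index names):

    # Return mappings only for the requested fields rather than the whole index mapping.
    resp = await client.indices.get_field_mapping(
        fields="message,user.id",
        index="my-index",
        include_defaults=True,
    )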

    - ``_ + ``_ :param fields: Comma-separated list or wildcard expression of fields used to limit returned information. Supports wildcards (`*`). @@ -2512,7 +2512,7 @@ async def get_field_mapping( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param include_defaults: If `true`, return all default settings in the response. @@ -2580,7 +2580,7 @@ async def get_index_template( Get information about one or more index templates.

    - ``_ + ``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. @@ -2657,7 +2657,7 @@ async def get_mapping( For data streams, the API retrieves mappings for the stream’s backing indices.

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -2668,7 +2668,7 @@ async def get_mapping( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param local: If `true`, the request retrieves information from the local node @@ -2731,7 +2731,7 @@ async def get_migrate_reindex_status(

    Get the status of a migration reindex attempt for a data stream or index.

    - ``_ + ``_ :param index: The index or data stream name. """ @@ -2791,7 +2791,7 @@ async def get_settings( For data streams, it returns setting information for the stream's backing indices.

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -2878,12 +2878,12 @@ async def get_template( """ .. raw:: html -

    Get index templates. +

    Get legacy index templates. Get information about one or more index templates.

    IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.

    - ``_ + ``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (`*`) expressions are supported. To return all index templates, @@ -2950,7 +2950,7 @@ async def migrate_reindex( The persistent task ID is returned immediately and the reindexing work is completed in that task.

    - ``_ + ``_ :param reindex: """ @@ -3146,7 +3146,7 @@ async def open(

    Because opening or closing an index allocates its shards, the wait_for_active_shards setting on index creation applies to the _open and _close index actions as well.
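A short sketch of the close/open round trip with an explicit shard-activation wait (hypothetical index name, assumed client):

    await client.indices.close(index="my-index")
    # wait_for_active_shards applies to _open just as it does at index creation time.
    await client.indices.open(index="my-index", wait_for_active_shards=1)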

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). By default, you must explicitly @@ -3160,7 +3160,7 @@ async def open( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param master_timeout: Period to wait for a connection to the master node. If @@ -3419,8 +3419,7 @@ async def put_data_lifecycle( for this data stream. A data stream lifecycle that's disabled (enabled: `false`) will have no effect on the data stream. :param expand_wildcards: Type of data stream that wildcard patterns can match. - Supports comma-separated values, such as `open,hidden`. Valid values are: - `all`, `hidden`, `open`, `closed`, `none`. + Supports comma-separated values, such as `open,hidden`. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -3716,7 +3715,7 @@ async def put_mapping( Instead, add an alias field to create an alternate field name.

    - ``_ + ``_ :param index: A comma-separated list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. @@ -3732,7 +3731,7 @@ async def put_mapping( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param field_names: Control whether field names are enabled for the index. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. @@ -3850,8 +3849,34 @@ async def put_settings( Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default.

    To revert a setting to the default value, use a null value. - The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation. + The list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation. To preserve existing settings from being updated, set the preserve_existing parameter to true.

    +

    There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example:

    +
    {
    +            "number_of_replicas": 1
    +          }
    +          
    +

    Or you can use an index setting object:

    +
    {
    +            "index": {
    +              "number_of_replicas": 1
    +            }
    +          }
    +          
    +

Or you can use dot notation:

    +
    {
    +            "index.number_of_replicas": 1
    +          }
    +          
    +

    Or you can embed any of the aforementioned options in a settings object. For example:

    +
    {
    +            "settings": {
    +              "index": {
    +                "number_of_replicas": 1
    +              }
    +            }
    +          }
    +          
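The same three request shapes expressed through the Python client's settings parameter look roughly like this (hypothetical index name, assumed client):

    # All three calls send an equivalent update.
    await client.indices.put_settings(index="my-index", settings={"number_of_replicas": 1})
    await client.indices.put_settings(index="my-index", settings={"index": {"number_of_replicas": 1}})
    await client.indices.put_settings(index="my-index", settings={"index.number_of_replicas": 1})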

    NOTE: You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. @@ -3862,7 +3887,7 @@ async def put_settings( To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.

    - ``_ + ``_ :param settings: :param index: Comma-separated list of data streams, indices, and aliases used @@ -3971,7 +3996,7 @@ async def put_template( """ .. raw:: html -

    Create or update an index template. +

    Create or update a legacy index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name.

    IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.

    @@ -3988,7 +4013,7 @@ async def put_template( NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order.

    - ``_ + ``_ :param name: The name of the template :param aliases: Aliases for the index. @@ -4060,10 +4085,20 @@ async def recovery( *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, active_only: t.Optional[bool] = None, + allow_no_indices: t.Optional[bool] = None, detailed: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, + expand_wildcards: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] + ], + t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], + ] + ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -4090,14 +4125,23 @@ async def recovery( This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API.
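A sketch of a recovery call that exercises the newly added wildcard-handling options (index pattern is hypothetical):

    # Detailed recovery information for open and hidden matches of the expression.
    resp = await client.indices.recovery(
        index="my-index-*",
        detailed=True,
        active_only=False,
        expand_wildcards="open,hidden",
        ignore_unavailable=True,
    )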

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param active_only: If `true`, the response only includes ongoing shard recoveries. + :param allow_no_indices: If `false`, the request returns an error if any wildcard + expression, index alias, or `_all` value targets only missing or closed indices. + This behavior applies even if the request targets other open indices. :param detailed: If `true`, the response includes detailed information about shard recoveries. + :param expand_wildcards: Type of index that wildcard patterns can match. If the + request can target data streams, this argument determines whether wildcard + expressions match hidden data streams. Supports comma-separated values, such + as `open,hidden`. + :param ignore_unavailable: If `false`, the request returns an error if it targets + a missing or closed index. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: @@ -4109,14 +4153,20 @@ async def recovery( __query: t.Dict[str, t.Any] = {} if active_only is not None: __query["active_only"] = active_only + if allow_no_indices is not None: + __query["allow_no_indices"] = allow_no_indices if detailed is not None: __query["detailed"] = detailed if error_trace is not None: __query["error_trace"] = error_trace + if expand_wildcards is not None: + __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if ignore_unavailable is not None: + __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -4164,7 +4214,7 @@ async def refresh( This option ensures the indexing operation waits for a periodic refresh before running the search.

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -4175,7 +4225,7 @@ async def refresh( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. """ @@ -4248,7 +4298,7 @@ async def reload_search_analyzers( This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future.

    - ``_ + ``_ :param index: A comma-separated list of index names to reload analyzers for :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves @@ -4355,7 +4405,7 @@ async def resolve_cluster( If a connection was (re-)established, this will also cause the remote/info endpoint to now indicate a connected status.
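A minimal sketch of probing local and remote matches for an index expression (cluster aliases and patterns are hypothetical):

    resp = await client.indices.resolve_cluster(name="my-index-*,remote*:my-index-*")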

    - ``_ + ``_ :param name: A comma-separated list of names or index patterns for the indices, aliases, and data streams to resolve. Resources on remote clusters can be @@ -4374,10 +4424,9 @@ async def resolve_cluster( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - NOTE: This option is only supported when specifying an index expression. - You will get an error if you specify index options to the `_resolve/cluster` - API endpoint that takes no index expression. + as `open,hidden`. NOTE: This option is only supported when specifying an + index expression. You will get an error if you specify index options to the + `_resolve/cluster` API endpoint that takes no index expression. :param ignore_throttled: If true, concrete, expanded, or aliased indices are ignored when frozen. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to @@ -4457,7 +4506,7 @@ async def resolve_index( Multiple patterns and remote clusters are supported.

    - ``_ + ``_ :param name: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified @@ -4470,7 +4519,7 @@ async def resolve_index( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. """ @@ -4559,7 +4608,7 @@ async def rollover( If you roll over the alias on May 7, 2099, the new index's name is my-index-2099.05.07-000002.
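A hedged sketch of a conditional rollover (alias name and thresholds are illustrative):

    # Roll the write index only when at least one condition is satisfied.
    resp = await client.indices.rollover(
        alias="my-alias",
        conditions={"max_age": "7d", "max_docs": 10_000},
    )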

    - ``_ + ``_ :param alias: Name of the data stream or index alias to roll over. :param new_index: Name of the index to create. Supports date math. Data streams @@ -4674,7 +4723,7 @@ async def segments( For data streams, the API returns information about the stream's backing indices.

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -4685,7 +4734,7 @@ async def segments( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param verbose: If `true`, the request returns a verbose response. @@ -4766,7 +4815,7 @@ async def shard_stores(

    By default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards.
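A rough sketch that widens the response to every shard copy; the status value is an assumption about the query parameter rather than something shown in this change:

    resp = await client.indices.shard_stores(index="my-index", status="all")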

    - ``_ + ``_ :param index: List of data streams, indices, and aliases used to limit the request. :param allow_no_indices: If false, the request returns an error if any wildcard @@ -4868,7 +4917,7 @@ async def shrink( - ``_ + ``_ :param index: Name of the source index to shrink. :param target: Name of the target index to create. @@ -4949,7 +4998,7 @@ async def simulate_index_template( Get the index configuration that would be applied to the specified index from an existing index template.

    - ``_ + ``_ :param name: Name of the index to simulate :param cause: User defined reason for dry-run creating the new template for simulation @@ -5039,7 +5088,7 @@ async def simulate_template( Get the index configuration that would be applied by a particular index template.

    - ``_ + ``_ :param name: Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit this parameter and specify the template @@ -5211,7 +5260,7 @@ async def split( - ``_ + ``_ :param index: Name of the source index to split. :param target: Name of the target index to create. @@ -5313,7 +5362,7 @@ async def stats( Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.

    - ``_ + ``_ :param index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices @@ -5420,7 +5469,7 @@ async def unfreeze( When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again.

    - ``_ + ``_ :param index: Identifier for the index. :param allow_no_indices: If `false`, the request returns an error if any wildcard @@ -5577,7 +5626,7 @@ async def validate_query( Validates a query without running it.
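A short sketch of validating a query with an explanation of failures (index and query are hypothetical):

    resp = await client.indices.validate_query(
        index="my-index",
        query={"match": {"message": "this is a test"}},
        explain=True,
    )
    print(resp["valid"])  # False, plus an explanation entry, when the query cannot be parsed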

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this @@ -5598,7 +5647,7 @@ async def validate_query( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param explain: If `true`, the response returns detailed information if an error has occurred. :param ignore_unavailable: If `false`, the request returns an error if it targets diff --git a/elasticsearch/_async/client/inference.py b/elasticsearch/_async/client/inference.py index ce939ccf9..0083677f4 100644 --- a/elasticsearch/_async/client/inference.py +++ b/elasticsearch/_async/client/inference.py @@ -47,7 +47,7 @@ async def completion(

    Perform completion inference on the service

    - ``_ + ``_ :param inference_id: The inference Id :param input: Inference input. Either a string or an array of strings. @@ -123,7 +123,7 @@ async def delete(

    Delete an inference endpoint

    - ``_ + ``_ :param inference_id: The inference identifier. :param task_type: The task type @@ -197,7 +197,7 @@ async def get(

    Get an inference endpoint

    - ``_ + ``_ :param task_type: The task type :param inference_id: The inference Id @@ -235,7 +235,7 @@ async def get( ) @_rewrite_parameters( - body_fields=("input", "query", "task_settings"), + body_fields=("input", "input_type", "query", "task_settings"), ) async def inference( self, @@ -257,6 +257,7 @@ async def inference( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + input_type: t.Optional[str] = None, pretty: t.Optional[bool] = None, query: t.Optional[str] = None, task_settings: t.Optional[t.Any] = None, @@ -277,13 +278,22 @@ async def inference( - ``_ + ``_ :param inference_id: The unique identifier for the inference endpoint. :param input: The text on which you want to perform the inference task. It can be a single string or an array. > info > Inference endpoints for the `completion` task type currently only support a single string as input. :param task_type: The type of inference task that the model performs. + :param input_type: Specifies the input data type for the text embedding model. + The `input_type` parameter only applies to Inference Endpoints with the `text_embedding` + task type. Possible values include: * `SEARCH` * `INGEST` * `CLASSIFICATION` + * `CLUSTERING` Not all services support all values. Unsupported values will + trigger a validation exception. Accepted values depend on the configured + inference service, refer to the relevant service-specific documentation for + more info. > info > The `input_type` parameter specified on the root level + of the request body will take precedence over the `input_type` parameter + specified in `task_settings`. :param query: The query input, which is required only for the `rerank` task. It is not required for other tasks. :param task_settings: Task settings for the individual inference request. These @@ -322,6 +332,8 @@ async def inference( if not __body: if input is not None: __body["input"] = input + if input_type is not None: + __body["input_type"] = input_type if query is not None: __body["query"] = query if task_settings is not None: @@ -366,26 +378,45 @@ async def put( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html -
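A sketch of the new top-level input_type option on the inference call; the endpoint name and inputs are hypothetical, and the accepted values depend on the configured service:

    resp = await client.inference.inference(
        inference_id="my-embedding-endpoint",
        task_type="text_embedding",
        input=["first passage", "second passage"],
        input_type="SEARCH",  # overrides any input_type given in task_settings
    )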

    Create an inference endpoint. - When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    +

    Create an inference endpoint.

    IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.

    +

    The following integrations are available through the inference API. You can find the available task types next to the integration name:

    +
      +
• AlibabaCloud AI Search (completion, rerank, sparse_embedding, text_embedding)
• Amazon Bedrock (completion, text_embedding)
• Anthropic (completion)
• Azure AI Studio (completion, text_embedding)
• Azure OpenAI (completion, text_embedding)
• Cohere (completion, rerank, text_embedding)
• DeepSeek (completion, chat_completion)
• Elasticsearch (rerank, sparse_embedding, text_embedding - this service is for built-in models and models uploaded through Eland)
• ELSER (sparse_embedding)
• Google AI Studio (completion, text_embedding)
• Google Vertex AI (rerank, text_embedding)
• Hugging Face (chat_completion, completion, rerank, text_embedding)
• Mistral (chat_completion, completion, text_embedding)
• OpenAI (chat_completion, completion, text_embedding)
• VoyageAI (text_embedding, rerank)
• Watsonx inference integration (text_embedding)
• JinaAI (text_embedding, rerank)
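A hedged creation sketch using the elasticsearch service with a built-in model; the endpoint name, model choice, and allocation settings are illustrative rather than prescribed by this change:

    resp = await client.inference.put(
        task_type="text_embedding",
        inference_id="my-e5-endpoint",
        inference_config={
            "service": "elasticsearch",
            "service_settings": {
                "model_id": ".multilingual-e5-small",
                "num_allocations": 1,
                "num_threads": 1,
            },
        },
        timeout="30s",  # the new timeout query parameter added in this change
    )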
    - ``_ + ``_ :param inference_id: The inference Id :param inference_config: - :param task_type: The task type + :param task_type: The task type. Refer to the integration list in the API description + for the available task types. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") @@ -416,6 +447,8 @@ async def put( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __body = inference_config if inference_config is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] @@ -451,6 +484,7 @@ async def put_alibabacloud( human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -458,14 +492,9 @@ async def put_alibabacloud(

    Create an AlibabaCloud AI Search inference endpoint.

    Create an inference endpoint to perform an inference task with the alibabacloud-ai-search service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    - ``_ + ``_ :param task_type: The type of the inference task that the model will perform. :param alibabacloud_inference_id: The unique identifier of the inference endpoint. @@ -476,6 +505,8 @@ async def put_alibabacloud( :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -502,6 +533,8 @@ async def put_alibabacloud( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -547,25 +580,21 @@ async def put_amazonbedrock( human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

    Create an Amazon Bedrock inference endpoint.

    -

    Creates an inference endpoint to perform an inference task with the amazonbedrock service.

    +

    Create an inference endpoint to perform an inference task with the amazonbedrock service.

info: You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    - ``_ + ``_ :param task_type: The type of the inference task that the model will perform. :param amazonbedrock_inference_id: The unique identifier of the inference endpoint. @@ -576,6 +605,8 @@ async def put_amazonbedrock( :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -602,6 +633,8 @@ async def put_amazonbedrock( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -647,6 +680,7 @@ async def put_anthropic( human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -654,14 +688,9 @@ async def put_anthropic(

    Create an Anthropic inference endpoint.

    Create an inference endpoint to perform an inference task with the anthropic service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    - ``_ + ``_ :param task_type: The task type. The only valid task type for the model to perform is `completion`. @@ -673,6 +702,8 @@ async def put_anthropic( :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -699,6 +730,8 @@ async def put_anthropic( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -744,6 +777,7 @@ async def put_azureaistudio( human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -751,14 +785,9 @@ async def put_azureaistudio(

Create an Azure AI Studio inference endpoint.

    Create an inference endpoint to perform an inference task with the azureaistudio service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    - ``_ + ``_ :param task_type: The type of the inference task that the model will perform. :param azureaistudio_inference_id: The unique identifier of the inference endpoint. @@ -769,6 +798,8 @@ async def put_azureaistudio( :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -795,6 +826,8 @@ async def put_azureaistudio( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -840,6 +873,7 @@ async def put_azureopenai( human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -853,14 +887,9 @@ async def put_azureopenai(
  • GPT-3.5
  • The list of embeddings models that you can choose from in your deployment can be found in the Azure models documentation.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    - ``_ + ``_ :param task_type: The type of the inference task that the model will perform. NOTE: The `chat_completion` task type only supports streaming and only through @@ -873,6 +902,8 @@ async def put_azureopenai( :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -899,6 +930,8 @@ async def put_azureopenai( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -944,6 +977,7 @@ async def put_cohere( human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -951,14 +985,9 @@ async def put_cohere(

    Create a Cohere inference endpoint.

    Create an inference endpoint to perform an inference task with the cohere service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    - ``_ + ``_ :param task_type: The type of the inference task that the model will perform. :param cohere_inference_id: The unique identifier of the inference endpoint. @@ -969,6 +998,8 @@ async def put_cohere( :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -993,6 +1024,8 @@ async def put_cohere( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -1017,6 +1050,221 @@ async def put_cohere( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + async def put_custom( + self, + *, + task_type: t.Union[ + str, t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"] + ], + custom_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["custom"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

    Create a custom inference endpoint.

    +

    The custom service gives more control over how to interact with external inference services that aren't explicitly supported through dedicated integrations. + The custom service gives you the ability to define the headers, url, query parameters, request body, and secrets. + The custom service supports the template replacement functionality, which enables you to define a template that can be replaced with the value associated with that key. + Templates are portions of a string that start with ${ and end with }. + The parameters secret_parameters and task_settings are checked for keys for template replacement. Template replacement is supported in the request, headers, url, and query_parameters. + If the definition (key) is not found for a template, an error message is returned. + In case of an endpoint definition like the following:

    +
    PUT _inference/text_embedding/test-text-embedding
    +          {
    +            "service": "custom",
    +            "service_settings": {
    +               "secret_parameters": {
    +                    "api_key": "<some api key>"
    +               },
    +               "url": "...endpoints.huggingface.cloud/v1/embeddings",
    +               "headers": {
    +                   "Authorization": "Bearer ${api_key}",
    +                   "Content-Type": "application/json"
    +               },
    +               "request": "{\\"input\\": ${input}}",
    +               "response": {
    +                   "json_parser": {
    +                       "text_embeddings":"$.data[*].embedding[*]"
    +                   }
    +               }
    +            }
    +          }
    +          
    +

    To replace ${api_key} the secret_parameters and task_settings are checked for a key named api_key.

    +
    +

info: Templates should not be surrounded by quotes.

    +
    +

    Pre-defined templates:

    +
      +
• ${input} refers to the array of input strings that comes from the input field of the subsequent inference requests.
• ${input_type} refers to the input type translation values.
• ${query} refers to the query field used specifically for reranking tasks.
• ${top_n} refers to the top_n field available when performing rerank requests.
• ${return_documents} refers to the return_documents field available when performing rerank requests.
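The same endpoint definition expressed through the new put_custom method looks roughly like this; the URL is elided in the example above and must be replaced with a real endpoint:

    resp = await client.inference.put_custom(
        task_type="text_embedding",
        custom_inference_id="test-text-embedding",
        service="custom",
        service_settings={
            "secret_parameters": {"api_key": "<some api key>"},
            "url": "...endpoints.huggingface.cloud/v1/embeddings",
            "headers": {
                "Authorization": "Bearer ${api_key}",
                "Content-Type": "application/json",
            },
            "request": '{"input": ${input}}',
            "response": {"json_parser": {"text_embeddings": "$.data[*].embedding[*]"}},
        },
    )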
    + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param custom_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `custom`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `custom` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if custom_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'custom_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "custom_inference_id": _quote(custom_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["custom_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_custom", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("service", "service_settings", "chunking_settings"), + ) + async def put_deepseek( + self, + *, + task_type: t.Union[str, t.Literal["chat_completion", "completion"]], + deepseek_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["deepseek"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

    Create a DeepSeek inference endpoint.

    +

    Create an inference endpoint to perform an inference task with the deepseek service.
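A hedged sketch of creating the endpoint; the service_settings field names are assumptions and should be checked against the DeepSeek service documentation:

    resp = await client.inference.put_deepseek(
        task_type="chat_completion",
        deepseek_inference_id="my-deepseek-endpoint",
        service="deepseek",
        service_settings={
            "api_key": "<api key>",       # assumed setting name
            "model_id": "deepseek-chat",  # assumed setting name
        },
    )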

    + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param deepseek_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `deepseek`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `deepseek` service. + :param chunking_settings: The chunking configuration object. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if deepseek_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'deepseek_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "deepseek_inference_id": _quote(deepseek_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["deepseek_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return await self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_deepseek", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=( "service", @@ -1040,6 +1288,7 @@ async def put_elasticsearch( human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -1062,7 +1311,7 @@ async def put_elasticsearch( Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    - ``_ + ``_ :param task_type: The type of the inference task that the model will perform. :param elasticsearch_inference_id: The unique identifier of the inference endpoint. @@ -1074,6 +1323,8 @@ async def put_elasticsearch( :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -1100,6 +1351,8 @@ async def put_elasticsearch( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -1139,6 +1392,7 @@ async def put_elser( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -1162,7 +1416,7 @@ async def put_elser( Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    - ``_ + ``_ :param task_type: The type of the inference task that the model will perform. :param elser_inference_id: The unique identifier of the inference endpoint. @@ -1171,6 +1425,8 @@ async def put_elser( :param service_settings: Settings used to install the inference model. These settings are specific to the `elser` service. :param chunking_settings: The chunking configuration object. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -1195,6 +1451,8 @@ async def put_elser( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -1232,6 +1490,7 @@ async def put_googleaistudio( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -1239,14 +1498,9 @@ async def put_googleaistudio(

Create a Google AI Studio inference endpoint.

    Create an inference endpoint to perform an inference task with the googleaistudio service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    - ``_ + ``_ :param task_type: The type of the inference task that the model will perform. :param googleaistudio_inference_id: The unique identifier of the inference endpoint. @@ -1255,6 +1509,8 @@ async def put_googleaistudio( :param service_settings: Settings used to install the inference model. These settings are specific to the `googleaistudio` service. :param chunking_settings: The chunking configuration object. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -1281,6 +1537,8 @@ async def put_googleaistudio( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -1314,7 +1572,9 @@ async def put_googleaistudio( async def put_googlevertexai( self, *, - task_type: t.Union[str, t.Literal["rerank", "text_embedding"]], + task_type: t.Union[ + str, t.Literal["chat_completion", "completion", "rerank", "text_embedding"] + ], googlevertexai_inference_id: str, service: t.Optional[t.Union[str, t.Literal["googlevertexai"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, @@ -1324,6 +1584,7 @@ async def put_googlevertexai( human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -1331,14 +1592,9 @@ async def put_googlevertexai(

    Create a Google Vertex AI inference endpoint.

    Create an inference endpoint to perform an inference task with the googlevertexai service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
    - After creating the endpoint, wait for the model deployment to complete before using it.
    - To verify the deployment status, use the get trained model statistics API.
    - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
    - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
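    A minimal sketch of the widened task types: with this change the `task_type` literal for `put_googlevertexai` also accepts `chat_completion` and `completion`. It assumes an `AsyncElasticsearch` instance named `client` inside a coroutine (as in the earlier sketch); the `service_settings` keys are assumptions to verify against the inference documentation::

        # "chat_completion" is one of the task types newly accepted by this patch.
        resp = await client.inference.put_googlevertexai(
            task_type="chat_completion",
            googlevertexai_inference_id="my-vertexai-chat",  # placeholder id
            service="googlevertexai",
            service_settings={  # assumed keys, shown for illustration only
                "service_account_json": "<service-account-json>",
                "model_id": "<model-id>",
                "location": "<gcp-region>",
                "project_id": "<gcp-project-id>",
            },
            timeout="30s",  # newly added: how long to wait for endpoint creation
        )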

    - ``_ + ``_ :param task_type: The type of the inference task that the model will perform. :param googlevertexai_inference_id: The unique identifier of the inference endpoint. @@ -1349,6 +1605,8 @@ async def put_googlevertexai( :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -1375,6 +1633,8 @@ async def put_googlevertexai( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -1400,12 +1660,19 @@ async def put_googlevertexai( ) @_rewrite_parameters( - body_fields=("service", "service_settings", "chunking_settings"), + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), ) async def put_hugging_face( self, *, - task_type: t.Union[str, t.Literal["text_embedding"]], + task_type: t.Union[ + str, t.Literal["chat_completion", "completion", "rerank", "text_embedding"] + ], huggingface_inference_id: str, service: t.Optional[t.Union[str, t.Literal["hugging_face"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, @@ -1414,17 +1681,22 @@ async def put_hugging_face( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

    Create a Hugging Face inference endpoint.

    -

    Create an inference endpoint to perform an inference task with the hugging_face service.

    -

    You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL.
    - Select the model you want to use on the new endpoint creation page (for example intfloat/e5-small-v2), then select the sentence embeddings task under the advanced configuration section.
    - Create the endpoint and copy the URL after the endpoint initialization has been finished.

    -

    The following models are recommended for the Hugging Face service:

    +

    Create an inference endpoint to perform an inference task with the hugging_face service.
    + Supported tasks include: text_embedding, completion, and chat_completion.

    +

    To configure the endpoint, first visit the Hugging Face Inference Endpoints page and create a new endpoint.
    + Select a model that supports the task you intend to use.

    +

    For Elastic's text_embedding task:
    + The selected model must support the Sentence Embeddings task. On the new endpoint creation page, select the Sentence Embeddings task under the Advanced Configuration section.
    + After the endpoint has initialized, copy the generated endpoint URL.
    + Recommended models for the text_embedding task (a usage sketch follows this list):

    • all-MiniLM-L6-v2
    • all-MiniLM-L12-v2
    @@ -1434,14 +1706,27 @@ async def put_hugging_face(
    • multilingual-e5-base
    • multilingual-e5-small
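    A minimal sketch, assuming an `AsyncElasticsearch` instance named `client` inside a coroutine, of registering such a text_embedding endpoint; the URL and token are placeholders for the values copied from the Hugging Face endpoint page as described above::

        resp = await client.inference.put_hugging_face(
            task_type="text_embedding",
            huggingface_inference_id="my-hf-embeddings",  # placeholder id
            service="hugging_face",
            service_settings={
                "api_key": "<hugging-face-access-token>",  # placeholder
                "url": "<generated-endpoint-url>",  # the URL copied after the endpoint initializes
            },
            timeout="30s",  # newly added parameter in this patch
        )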
    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
    - After creating the endpoint, wait for the model deployment to complete before using it.
    - To verify the deployment status, use the get trained model statistics API.
    - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
    - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    +

    For Elastic's chat_completion and completion tasks:
    + The selected model must support the Text Generation task and expose the OpenAI API. Hugging Face supports both serverless and dedicated endpoints for Text Generation. When creating a dedicated endpoint, select the Text Generation task.
    + After the endpoint is initialized (for dedicated) or ready (for serverless), ensure that it supports the OpenAI API and that the URL includes the /v1/chat/completions path. Then copy the full endpoint URL for use.
    + Recommended models for the chat_completion and completion tasks (a usage sketch follows this list):

    +
      +
    • Mistral-7B-Instruct-v0.2
    • QwQ-32B
    • Phi-3-mini-128k-instruct
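    A minimal sketch, under the same client assumptions as the earlier Hugging Face example, of a chat_completion endpoint backed by a Text Generation deployment whose URL exposes the OpenAI-compatible /v1/chat/completions path; all values are placeholders::

        resp = await client.inference.put_hugging_face(
            task_type="chat_completion",  # task type newly accepted by this patch
            huggingface_inference_id="my-hf-chat",  # placeholder id
            service="hugging_face",
            service_settings={
                "api_key": "<hugging-face-access-token>",  # placeholder
                "url": "https://<endpoint-host>/v1/chat/completions",  # OpenAI-compatible path
            },
        )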
    +

    For Elastic's rerank task:
    + The selected model must support the sentence-ranking task and expose the OpenAI API.
    + Hugging Face currently supports only dedicated (not serverless) endpoints for the rerank task.
    + After the endpoint is initialized, copy the full endpoint URL for use.
    + Tested models for the rerank task (a usage sketch follows this list):

    +
      +
    • bge-reranker-base
    • jina-reranker-v1-turbo-en-GGUF
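    A hedged sketch, under the same client assumptions as the earlier Hugging Face examples, of a rerank endpoint on a dedicated deployment. `task_settings` is newly accepted by `put_hugging_face` in this patch; the specific keys shown are illustrative assumptions rather than documented settings::

        resp = await client.inference.put_hugging_face(
            task_type="rerank",
            huggingface_inference_id="my-hf-reranker",  # placeholder id
            service="hugging_face",
            service_settings={
                "api_key": "<hugging-face-access-token>",  # placeholder
                "url": "<dedicated-endpoint-url>",  # placeholder
            },
            task_settings={"return_documents": True, "top_n": 3},  # assumed keys, for illustration
        )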
    - ``_ + ``_ :param task_type: The type of the inference task that the model will perform. :param huggingface_inference_id: The unique identifier of the inference endpoint. @@ -1450,6 +1735,10 @@ async def put_hugging_face( :param service_settings: Settings used to install the inference model. These settings are specific to the `hugging_face` service. :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -1476,6 +1765,8 @@ async def put_hugging_face( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -1483,6 +1774,8 @@ async def put_hugging_face( __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} @@ -1519,6 +1812,7 @@ async def put_jinaai( human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -1528,14 +1822,9 @@ async def put_jinaai(

    Create an inference endpoint to perform an inference task with the jinaai service.

    To review the available rerank models, refer to https://jina.ai/reranker. To review the available text_embedding models, refer to https://jina.ai/embeddings/.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
    - After creating the endpoint, wait for the model deployment to complete before using it.
    - To verify the deployment status, use the get trained model statistics API.
    - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
    - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    - ``_ + ``_ :param task_type: The type of the inference task that the model will perform. :param jinaai_inference_id: The unique identifier of the inference endpoint. @@ -1546,6 +1835,8 @@ async def put_jinaai( :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -1570,6 +1861,8 @@ async def put_jinaai( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -1600,7 +1893,9 @@ async def put_jinaai( async def put_mistral( self, *, - task_type: t.Union[str, t.Literal["text_embedding"]], + task_type: t.Union[ + str, t.Literal["chat_completion", "completion", "text_embedding"] + ], mistral_inference_id: str, service: t.Optional[t.Union[str, t.Literal["mistral"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, @@ -1609,30 +1904,27 @@ async def put_mistral( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

    Create a Mistral inference endpoint.

    -

    Creates an inference endpoint to perform an inference task with the mistral service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
    - After creating the endpoint, wait for the model deployment to complete before using it.
    - To verify the deployment status, use the get trained model statistics API.
    - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
    - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    +

    Create an inference endpoint to perform an inference task with the mistral service.
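    A minimal sketch of the widened task types for `put_mistral` (which now also accepts `completion` and `chat_completion`), assuming an `AsyncElasticsearch` instance named `client` inside a coroutine; the `service_settings` keys are assumptions to verify against the inference documentation::

        resp = await client.inference.put_mistral(
            task_type="completion",  # newly accepted alongside chat_completion and text_embedding
            mistral_inference_id="my-mistral-completion",  # placeholder id
            service="mistral",
            service_settings={
                "api_key": "<mistral-api-key>",  # placeholder
                "model": "<model-name>",  # assumed key, for illustration
            },
            timeout="30s",  # newly added parameter in this patch
        )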

    - ``_ + ``_ - :param task_type: The task type. The only valid task type for the model to perform - is `text_embedding`. + :param task_type: The type of the inference task that the model will perform. :param mistral_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `mistral`. :param service_settings: Settings used to install the inference model. These settings are specific to the `mistral` service. :param chunking_settings: The chunking configuration object. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -1657,6 +1949,8 @@ async def put_mistral( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -1702,6 +1996,7 @@ async def put_openai( human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -1709,14 +2004,9 @@ async def put_openai(

    Create an OpenAI inference endpoint.

    Create an inference endpoint to perform an inference task with the openai service or openai compatible APIs.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
    - After creating the endpoint, wait for the model deployment to complete before using it.
    - To verify the deployment status, use the get trained model statistics API.
    - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
    - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
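    A hedged sketch, under the same client assumptions as above, of creating an endpoint against the openai service or an OpenAI-compatible API. The inference id keyword is assumed to follow the `<service>_inference_id` naming used throughout this module, and the optional `url` key is an assumption for compatible APIs::

        resp = await client.inference.put_openai(
            task_type="text_embedding",
            openai_inference_id="my-openai-embeddings",  # assumed keyword name; placeholder id
            service="openai",
            service_settings={
                "api_key": "<api-key>",  # placeholder
                "model_id": "<embedding-model-id>",  # placeholder
                # "url": "<openai-compatible-base-url>",  # assumed optional key for compatible APIs
            },
            timeout="30s",  # newly added parameter in this patch
        )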

    - ``_ + ``_ :param task_type: The type of the inference task that the model will perform. NOTE: The `chat_completion` task type only supports streaming and only through @@ -1729,6 +2019,8 @@ async def put_openai( :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -1753,6 +2045,8 @@ async def put_openai( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -1798,6 +2092,7 @@ async def put_voyageai( human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -1808,7 +2103,7 @@ async def put_voyageai(

    Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    - ``_ + ``_ :param task_type: The type of the inference task that the model will perform. :param voyageai_inference_id: The unique identifier of the inference endpoint. @@ -1819,6 +2114,8 @@ async def put_voyageai( :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -1843,6 +2140,8 @@ async def put_voyageai( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -1881,6 +2180,7 @@ async def put_watsonx( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -1890,14 +2190,9 @@ async def put_watsonx(

    Create an inference endpoint to perform an inference task with the watsonxai service. You need an IBM Cloud Databases for Elasticsearch deployment to use the watsonxai inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running.
    - After creating the endpoint, wait for the model deployment to complete before using it.
    - To verify the deployment status, use the get trained model statistics API.
    - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count".
    - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
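    A tentative sketch, under the same client assumptions as above, of creating a watsonxai endpoint once the IBM Cloud Databases for Elasticsearch deployment described above is available; the inference id keyword and the `service_settings` keys are assumptions to verify against the inference documentation::

        resp = await client.inference.put_watsonx(
            task_type="text_embedding",  # per the docstring, the only valid task type here
            watsonx_inference_id="my-watsonx-embeddings",  # assumed keyword name; placeholder id
            service="watsonxai",
            service_settings={  # assumed keys, shown for illustration only
                "url": "<watsonx-url>",
                "api_key": "<ibm-cloud-api-key>",
                "model_id": "<model-id>",
                "project_id": "<project-id>",
            },
            timeout="30s",  # newly added parameter in this patch
        )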

    - ``_ + ``_ :param task_type: The task type. The only valid task type for the model to perform is `text_embedding`. @@ -1906,6 +2201,8 @@ async def put_watsonx( this case, `watsonxai`. :param service_settings: Settings used to install the inference model. These settings are specific to the `watsonxai` service. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -1930,6 +2227,8 @@ async def put_watsonx( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -1970,10 +2269,10 @@ async def rerank( """ .. raw:: html -

    Perform rereanking inference on the service

    +

    Perform reranking inference on the service
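    A minimal sketch, under the same client assumptions as above, of calling the rerank API against an existing rerank endpoint (for example the Hugging Face reranker sketched earlier); the `query` keyword is assumed to be the rerank-specific body field::

        resp = await client.inference.rerank(
            inference_id="my-hf-reranker",  # an existing rerank endpoint id (placeholder)
            query="Which document is about Elasticsearch?",  # assumed keyword, for illustration
            input=[
                "Elasticsearch is a distributed search and analytics engine.",
                "Bananas are a good source of potassium.",
            ],
        )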

    - ``_ + ``_ :param inference_id: The unique identifier for the inference endpoint. :param input: The text on which you want to perform the inference task. It can @@ -2049,7 +2348,7 @@ async def sparse_embedding(

    Perform sparse embedding inference on the service

    - ``_ + ``_ :param inference_id: The inference Id :param input: Inference input. Either a string or an array of strings. @@ -2117,7 +2416,7 @@ async def text_embedding(

    Perform text embedding inference on the service

    - ``_ + ``_ :param inference_id: The inference Id :param input: Inference input. Either a string or an array of strings. @@ -2199,7 +2498,7 @@ async def update( However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.

    - ``_ + ``_ :param inference_id: The unique identifier of the inference endpoint. :param inference_config: diff --git a/elasticsearch/_async/client/ingest.py b/elasticsearch/_async/client/ingest.py index 69792566c..e977c81a5 100644 --- a/elasticsearch/_async/client/ingest.py +++ b/elasticsearch/_async/client/ingest.py @@ -98,7 +98,7 @@ async def delete_ip_location_database(

    Delete IP geolocation database configurations.

    - ``_ + ``_ :param id: A comma-separated list of IP location database configurations. :param master_timeout: The period to wait for a connection to the master node. @@ -155,7 +155,7 @@ async def delete_pipeline( Delete one or more ingest pipelines.

    - ``_ + ``_ :param id: Pipeline ID or wildcard expression of pipeline IDs used to limit the request. To delete all ingest pipelines in a cluster, use a value of `*`. @@ -208,7 +208,7 @@ async def geo_ip_stats( Get download statistics for GeoIP2 databases that are used with the GeoIP processor.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ingest/geoip/stats" @@ -288,7 +288,6 @@ async def get_ip_location_database( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -297,15 +296,11 @@ async def get_ip_location_database(

    Get IP geolocation database configurations.

    - ``_ + ``_ :param id: Comma-separated list of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`. - :param master_timeout: The period to wait for a connection to the master node. - If no response is received before the timeout expires, the request fails - and returns an error. A value of `-1` indicates that the request should never - time out. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: @@ -321,8 +316,6 @@ async def get_ip_location_database( __query["filter_path"] = filter_path if human is not None: __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -355,7 +348,7 @@ async def get_pipeline( This API returns a local reference of the pipeline.

    - ``_ + ``_ :param id: Comma-separated list of pipeline IDs to retrieve. Wildcard (`*`) expressions are supported. To get all ingest pipelines, omit this parameter or use `*`. @@ -412,7 +405,7 @@ async def processor_grok( A grok pattern is like a regular expression that supports aliased expressions that can be reused.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ingest/processor/grok" @@ -534,7 +527,7 @@ async def put_ip_location_database(

    Create or update an IP geolocation database configuration.

    - ``_ + ``_ :param id: The database configuration identifier. :param configuration: @@ -620,7 +613,7 @@ async def put_pipeline( Changes made using this API take effect immediately.

    - ``_ + ``_ :param id: ID of the ingest pipeline to create or update. :param deprecated: Marks this ingest pipeline as deprecated. When a deprecated @@ -717,7 +710,7 @@ async def simulate( You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.

    - ``_ + ``_ :param docs: Sample documents to test in the pipeline. :param id: The pipeline to test. If you don't specify a `pipeline` in the request diff --git a/elasticsearch/_async/client/license.py b/elasticsearch/_async/client/license.py index 84c91196a..0cbdceb60 100644 --- a/elasticsearch/_async/client/license.py +++ b/elasticsearch/_async/client/license.py @@ -44,7 +44,7 @@ async def delete(

    If the operator privileges feature is enabled, only operator users can use this API.

    - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. :param timeout: The period to wait for a response. If no response is received @@ -98,7 +98,7 @@ async def get( - ``_ + ``_ :param accept_enterprise: If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum @@ -147,7 +147,7 @@ async def get_basic_status(

    Get the basic license status.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_license/basic_status" @@ -185,7 +185,7 @@ async def get_trial_status(

    Get the trial status.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_license/trial_status" @@ -308,7 +308,7 @@ async def post_start_basic(

    To check the status of your basic license, use the get basic license API.

    - ``_ + ``_ :param acknowledge: whether the user has acknowledged acknowledge messages (default: false) @@ -353,7 +353,7 @@ async def post_start_trial( human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, - type_query_string: t.Optional[str] = None, + type: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html @@ -365,12 +365,12 @@ async def post_start_trial(

    To check the status of your trial, use the get trial status API.

    - ``_ + ``_ :param acknowledge: whether the user has acknowledged acknowledge messages (default: false) :param master_timeout: Period to wait for a connection to the master node. - :param type_query_string: + :param type: The type of trial license to generate (default: "trial") """ __path_parts: t.Dict[str, str] = {} __path = "/_license/start_trial" @@ -387,8 +387,8 @@ async def post_start_trial( __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty - if type_query_string is not None: - __query["type_query_string"] = type_query_string + if type is not None: + __query["type"] = type __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", diff --git a/elasticsearch/_async/client/logstash.py b/elasticsearch/_async/client/logstash.py index 9e8e6c639..3e1a8c305 100644 --- a/elasticsearch/_async/client/logstash.py +++ b/elasticsearch/_async/client/logstash.py @@ -43,7 +43,7 @@ async def delete_pipeline( If the request succeeds, you receive an empty response with an appropriate status code.

    - ``_ + ``_ :param id: An identifier for the pipeline. """ @@ -87,7 +87,7 @@ async def get_pipeline( Get pipelines that are used for Logstash Central Management.

    - ``_ + ``_ :param id: A comma-separated list of pipeline identifiers. """ @@ -139,7 +139,7 @@ async def put_pipeline( If the specified pipeline exists, it is replaced.

    - ``_ + ``_ :param id: An identifier for the pipeline. :param pipeline: diff --git a/elasticsearch/_async/client/migration.py b/elasticsearch/_async/client/migration.py index 6cbc5283e..62c476b26 100644 --- a/elasticsearch/_async/client/migration.py +++ b/elasticsearch/_async/client/migration.py @@ -44,7 +44,7 @@ async def deprecations( You are strongly recommended to use the Upgrade Assistant.

    - ``_ + ``_ :param index: Comma-separate list of data streams or indices to check. Wildcard (*) expressions are supported. @@ -94,7 +94,7 @@ async def get_feature_upgrade_status( You are strongly recommended to use the Upgrade Assistant.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_migration/system_features" @@ -136,7 +136,7 @@ async def post_feature_upgrade(

    TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_migration/system_features" diff --git a/elasticsearch/_async/client/ml.py b/elasticsearch/_async/client/ml.py index c97cbda3a..af9a4b946 100644 --- a/elasticsearch/_async/client/ml.py +++ b/elasticsearch/_async/client/ml.py @@ -45,7 +45,7 @@ async def clear_trained_model_deployment_cache( Calling this API clears the caches without restarting the deployment.

    - ``_ + ``_ :param model_id: The unique identifier of the trained model. """ @@ -100,7 +100,7 @@ async def close_job( When a datafeed that has a specified end date stops, it automatically closes its associated job.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection @@ -165,7 +165,7 @@ async def delete_calendar(

    Remove all scheduled events from a calendar, then delete it.

    - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. """ @@ -209,7 +209,7 @@ async def delete_calendar_event(

    Delete events from a calendar.

    - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param event_id: Identifier for the scheduled event. You can obtain this identifier @@ -260,7 +260,7 @@ async def delete_calendar_job(

    Delete anomaly jobs from a calendar.

    - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param job_id: An identifier for the anomaly detection jobs. It can be a job @@ -312,7 +312,7 @@ async def delete_data_frame_analytics(

    Delete a data frame analytics job.

    - ``_ + ``_ :param id: Identifier for the data frame analytics job. :param force: If `true`, it deletes a job that is not stopped; this method is @@ -363,7 +363,7 @@ async def delete_datafeed(

    Delete a datafeed.

    - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -426,7 +426,7 @@ async def delete_expired_data( <job_id>.

    - ``_ + ``_ :param job_id: Identifier for an anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. @@ -490,7 +490,7 @@ async def delete_filter( filter. You must update or delete the job before you can delete the filter.

    - ``_ + ``_ :param filter_id: A string that uniquely identifies a filter. """ @@ -540,7 +540,7 @@ async def delete_forecast( forecasts before they expire.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param forecast_id: A comma-separated list of forecast identifiers. If you do @@ -616,7 +616,7 @@ async def delete_job( delete job request.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param delete_user_annotations: Specifies whether annotations that have been @@ -676,7 +676,7 @@ async def delete_model_snapshot( the model_snapshot_id in the results from the get jobs API.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: Identifier for the model snapshot. @@ -728,7 +728,7 @@ async def delete_trained_model(

    The request deletes a trained inference model that is not referenced by an ingest pipeline.

    - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param force: Forcefully deletes a trained model that is referenced by ingest @@ -783,7 +783,7 @@ async def delete_trained_model_alias( by the model_id, this API returns an error.

    - ``_ + ``_ :param model_id: The trained model ID to which the model alias refers. :param model_alias: The model alias to delete. @@ -844,7 +844,7 @@ async def estimate_model_memory( estimates for the fields it references.

    - ``_ + ``_ :param analysis_config: For a list of the properties that you can specify in the `analysis_config` component of the body of this API. @@ -916,7 +916,7 @@ async def evaluate_data_frame( field and an analytics result field to be present.

    - ``_ + ``_ :param evaluation: Defines the type of evaluation you want to perform. :param index: Defines the `index` in which the evaluation will be performed. @@ -1001,7 +1001,7 @@ async def explain_data_frame_analytics( - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -1112,7 +1112,7 @@ async def flush_job( analyzing further data.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param advance_time: Refer to the description for the `advance_time` query parameter. @@ -1187,7 +1187,7 @@ async def forecast( based on historical data.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. The job must be open when you create a forecast; otherwise, an error occurs. @@ -1273,7 +1273,7 @@ async def get_buckets( The API presents a chronological view of the records, grouped by bucket.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param timestamp: The timestamp of a single bucket result. If you do not specify @@ -1371,7 +1371,7 @@ async def get_calendar_events(

    Get info about events in calendars.

    - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids @@ -1440,7 +1440,7 @@ async def get_calendars(

    Get calendar configuration info.

    - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids @@ -1516,7 +1516,7 @@ async def get_categories(

    Get anomaly detection job results for categories.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param category_id: Identifier for the category, which is unique in the job. @@ -1604,7 +1604,7 @@ async def get_data_frame_analytics( wildcard expression.

    - ``_ + ``_ :param id: Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame @@ -1679,7 +1679,7 @@ async def get_data_frame_analytics_stats(

    Get data frame analytics job stats.

    - ``_ + ``_ :param id: Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame @@ -1753,7 +1753,7 @@ async def get_datafeed_stats( This API returns a maximum of 10,000 datafeeds.

    - ``_ + ``_ :param datafeed_id: Identifier for the datafeed. It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the @@ -1817,7 +1817,7 @@ async def get_datafeeds( This API returns a maximum of 10,000 datafeeds.

    - ``_ + ``_ :param datafeed_id: Identifier for the datafeed. It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the @@ -1884,7 +1884,7 @@ async def get_filters( You can get a single filter or all filters.

    - ``_ + ``_ :param filter_id: A string that uniquely identifies a filter. :param from_: Skips the specified number of filters. @@ -1952,7 +1952,7 @@ async def get_influencers( influencer_field_name is specified in the job configuration.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param desc: If true, the results are sorted in descending order. @@ -2036,7 +2036,7 @@ async def get_job_stats(

    Get anomaly detection job stats.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, a comma-separated list of jobs, or a wildcard expression. If @@ -2100,7 +2100,7 @@ async def get_jobs( _all, by specifying * as the <job_id>, or by omitting the <job_id>.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. If you do not specify one of these @@ -2166,7 +2166,7 @@ async def get_memory_stats( on each node, both within the JVM heap, and natively, outside of the JVM.

    - ``_ + ``_ :param node_id: The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or `ml:true` @@ -2224,7 +2224,7 @@ async def get_model_snapshot_upgrade_stats(

    Get anomaly detection job model snapshot upgrade usage info.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: A numerical character string that uniquely identifies the @@ -2298,7 +2298,7 @@ async def get_model_snapshots(

    Get model snapshots info.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: A numerical character string that uniquely identifies the @@ -2418,7 +2418,7 @@ async def get_overall_buckets( jobs' largest bucket span.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, a comma-separated list of jobs or groups, or a wildcard expression. @@ -2528,7 +2528,7 @@ async def get_records( number of detectors.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param desc: Refer to the description for the `desc` query parameter. @@ -2627,7 +2627,7 @@ async def get_trained_models(

    Get trained model configuration info.

    - ``_ + ``_ :param model_id: The unique identifier of the trained model or a model alias. You can get information for multiple trained models in a single API request @@ -2718,7 +2718,7 @@ async def get_trained_models_stats( models in a single API request by using a comma-separated list of model IDs or a wildcard expression.

    - ``_ + ``_ :param model_id: The unique identifier of the trained model or a model alias. It can be a comma-separated list or a wildcard expression. @@ -2784,7 +2784,7 @@ async def infer_trained_model(

    Evaluate a trained model.

    - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param docs: An array of objects to pass to the model for inference. The objects @@ -2851,7 +2851,7 @@ async def info( cluster configuration.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ml/info" @@ -2900,7 +2900,7 @@ async def open_job( new data is received.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param timeout: Refer to the description for the `timeout` query parameter. @@ -2957,7 +2957,7 @@ async def post_calendar_events(

    Add scheduled events to the calendar.

    - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param events: A list of one of more scheduled events. The event’s start and @@ -3018,7 +3018,7 @@ async def post_data( It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. The job must have a state of open to receive and process the data. @@ -3085,7 +3085,7 @@ async def preview_data_frame_analytics( Preview the extracted features used by a data frame analytics config.

    - ``_ + ``_ :param id: Identifier for the data frame analytics job. :param config: A data frame analytics config as described in create data frame @@ -3158,7 +3158,7 @@ async def preview_datafeed( You can also use secondary authorization headers to supply the credentials.

    - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -3237,7 +3237,7 @@ async def put_calendar(

    Create a calendar.

    - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param description: A description of the calendar. @@ -3294,7 +3294,7 @@ async def put_calendar_job(

    Add anomaly detection job to calendar.

    - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param job_id: An identifier for the anomaly detection jobs. It can be a job @@ -3377,7 +3377,7 @@ async def put_data_frame_analytics(

    If you supply only a subset of the regression or classification parameters, hyperparameter optimization occurs. It determines a value for each of the undefined parameters.

    - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -3562,7 +3562,7 @@ async def put_datafeed( directly to the .ml-config index. Do not give users write privileges on the .ml-config index.

    - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -3724,7 +3724,7 @@ async def put_filter( Specifically, filters are referenced in the custom_rules property of detector configuration objects.

    - ``_ + ``_ :param filter_id: A string that uniquely identifies a filter. :param description: A description of the filter. @@ -3826,7 +3826,7 @@ async def put_job( If you include a datafeed_config but do not provide a query, the datafeed uses {"match_all": {"boost": 1}}.

    - ``_ + ``_ :param job_id: The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and @@ -3876,13 +3876,7 @@ async def put_job( :param description: A description of the job. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard - expressions match hidden data streams. Supports comma-separated values. Valid - values are: * `all`: Match any data stream or index, including hidden ones. - * `closed`: Match closed, non-hidden indices. Also matches any non-hidden - data stream. Data streams cannot be closed. * `hidden`: Match hidden data - streams and hidden indices. Must be combined with `open`, `closed`, or both. - * `none`: Wildcard patterns are not accepted. * `open`: Match open, non-hidden - indices. Also matches any non-hidden data stream. + expressions match hidden data streams. Supports comma-separated values. :param groups: A list of job groups. A job can belong to no groups or many. :param ignore_throttled: If `true`, concrete, expanded or aliased indices are ignored when frozen. @@ -4034,7 +4028,7 @@ async def put_trained_model( Enable you to supply a trained model that is not created by data frame analytics.

    - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param compressed_definition: The compressed (GZipped and Base64 encoded) inference @@ -4155,7 +4149,7 @@ async def put_trained_model_alias( returns a warning.

    - ``_ + ``_ :param model_id: The identifier for the trained model that the alias refers to. :param model_alias: The alias to create or update. This value cannot end in numbers. @@ -4216,7 +4210,7 @@ async def put_trained_model_definition_part(

    Create part of a trained model definition.

    - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param part: The definition part number. When the definition is loaded for inference @@ -4298,7 +4292,7 @@ async def put_trained_model_vocabulary( The vocabulary is stored in the index as described in inference_config.*.vocabulary of the trained model definition.

    - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param vocabulary: The model vocabulary, which must not be empty. @@ -4361,7 +4355,7 @@ async def reset_job( comma separated list.

    - ``_ + ``_ :param job_id: The ID of the job to reset. :param delete_user_annotations: Specifies whether annotations that have been @@ -4425,7 +4419,7 @@ async def revert_model_snapshot( snapshot after Black Friday or a critical system failure.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: You can specify `empty` as the . Reverting to @@ -4500,7 +4494,7 @@ async def set_upgrade_mode( machine learning info API.

    - ``_ + ``_ :param enabled: When `true`, it enables `upgrade_mode` which temporarily halts all job and datafeed tasks and prohibits new job and datafeed tasks from @@ -4560,7 +4554,7 @@ async def start_data_frame_analytics( the destination index in advance with custom settings and mappings.

    - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -4623,7 +4617,7 @@ async def start_datafeed( authorization headers when you created or updated the datafeed, those credentials are used instead.

    - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -4700,7 +4694,7 @@ async def start_trained_model_deployment( It allocates the model to every machine learning node.

    - ``_ + ``_ :param model_id: The unique identifier of the trained model. Currently, only PyTorch models are supported. @@ -4801,7 +4795,7 @@ async def stop_data_frame_analytics( throughout its lifecycle.

    - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -4871,7 +4865,7 @@ async def stop_datafeed( multiple times throughout its lifecycle.

    - ``_ + ``_ :param datafeed_id: Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a @@ -4936,7 +4930,7 @@ async def stop_trained_model_deployment(

    Stop a trained model deployment.

    - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param allow_no_match: Specifies what to do when the request: contains wildcard @@ -5119,7 +5113,7 @@ async def update_datafeed( those credentials are used instead.

    - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -5145,13 +5139,7 @@ async def update_datafeed( check runs only on real-time datafeeds. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard - expressions match hidden data streams. Supports comma-separated values. Valid - values are: * `all`: Match any data stream or index, including hidden ones. - * `closed`: Match closed, non-hidden indices. Also matches any non-hidden - data stream. Data streams cannot be closed. * `hidden`: Match hidden data - streams and hidden indices. Must be combined with `open`, `closed`, or both. - * `none`: Wildcard patterns are not accepted. * `open`: Match open, non-hidden - indices. Also matches any non-hidden data stream. + expressions match hidden data streams. Supports comma-separated values. :param frequency: The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the @@ -5286,7 +5274,7 @@ async def update_filter( Updates the description of a filter, adds items, or removes items from the list.

    - ``_ + ``_ :param filter_id: A string that uniquely identifies a filter. :param add_items: The items to add to the filter. @@ -5380,7 +5368,7 @@ async def update_job( Updates certain properties of an anomaly detection job.

    - ``_ + ``_ :param job_id: Identifier for the job. :param allow_lazy_open: Advanced configuration option. Specifies whether this @@ -5512,7 +5500,7 @@ async def update_model_snapshot( Updates certain properties of a snapshot.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: Identifier for the model snapshot. @@ -5654,7 +5642,7 @@ async def upgrade_job_snapshot( job.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: A numerical character string that uniquely identifies the @@ -5733,7 +5721,7 @@ async def validate(

    Validate an anomaly detection job.

    - ``_ + ``_ :param analysis_config: :param analysis_limits: diff --git a/elasticsearch/_async/client/nodes.py b/elasticsearch/_async/client/nodes.py index ff8c7b321..9714e5402 100644 --- a/elasticsearch/_async/client/nodes.py +++ b/elasticsearch/_async/client/nodes.py @@ -50,7 +50,7 @@ async def clear_repositories_metering_archive( Clear the archived repositories metering information in the cluster.

    - ``_ + ``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. @@ -105,10 +105,11 @@ async def get_repositories_metering_info( Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts.

    - ``_ + ``_ :param node_id: Comma-separated list of node IDs or names used to limit returned - information. All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). + information. For more information about the nodes selective options, refer + to the node specification documentation. """ if node_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'node_id'") @@ -162,7 +163,7 @@ async def hot_threads( The output is plain text with a breakdown of the top hot threads for each node.

    - ``_ + ``_ :param node_id: List of node IDs or names used to limit returned information. :param ignore_idle_threads: If true, known idle threads (e.g. waiting in a socket @@ -235,7 +236,7 @@ async def info(

    By default, the API returns all attributes and core settings for cluster nodes.

    - ``_ + ``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. @@ -308,7 +309,7 @@ async def reload_secure_settings( Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password.

    - ``_ + ``_ :param node_id: The names of particular nodes in the cluster to target. :param secure_settings_password: The password for the Elasticsearch keystore. @@ -383,7 +384,7 @@ async def stats( By default, all stats are returned. You can limit the returned information by using metrics.

    - ``_ + ``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. @@ -498,7 +499,7 @@ async def usage(

    Get feature usage information.

    - ``_ + ``_ :param node_id: A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting diff --git a/elasticsearch/_async/client/query_rules.py b/elasticsearch/_async/client/query_rules.py index 9e7f38b4b..4ca766d7d 100644 --- a/elasticsearch/_async/client/query_rules.py +++ b/elasticsearch/_async/client/query_rules.py @@ -44,7 +44,7 @@ async def delete_rule( This is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API.

    - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset containing the rule to delete @@ -97,7 +97,7 @@ async def delete_ruleset( This is a destructive action that is not recoverable.

    - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset to delete """ @@ -142,7 +142,7 @@ async def get_rule( Get details about a query rule within a query ruleset.

    - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset containing the rule to retrieve @@ -194,7 +194,7 @@ async def get_ruleset( Get details about a query ruleset.

    - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset """ @@ -241,7 +241,7 @@ async def list_rulesets( Get summarized information about the query rulesets.

    - ``_ + ``_ :param from_: The offset from the first result to fetch. :param size: The maximum number of results to retrieve. @@ -302,7 +302,7 @@ async def put_rule( If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.

    - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset containing the rule to be created or updated. @@ -389,7 +389,7 @@ async def put_ruleset( If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.

    - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset to be created or updated. @@ -446,7 +446,7 @@ async def test( Evaluate match criteria against a query ruleset to identify the rules that would match that criteria.

    - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset to be created or updated diff --git a/elasticsearch/_async/client/rollup.py b/elasticsearch/_async/client/rollup.py index 94dc52ae3..1c589b5ed 100644 --- a/elasticsearch/_async/client/rollup.py +++ b/elasticsearch/_async/client/rollup.py @@ -67,7 +67,7 @@ async def delete_job( - ``_ + ``_ :param id: Identifier for the job. """ @@ -115,7 +115,7 @@ async def get_jobs( For details about a historical rollup job, the rollup capabilities API may be more useful.

    - ``_ + ``_ :param id: Identifier for the rollup job. If it is `_all` or omitted, the API returns all rollup jobs. @@ -171,7 +171,7 @@ async def get_rollup_caps( - ``_ + ``_ :param id: Index, indices or index-pattern to return rollup capabilities for. `_all` may be used to fetch rollup capabilities from all jobs. @@ -225,7 +225,7 @@ async def get_rollup_index_caps( - ``_ + ``_ :param index: Data stream or index to check for rollup capabilities. Wildcard (`*`) expressions are supported. @@ -295,7 +295,7 @@ async def put_job(

    Jobs are created in a STOPPED state. You can start them with the start rollup jobs API.

    - ``_ + ``_ :param id: Identifier for the rollup job. This can be any alphanumeric string and uniquely identifies the data that is associated with the rollup job. @@ -443,7 +443,7 @@ async def rollup_search( During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used.

    - ``_ + ``_ :param index: A comma-separated list of data streams and indices used to limit the request. This parameter has the following rules: * At least one data @@ -521,7 +521,7 @@ async def start_job( If you try to start a job that is already started, nothing happens.

    - ``_ + ``_ :param id: Identifier for the rollup job. """ @@ -575,7 +575,7 @@ async def stop_job( If the specified time elapses without the job moving to STOPPED, a timeout exception occurs.

    - ``_ + ``_ :param id: Identifier for the rollup job. :param timeout: If `wait_for_completion` is `true`, the API blocks for (at maximum) diff --git a/elasticsearch/_async/client/search_application.py b/elasticsearch/_async/client/search_application.py index a517db976..f1bca27fd 100644 --- a/elasticsearch/_async/client/search_application.py +++ b/elasticsearch/_async/client/search_application.py @@ -49,7 +49,7 @@ async def delete(

    Remove a search application and its associated alias. Indices attached to the search application are not removed.

    - ``_ + ``_ :param name: The name of the search application to delete. """ @@ -94,7 +94,7 @@ async def delete_behavioral_analytics( The associated data stream is also deleted.

    - ``_ + ``_ :param name: The name of the analytics collection to be deleted """ @@ -138,7 +138,7 @@ async def get(

    Get search application details.

    - ``_ + ``_ :param name: The name of the search application """ @@ -182,7 +182,7 @@ async def get_behavioral_analytics(

    Get behavioral analytics collections.

    - ``_ + ``_ :param name: A list of analytics collections to limit the returned information """ @@ -234,7 +234,7 @@ async def list( Get information about search applications.

    - ``_ + ``_ :param from_: Starting offset. :param q: Query in the Lucene query string syntax. @@ -290,7 +290,7 @@ async def post_behavioral_analytics_event(

    Create a behavioral analytics collection event.

    - ``_ + ``_ :param collection_name: The name of the behavioral analytics collection. :param event_type: The analytics event type. @@ -357,7 +357,7 @@ async def put(

    Create or update a search application.

    - ``_ + ``_ :param name: The name of the search application to be created or updated. :param search_application: @@ -414,7 +414,7 @@ async def put_behavioral_analytics(

    Create a behavioral analytics collection.

    - ``_ + ``_ :param name: The name of the analytics collection to be created or updated. """ @@ -467,7 +467,7 @@ async def render_query(

    You must have read privileges on the backing alias of the search application.

    - ``_ + ``_ :param name: The name of the search application to render teh query for. :param params: @@ -531,7 +531,7 @@ async def search( Unspecified template parameters are assigned their default values if applicable.

    - ``_ + ``_ :param name: The name of the search application to be searched. :param params: Query parameters specific to this request, which will override diff --git a/elasticsearch/_async/client/searchable_snapshots.py b/elasticsearch/_async/client/searchable_snapshots.py index 6d9ef50cc..b15bf0705 100644 --- a/elasticsearch/_async/client/searchable_snapshots.py +++ b/elasticsearch/_async/client/searchable_snapshots.py @@ -50,7 +50,7 @@ async def cache_stats( Get statistics about the shared cache for partially mounted indices.

    - ``_ + ``_ :param node_id: The names of the nodes in the cluster to target. :param master_timeout: @@ -111,7 +111,7 @@ async def clear_cache( Clear indices and data streams from the shared cache for partially mounted indices.

    - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases to clear from the cache. It supports wildcards (`*`). @@ -190,7 +190,7 @@ async def mount( Manually mounting ILM-managed snapshots can interfere with ILM processes.

    - ``_ + ``_ :param repository: The name of the repository containing the snapshot of the index to mount. @@ -278,7 +278,7 @@ async def stats(

    Get searchable snapshot statistics.

    - ``_ + ``_ :param index: A comma-separated list of data streams and indices to retrieve statistics for. diff --git a/elasticsearch/_async/client/security.py b/elasticsearch/_async/client/security.py index a2f5ac605..ebb046b56 100644 --- a/elasticsearch/_async/client/security.py +++ b/elasticsearch/_async/client/security.py @@ -58,7 +58,7 @@ async def activate_user_profile( Any updates do not change existing content for either the labels or data fields.

    - ``_ + ``_ :param grant_type: The type of grant. :param access_token: The user's Elasticsearch access token or JWT. Both `access` @@ -124,7 +124,7 @@ async def authenticate( If the user cannot be authenticated, this API returns a 401 status code.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/_authenticate" @@ -171,7 +171,7 @@ async def bulk_delete_role( The bulk delete roles API cannot delete roles that are defined in roles files.

    - ``_ + ``_ :param names: An array of role names to delete :param refresh: If `true` (the default) then refresh the affected shards to make @@ -232,7 +232,7 @@ async def bulk_put_role( The bulk create or update roles API cannot update roles that are defined in roles files.

    - ``_ + ``_ :param roles: A dictionary of role name to RoleDescriptor objects to add or update :param refresh: If `true` (the default) then refresh the affected shards to make @@ -300,7 +300,7 @@ async def bulk_update_api_keys(

    A successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and error details for any failed update.

    - ``_ + ``_ :param ids: The API key identifiers. :param expiration: Expiration time for the API keys. By default, API keys never @@ -378,7 +378,7 @@ async def change_password(

    Change the passwords of users in the native realm and built-in users.

    - ``_ + ``_ :param username: The user whose password you want to change. If you do not specify this parameter, the password is changed for the current user. @@ -445,7 +445,7 @@ async def clear_api_key_cache( The cache is also automatically cleared on state changes of the security index.

    - ``_ + ``_ :param ids: Comma-separated list of API key IDs to evict from the API key cache. To evict all API keys, use `*`. Does not support other wildcard patterns. @@ -491,7 +491,7 @@ async def clear_cached_privileges( The cache is also automatically cleared for applications that have their privileges updated.

    - ``_ + ``_ :param application: A comma-separated list of applications. To clear all applications, use an asterisk (`*`). It does not support other wildcard patterns. @@ -541,7 +541,7 @@ async def clear_cached_realms( For more information, refer to the documentation about controlling the user cache.

    - ``_ + ``_ :param realms: A comma-separated list of realms. To clear all realms, use an asterisk (`*`). It does not support other wildcard patterns. @@ -591,7 +591,7 @@ async def clear_cached_roles(

    Evict roles from the native role cache.

    - ``_ + ``_ :param name: A comma-separated list of roles to evict from the role cache. To evict all roles, use an asterisk (`*`). It does not support other wildcard @@ -643,7 +643,7 @@ async def clear_cached_service_tokens( The cache for tokens backed by the service_tokens file is cleared automatically on file changes.

    - ``_ + ``_ :param namespace: The namespace, which is a top-level grouping of service accounts. :param service: The name of the service, which must be unique within its namespace. @@ -715,7 +715,7 @@ async def create_api_key( To configure or turn off the API key service, refer to API key service setting documentation.

    - ``_ + ``_ :param expiration: The expiration time for the API key. By default, API keys never expire. @@ -805,7 +805,7 @@ async def create_cross_cluster_api_key( Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error.
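
A rough usage sketch of the create API key call documented above; the endpoint, credentials, key name, expiration, and role descriptor are placeholder assumptions rather than values from this patch.

```python
import asyncio

from elasticsearch import AsyncElasticsearch


async def create_key() -> None:
    client = AsyncElasticsearch("https://localhost:9200", api_key="...")  # placeholders
    # Create a key that expires after seven days and only grants read access to logs-*.
    resp = await client.security.create_api_key(
        name="example-read-key",
        expiration="7d",
        role_descriptors={
            "read-only": {"indices": [{"names": ["logs-*"], "privileges": ["read"]}]}
        },
    )
    print(resp["id"], resp["encoded"])
    await client.close()


asyncio.run(create_key())
```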

    - ``_ + ``_ :param access: The access to be granted to this API key. The access is composed of permissions for cross-cluster search and cross-cluster replication. At @@ -880,7 +880,7 @@ async def create_service_token( You must actively delete them if they are no longer needed.

    - ``_ + ``_ :param namespace: The name of the namespace, which is a top-level grouping of service accounts. @@ -966,7 +966,7 @@ async def delegate_pki( The proxy is trusted to have performed the TLS authentication and this API translates that authentication into an Elasticsearch access token.

    - ``_ + ``_ :param x509_certificate_chain: The X509Certificate chain, which is represented as an ordered string array. Each string in the array is a base64-encoded @@ -1030,7 +1030,7 @@ async def delete_privileges( - ``_ + ``_ :param application: The name of the application. Application privileges are always associated with exactly one application. @@ -1093,7 +1093,7 @@ async def delete_role( The delete roles API cannot remove roles that are defined in roles files.

    - ``_ + ``_ :param name: The name of the role. :param refresh: If `true` (the default) then refresh the affected shards to make @@ -1147,7 +1147,7 @@ async def delete_role_mapping( The delete role mappings API cannot remove role mappings that are defined in role mapping files.

    - ``_ + ``_ :param name: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does @@ -1203,7 +1203,7 @@ async def delete_service_token(

    Delete service account tokens for a service in a specified namespace.

    - ``_ + ``_ :param namespace: The namespace, which is a top-level grouping of service accounts. :param service: The service name. @@ -1265,7 +1265,7 @@ async def delete_user(

    Delete users from the native realm.

    - ``_ + ``_ :param username: An identifier for the user. :param refresh: If `true` (the default) then refresh the affected shards to make @@ -1319,7 +1319,7 @@ async def disable_user( You can use this API to revoke a user's access to Elasticsearch.

    - ``_ + ``_ :param username: An identifier for the user. :param refresh: If `true` (the default) then refresh the affected shards to make @@ -1376,7 +1376,7 @@ async def disable_user_profile( To re-enable a disabled user profile, use the enable user profile API.

    - ``_ + ``_ :param uid: Unique identifier for the user profile. :param refresh: If 'true', Elasticsearch refreshes the affected shards to make @@ -1429,7 +1429,7 @@ async def enable_user( By default, when you create users, they are enabled.

    - ``_ + ``_ :param username: An identifier for the user. :param refresh: If `true` (the default) then refresh the affected shards to make @@ -1486,7 +1486,7 @@ async def enable_user_profile( If you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again.

    - ``_ + ``_ :param uid: A unique identifier for the user profile. :param refresh: If 'true', Elasticsearch refreshes the affected shards to make @@ -1536,7 +1536,7 @@ async def enroll_kibana( Kibana uses this API internally to configure itself for communications with an Elasticsearch cluster that already has security features enabled.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/enroll/kibana" @@ -1577,7 +1577,7 @@ async def enroll_node( The response contains key and certificate material that allows the caller to generate valid signed certificates for the HTTP layer of all nodes in the cluster.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/enroll/node" @@ -1626,7 +1626,7 @@ async def get_api_key( If you have read_security, manage_api_key or greater privileges (including manage_security), this API returns all API keys regardless of ownership.

    - ``_ + ``_ :param active_only: A boolean flag that can be used to query API keys that are currently active. An API key is considered active if it is neither invalidated, @@ -1704,7 +1704,7 @@ async def get_builtin_privileges(

    Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/privilege/_builtin" @@ -1749,7 +1749,7 @@ async def get_privileges( - ``_ + ``_ :param application: The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, @@ -1805,7 +1805,7 @@ async def get_role( The get roles API cannot retrieve roles that are defined in roles files.

    - ``_ + ``_ :param name: The name of the role. You can specify multiple roles as a comma-separated list. If you do not specify this parameter, the API returns information about @@ -1856,7 +1856,7 @@ async def get_role_mapping( The get role mappings API cannot retrieve role mappings that are defined in role mapping files.

    - ``_ + ``_ :param name: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does @@ -1909,7 +1909,7 @@ async def get_service_accounts(

    NOTE: Currently, only the elastic/fleet-server service account is available.

    - ``_ + ``_ :param namespace: The name of the namespace. Omit this parameter to retrieve information about all service accounts. If you omit this parameter, you must @@ -1967,7 +1967,7 @@ async def get_service_credentials( Tokens with the same name from different nodes are assumed to be the same token and are only counted once towards the total number of service tokens.

    - ``_ + ``_ :param namespace: The name of the namespace. :param service: The service name. @@ -2023,7 +2023,7 @@ async def get_settings( - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and @@ -2099,7 +2099,7 @@ async def get_token( If you want to invalidate a token immediately, you can do so by using the invalidate token API.

    - ``_ + ``_ :param grant_type: The type of grant. Supported grant types are: `password`, `_kerberos`, `client_credentials`, and `refresh_token`. @@ -2173,7 +2173,7 @@ async def get_user(

    Get information about users in the native realm and built-in users.

    - ``_ + ``_ :param username: An identifier for the user. You can specify multiple usernames as a comma-separated list. If you omit this parameter, the API retrieves @@ -2213,13 +2213,10 @@ async def get_user( async def get_user_privileges( self, *, - application: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, - priviledge: t.Optional[str] = None, - username: t.Optional[t.Union[None, str]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html @@ -2231,20 +2228,11 @@ async def get_user_privileges( To check whether a user has a specific list of privileges, use the has privileges API.

    - ``_ - - :param application: The name of the application. Application privileges are always - associated with exactly one application. If you do not specify this parameter, - the API returns information about all privileges for all applications. - :param priviledge: The name of the privilege. If you do not specify this parameter, - the API returns information about all privileges for the requested application. - :param username: + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/user/_privileges" __query: t.Dict[str, t.Any] = {} - if application is not None: - __query["application"] = application if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: @@ -2253,10 +2241,6 @@ async def get_user_privileges( __query["human"] = human if pretty is not None: __query["pretty"] = pretty - if priviledge is not None: - __query["priviledge"] = priviledge - if username is not None: - __query["username"] = username __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", @@ -2288,7 +2272,7 @@ async def get_user_profile( Elastic reserves the right to change or remove this feature in future releases without prior notice.
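
The hunk above removes the `application`, `priviledge`, and `username` parameters from `get_user_privileges`, so the call now takes no endpoint-specific arguments. A minimal sketch, assuming a placeholder endpoint and credentials:

```python
import asyncio

from elasticsearch import AsyncElasticsearch


async def show_my_privileges() -> None:
    client = AsyncElasticsearch("https://localhost:9200", api_key="...")  # placeholders
    # Reports the privileges of the authenticated user; no parameters are needed.
    resp = await client.security.get_user_privileges()
    print(resp["cluster"], resp["indices"])
    await client.close()


asyncio.run(show_my_privileges())
```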

    - ``_ + ``_ :param uid: A unique identifier for the user profile. :param data: A comma-separated list of filters for the `data` field of the profile @@ -2345,6 +2329,9 @@ async def grant_api_key( human: t.Optional[bool] = None, password: t.Optional[str] = None, pretty: t.Optional[bool] = None, + refresh: t.Optional[ + t.Union[bool, str, t.Literal["false", "true", "wait_for"]] + ] = None, run_as: t.Optional[str] = None, username: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, @@ -2372,7 +2359,7 @@ async def grant_api_key(

    By default, API keys never expire. You can specify expiration information when you create the API keys.

    - ``_ + ``_ :param api_key: The API key. :param grant_type: The type of grant. Supported grant types are: `access_token`, @@ -2382,6 +2369,9 @@ async def grant_api_key( types. :param password: The user's password. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. + :param refresh: If 'true', Elasticsearch refreshes the affected shards to make + this operation visible to search. If 'wait_for', it waits for a refresh to + make this operation visible to search. If 'false', nothing is done with refreshes. :param run_as: The name of the user to be impersonated. :param username: The user name that identifies the user. If you specify the `password` grant type, this parameter is required. It is not valid with other grant @@ -2403,6 +2393,8 @@ async def grant_api_key( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if refresh is not None: + __query["refresh"] = refresh if not __body: if api_key is not None: __body["api_key"] = api_key @@ -2519,7 +2511,7 @@ async def has_privileges( To check the privileges of other users, you must use the run as feature.
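
The change to `grant_api_key` above adds a `refresh` query parameter. A sketch of passing it; the user name, password, and key name are placeholders:

```python
import asyncio

from elasticsearch import AsyncElasticsearch


async def grant_key() -> None:
    client = AsyncElasticsearch("https://localhost:9200", api_key="...")  # placeholders
    resp = await client.security.grant_api_key(
        grant_type="password",
        username="end-user",            # placeholder user
        password="end-user-password",   # placeholder password
        api_key={"name": "granted-key"},
        refresh="wait_for",  # new parameter: wait until the key is visible to search
    )
    print(resp["encoded"])
    await client.close()


asyncio.run(grant_key())
```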

    - ``_ + ``_ :param user: Username :param application: @@ -2584,7 +2576,7 @@ async def has_privileges_user_profile( Elastic reserves the right to change or remove this feature in future releases without prior notice.

    - ``_ + ``_ :param privileges: An object containing all the privileges to be checked. :param uids: A list of profile IDs. The privileges are checked for associated @@ -2658,7 +2650,7 @@ async def invalidate_api_key( - ``_ + ``_ :param id: :param ids: A list of API key ids. This parameter cannot be used with any of @@ -2742,7 +2734,7 @@ async def invalidate_token( If neither of these is specified, then realm_name and/or username need to be specified.

    - ``_ + ``_ :param realm_name: The name of an authentication realm. This parameter cannot be used with either `refresh_token` or `token`. @@ -2810,7 +2802,7 @@ async def oidc_authenticate( These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.

    - ``_ + ``_ :param nonce: Associate a client session with an ID token and mitigate replay attacks. This value needs to be the same as the one that was provided to @@ -2890,7 +2882,7 @@ async def oidc_logout( These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.

    - ``_ + ``_ :param token: The access token to be invalidated. :param refresh_token: The refresh token to be invalidated. @@ -2952,7 +2944,7 @@ async def oidc_prepare_authentication( These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.

    - ``_ + ``_ :param iss: In the case of a third party initiated single sign on, this is the issuer identifier for the OP that the RP is to send the authentication request @@ -3048,7 +3040,7 @@ async def put_privileges(

    Action names can contain any number of printable ASCII characters and must contain at least one of the following characters: /, *, :.

    - ``_ + ``_ :param privileges: :param refresh: If `true` (the default) then refresh the affected shards to make @@ -3200,7 +3192,7 @@ async def put_role( File-based role management is not available in Elastic Serverless.

    - ``_ + ``_ :param name: The name of the role. :param applications: A list of application privilege entries. @@ -3332,7 +3324,7 @@ async def put_role_mapping( If the format of the template is set to "json" then the template is expected to produce a JSON string or an array of JSON strings for the role names.
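
A sketch of the create-or-update role call described above; the role name and privileges are illustrative assumptions:

```python
import asyncio

from elasticsearch import AsyncElasticsearch


async def create_role() -> None:
    client = AsyncElasticsearch("https://localhost:9200", api_key="...")  # placeholders
    # Define a role that can monitor the cluster and read the logs-* indices.
    await client.security.put_role(
        name="logs-reader",
        cluster=["monitor"],
        indices=[{"names": ["logs-*"], "privileges": ["read", "view_index_metadata"]}],
    )
    await client.close()


asyncio.run(create_role())
```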

    - ``_ + ``_ :param name: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does @@ -3434,7 +3426,7 @@ async def put_user( To change a user's password without updating any other fields, use the change password API.

    - ``_ + ``_ :param username: An identifier for the user. NOTE: Usernames must be at least 1 and no more than 507 characters. They can contain alphanumeric characters @@ -3553,7 +3545,7 @@ async def query_api_keys( If you have the read_security, manage_api_key, or greater privileges (including manage_security), this API returns all API keys regardless of ownership.

    - ``_ + ``_ :param aggregations: Any aggregations to run over the corpus of returned API keys. Aggregations and queries work together. Aggregations are computed only @@ -3696,7 +3688,7 @@ async def query_role( Also, the results can be paginated and sorted.

    - ``_ + ``_ :param from_: The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` @@ -3789,7 +3781,7 @@ async def query_user( This API is only for native users.

    - ``_ + ``_ :param from_: The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` @@ -3882,7 +3874,7 @@ async def saml_authenticate( This API endpoint essentially exchanges SAML responses that indicate successful authentication in the IdP for Elasticsearch access and refresh tokens, which can be used for authentication against Elasticsearch.

    - ``_ + ``_ :param content: The SAML response as it was sent by the user's browser, usually a Base64 encoded XML document. @@ -3955,7 +3947,7 @@ async def saml_complete_logout( The caller of this API must prepare the request accordingly so that this API can handle either of them.

    - ``_ + ``_ :param ids: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. @@ -4031,7 +4023,7 @@ async def saml_invalidate( Thus the user can be redirected back to their IdP.

    - ``_ + ``_ :param query_string: The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout. This query should include @@ -4106,7 +4098,7 @@ async def saml_logout( If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP supports this, the Elasticsearch response contains a URL to redirect the user to the IdP that contains a SAML logout request (starting an SP-initiated SAML Single Logout).

    - ``_ + ``_ :param token: The access token that was returned as a response to calling the SAML authenticate API. Alternatively, the most recent token that was received @@ -4176,7 +4168,7 @@ async def saml_prepare_authentication( The caller of this API needs to store this identifier as it needs to be used in a following step of the authentication process.

    - ``_ + ``_ :param acs: The Assertion Consumer Service URL that matches the one of the SAML realms in Elasticsearch. The realm is used to generate the authentication @@ -4237,7 +4229,7 @@ async def saml_service_provider_metadata( This API generates Service Provider metadata based on the configuration of a SAML realm in Elasticsearch.

    - ``_ + ``_ :param realm_name: The name of the SAML realm in Elasticsearch. """ @@ -4290,7 +4282,7 @@ async def suggest_user_profiles( Elastic reserves the right to change or remove this feature in future releases without prior notice.

    - ``_ + ``_ :param data: A comma-separated list of filters for the `data` field of the profile document. To return all content use `data=*`. To return a subset of content, @@ -4377,7 +4369,7 @@ async def update_api_key( This change can occur if the owner user's permissions have changed since the API key was created or last modified.

    - ``_ + ``_ :param id: The ID of the API key to update. :param expiration: The expiration time for the API key. By default, API keys @@ -4465,7 +4457,7 @@ async def update_cross_cluster_api_key(

    NOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API.

    - ``_ + ``_ :param id: The ID of the cross-cluster API key to update. :param access: The access to be granted to this API key. The access is composed @@ -4544,7 +4536,7 @@ async def update_settings( This API does not yet support configuring the settings for indices before they are in use.

    - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails @@ -4629,7 +4621,7 @@ async def update_user_profile_data( The update_profile_data global privilege grants privileges for updating only the allowed namespaces.

    - ``_ + ``_ :param uid: A unique identifier for the user profile. :param data: Non-searchable data that you want to associate with the user profile. diff --git a/elasticsearch/_async/client/shutdown.py b/elasticsearch/_async/client/shutdown.py index 8e7380af4..920b3f940 100644 --- a/elasticsearch/_async/client/shutdown.py +++ b/elasticsearch/_async/client/shutdown.py @@ -53,7 +53,7 @@ async def delete_node(

    If the operator privileges feature is enabled, you must be an operator to use this API.

    - ``_ + ``_ :param node_id: The node id of the node to be removed from the shutdown state :param master_timeout: Period to wait for a connection to the master node. If @@ -112,7 +112,7 @@ async def get_node(

    If the operator privileges feature is enabled, you must be an operator to use this API.

    - ``_ + ``_ :param node_id: The node for which to retrieve the shutdown status :param master_timeout: Period to wait for a connection to the master node. If @@ -187,7 +187,7 @@ async def put_node( Monitor the node shutdown status to determine when it is safe to stop Elasticsearch.

    - ``_ + ``_ :param node_id: The node identifier. This parameter is not validated against the cluster's active nodes. This enables you to register a node for shut diff --git a/elasticsearch/_async/client/simulate.py b/elasticsearch/_async/client/simulate.py index 5b2f11b2e..6880607b8 100644 --- a/elasticsearch/_async/client/simulate.py +++ b/elasticsearch/_async/client/simulate.py @@ -81,7 +81,7 @@ async def ingest( These will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request.

    - ``_ + ``_ :param docs: Sample documents to test in the pipeline. :param index: The index to simulate ingesting into. This value can be overridden diff --git a/elasticsearch/_async/client/slm.py b/elasticsearch/_async/client/slm.py index 62ef2aa53..352080ac1 100644 --- a/elasticsearch/_async/client/slm.py +++ b/elasticsearch/_async/client/slm.py @@ -45,7 +45,7 @@ async def delete_lifecycle( This operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots.

    - ``_ + ``_ :param policy_id: The id of the snapshot lifecycle policy to remove :param master_timeout: The period to wait for a connection to the master node. @@ -101,7 +101,7 @@ async def execute_lifecycle( The snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance.

    - ``_ + ``_ :param policy_id: The id of the snapshot lifecycle policy to be executed :param master_timeout: The period to wait for a connection to the master node. @@ -156,7 +156,7 @@ async def execute_retention( The retention policy is normally applied according to its schedule.

    - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails @@ -208,7 +208,7 @@ async def get_lifecycle( Get snapshot lifecycle policy definitions and information about the latest snapshot attempts.

    - ``_ + ``_ :param policy_id: Comma-separated list of snapshot lifecycle policies to retrieve :param master_timeout: The period to wait for a connection to the master node. @@ -265,7 +265,7 @@ async def get_stats( Get global and policy-level statistics about actions taken by snapshot lifecycle management.

    - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and @@ -315,7 +315,7 @@ async def get_status(

    Get the snapshot lifecycle management status.

    - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails @@ -379,7 +379,7 @@ async def put_lifecycle( Only the latest version of a policy is stored.

    - ``_ + ``_ :param policy_id: The identifier for the snapshot lifecycle policy you want to create or update. @@ -465,7 +465,7 @@ async def start( Manually starting SLM is necessary only if it has been stopped using the stop SLM API.
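
A sketch of creating a policy with the SLM put-lifecycle call described above; the schedule, repository, and retention values are illustrative assumptions:

```python
import asyncio

from elasticsearch import AsyncElasticsearch


async def create_slm_policy() -> None:
    client = AsyncElasticsearch("https://localhost:9200", api_key="...")  # placeholders
    await client.slm.put_lifecycle(
        policy_id="nightly-snapshots",
        schedule="0 30 1 * * ?",          # every night at 01:30
        name="<nightly-snap-{now/d}>",    # date-math snapshot name
        repository="my_repository",       # must already be registered
        config={"indices": ["data-*"], "ignore_unavailable": True},
        retention={"expire_after": "30d", "min_count": 5, "max_count": 50},
    )
    await client.close()


asyncio.run(create_slm_policy())
```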

    - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails @@ -523,7 +523,7 @@ async def stop( Use the get snapshot lifecycle management status API to see if SLM is running.

    - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails diff --git a/elasticsearch/_async/client/snapshot.py b/elasticsearch/_async/client/snapshot.py index 05b1b1c17..cdc7eab2b 100644 --- a/elasticsearch/_async/client/snapshot.py +++ b/elasticsearch/_async/client/snapshot.py @@ -50,7 +50,7 @@ async def cleanup_repository( Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots.

    - ``_ + ``_ :param name: Snapshot repository to clean up. :param master_timeout: Period to wait for a connection to the master node. @@ -107,7 +107,7 @@ async def clone( Clone part or all of a snapshot into another snapshot in the same repository.

    - ``_ + ``_ :param repository: A repository name :param snapshot: The name of the snapshot to clone from @@ -191,7 +191,7 @@ async def create( Take a snapshot of a cluster or of data streams and indices.

    - ``_ + ``_ :param repository: Repository for the snapshot. :param snapshot: Name of the snapshot. Must be unique in the repository. @@ -301,7 +301,7 @@ async def create_repository( Ensure there are no cluster blocks (for example, cluster.blocks.read_only and cluster.blocks.read_only_allow_delete settings) that prevent write access.
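
A minimal sketch of the create-snapshot call described above; the repository and snapshot names are placeholders:

```python
import asyncio

from elasticsearch import AsyncElasticsearch


async def take_snapshot() -> None:
    client = AsyncElasticsearch("https://localhost:9200", api_key="...")  # placeholders
    await client.snapshot.create(
        repository="my_repository",        # must already be registered
        snapshot="snapshot-2024-11-09",    # must be unique within the repository
        indices="logs-*",
        wait_for_completion=True,          # block until the snapshot finishes
    )
    await client.close()


asyncio.run(take_snapshot())
```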

    - ``_ + ``_ :param name: A repository name :param repository: @@ -357,6 +357,7 @@ async def delete( human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html @@ -364,11 +365,14 @@ async def delete(

    Delete snapshots.

    - ``_ + ``_ :param repository: A repository name :param snapshot: A comma-separated list of snapshot names :param master_timeout: Explicit operation timeout for connection to master node + :param wait_for_completion: If `true`, the request returns a response when the + matching snapshots are all deleted. If `false`, the request returns a response + as soon as the deletes are scheduled. """ if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'repository'") @@ -390,6 +394,8 @@ async def delete( __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if wait_for_completion is not None: + __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", @@ -420,7 +426,7 @@ async def delete_repository( The snapshots themselves are left untouched and in place.
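
The change above adds a `wait_for_completion` flag to the delete-snapshot call. A sketch of using it; repository and snapshot names are placeholders:

```python
import asyncio

from elasticsearch import AsyncElasticsearch


async def delete_old_snapshots() -> None:
    client = AsyncElasticsearch("https://localhost:9200", api_key="...")  # placeholders
    await client.snapshot.delete(
        repository="my_repository",
        snapshot="snapshot-2023-*",    # comma-separated names or wildcards
        wait_for_completion=False,     # new flag: return once the deletes are scheduled
    )
    await client.close()


asyncio.run(delete_old_snapshots())
```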

    - ``_ + ``_ :param name: Name of the snapshot repository to unregister. Wildcard (`*`) patterns are supported. @@ -497,7 +503,7 @@ async def get(

    Get snapshot information.

    - ``_ + ``_ :param repository: Comma-separated list of snapshot repository names used to limit the request. Wildcard (*) expressions are supported. @@ -612,7 +618,7 @@ async def get_repository(

    Get snapshot repository information.

    - ``_ + ``_ :param name: A comma-separated list of repository names :param local: Return local information, do not retrieve the state from master @@ -750,7 +756,7 @@ async def repository_analyze( Some operations also verify the behavior on small blobs with sizes other than 8 bytes.

    - ``_ + ``_ :param name: The name of the repository. :param blob_count: The total number of blobs to write to the repository during @@ -877,7 +883,7 @@ async def repository_verify_integrity(

    NOTE: This API may not work correctly in a mixed-version cluster.

    - ``_ + ``_ :param name: A repository name :param blob_thread_pool_concurrency: Number of threads to use for reading blob @@ -987,7 +993,7 @@ async def restore(

    If your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot.

    - ``_ + ``_ :param repository: A repository name :param snapshot: A snapshot name @@ -1091,7 +1097,7 @@ async def status( These requests can also tax machine resources and, when using cloud storage, incur high processing costs.

    - ``_ + ``_ :param repository: A repository name :param snapshot: A comma-separated list of snapshot names @@ -1154,7 +1160,7 @@ async def verify_repository( Check for common misconfigurations in a snapshot repository.

    - ``_ + ``_ :param name: A repository name :param master_timeout: Explicit operation timeout for connection to master node diff --git a/elasticsearch/_async/client/sql.py b/elasticsearch/_async/client/sql.py index 39ac7c5b9..1763739c5 100644 --- a/elasticsearch/_async/client/sql.py +++ b/elasticsearch/_async/client/sql.py @@ -44,7 +44,7 @@ async def clear_cursor(

    Clear an SQL search cursor.

    - ``_ + ``_ :param cursor: Cursor to clear. """ @@ -99,7 +99,7 @@ async def delete_async( - ``_ + ``_ :param id: The identifier for the search. """ @@ -150,7 +150,7 @@ async def get_async(

    If the Elasticsearch security features are enabled, only the user who first submitted the SQL search can retrieve the search using this API.

    - ``_ + ``_ :param id: The identifier for the search. :param delimiter: The separator for CSV results. The API supports this parameter @@ -212,7 +212,7 @@ async def get_async_status( Get the current status of an async SQL search or a stored synchronous SQL search.

    - ``_ + ``_ :param id: The identifier for the search. """ @@ -301,7 +301,7 @@ async def query( Run an SQL request.

    - ``_ + ``_ :param allow_partial_search_results: If `true`, the response has partial results when there are shard request timeouts or shard failures. If `false`, the @@ -427,7 +427,7 @@ async def translate( It accepts the same request body parameters as the SQL search API, excluding cursor.

    - ``_ + ``_ :param query: The SQL query to run. :param fetch_size: The maximum number of rows (or entries) to return in one response. diff --git a/elasticsearch/_async/client/ssl.py b/elasticsearch/_async/client/ssl.py index 9fc313614..7f9eb9ea0 100644 --- a/elasticsearch/_async/client/ssl.py +++ b/elasticsearch/_async/client/ssl.py @@ -52,7 +52,7 @@ async def certificates(
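
A sketch of the SQL search call described above, followed by clearing the cursor it may return; the index pattern and query are illustrative:

```python
import asyncio

from elasticsearch import AsyncElasticsearch


async def run_sql() -> None:
    client = AsyncElasticsearch("https://localhost:9200", api_key="...")  # placeholders
    resp = await client.sql.query(
        query='SELECT message FROM "logs-*" LIMIT 50',  # illustrative SQL
        fetch_size=5,  # rows per page; a cursor is returned when more rows remain
    )
    print(resp["columns"], resp["rows"])
    if "cursor" in resp:
        await client.sql.clear_cursor(cursor=resp["cursor"])
    await client.close()


asyncio.run(run_sql())
```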

    If Elasticsearch is configured to use a keystore or truststore, the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ssl/certificates" diff --git a/elasticsearch/_async/client/synonyms.py b/elasticsearch/_async/client/synonyms.py index 21cbd8084..0d2c62522 100644 --- a/elasticsearch/_async/client/synonyms.py +++ b/elasticsearch/_async/client/synonyms.py @@ -53,7 +53,7 @@ async def delete_synonym( When the synonyms set is not used in analyzers, you will be able to delete it.

    - ``_ + ``_ :param id: The synonyms set identifier to delete. """ @@ -98,7 +98,7 @@ async def delete_synonym_rule( Delete a synonym rule from a synonym set.

    - ``_ + ``_ :param set_id: The ID of the synonym set to update. :param rule_id: The ID of the synonym rule to delete. @@ -151,7 +151,7 @@ async def get_synonym(

    Get a synonym set.

    - ``_ + ``_ :param id: The synonyms set identifier to retrieve. :param from_: The starting offset for query rules to retrieve. @@ -202,7 +202,7 @@ async def get_synonym_rule( Get a synonym rule from a synonym set.

    - ``_ + ``_ :param set_id: The ID of the synonym set to retrieve the synonym rule from. :param rule_id: The ID of the synonym rule to retrieve. @@ -255,7 +255,7 @@ async def get_synonyms_sets( Get a summary of all defined synonym sets.

    - ``_ + ``_ :param from_: The starting offset for synonyms sets to retrieve. :param size: The maximum number of synonyms sets to retrieve. @@ -311,7 +311,7 @@ async def put_synonym( This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set.

    - ``_ + ``_ :param id: The ID of the synonyms set to be created or updated. :param synonyms_set: The synonym rules definitions for the synonyms set. @@ -370,7 +370,7 @@ async def put_synonym_rule(
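
A sketch of creating a synonyms set with the call described above; the set id and rules are illustrative:

```python
import asyncio

from elasticsearch import AsyncElasticsearch


async def create_synonyms() -> None:
    client = AsyncElasticsearch("https://localhost:9200", api_key="...")  # placeholders
    await client.synonyms.put_synonym(
        id="products-synonyms",
        synonyms_set=[
            {"id": "rule-1", "synonyms": "laptop, notebook"},
            {"synonyms": "tv => television"},  # explicit mapping rule
        ],
    )
    await client.close()


asyncio.run(create_synonyms())
```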

    When you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule.

    - ``_ + ``_ :param set_id: The ID of the synonym set. :param rule_id: The ID of the synonym rule to be updated or created. diff --git a/elasticsearch/_async/client/tasks.py b/elasticsearch/_async/client/tasks.py index af54ecafa..9c3ca19b7 100644 --- a/elasticsearch/_async/client/tasks.py +++ b/elasticsearch/_async/client/tasks.py @@ -60,7 +60,7 @@ async def cancel( You can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task.

    - ``_ + ``_ :param task_id: The task identifier. :param actions: A comma-separated list or wildcard expression of actions that @@ -128,7 +128,7 @@ async def get(

    If the task identifier is not found, a 404 response code indicates that there are no resources that match the request.

    - ``_ + ``_ :param task_id: The task identifier. :param timeout: The period to wait for a response. If no response is received @@ -238,7 +238,7 @@ async def list( The X-Opaque-Id in the children headers is the child task of the task that was initiated by the REST request.

    - ``_ + ``_ :param actions: A comma-separated list or wildcard expression of actions used to limit the request. For example, you can use `cluster:*` to retrieve all diff --git a/elasticsearch/_async/client/text_structure.py b/elasticsearch/_async/client/text_structure.py index e5d7b1e12..35e4df9b0 100644 --- a/elasticsearch/_async/client/text_structure.py +++ b/elasticsearch/_async/client/text_structure.py @@ -72,7 +72,7 @@ async def find_field_structure( It helps determine why the returned structure was chosen.

    - ``_ + ``_ :param field: The field that should be analyzed. :param index: The name of the index that contains the analyzed field. @@ -259,7 +259,7 @@ async def find_message_structure( It helps determine why the returned structure was chosen.

    - ``_ + ``_ :param messages: The list of messages you want to analyze. :param column_names: If the format is `delimited`, you can specify the column @@ -433,7 +433,7 @@ async def find_structure( However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.

    - ``_ + ``_ :param text_files: :param charset: The text's character set. It must be a character set that is @@ -620,7 +620,7 @@ async def test_grok_pattern( The API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings.

    - ``_ + ``_ :param grok_pattern: The Grok pattern to run on the text. :param text: The lines of text to run the Grok pattern on. diff --git a/elasticsearch/_async/client/transform.py b/elasticsearch/_async/client/transform.py index 3d037fdb9..eae63619a 100644 --- a/elasticsearch/_async/client/transform.py +++ b/elasticsearch/_async/client/transform.py @@ -44,7 +44,7 @@ async def delete_transform(
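
A sketch of the Grok-pattern test call described above; the pattern and sample lines are illustrative:

```python
import asyncio

from elasticsearch import AsyncElasticsearch


async def check_grok() -> None:
    client = AsyncElasticsearch("https://localhost:9200", api_key="...")  # placeholders
    resp = await client.text_structure.test_grok_pattern(
        grok_pattern="%{TIMESTAMP_ISO8601:ts} %{LOGLEVEL:level} %{GREEDYDATA:msg}",
        text=[
            "2024-11-09T10:06:17Z INFO service started",
            "a line that does not match",
        ],
    )
    for match in resp["matches"]:
        print(match["matched"], match.get("fields", {}))
    await client.close()


asyncio.run(check_grok())
```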

    Delete a transform.

    - ``_ + ``_ :param transform_id: Identifier for the transform. :param delete_dest_index: If this value is true, the destination index is deleted @@ -108,7 +108,7 @@ async def get_transform( Get configuration information for transforms.

    - ``_ + ``_ :param transform_id: Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using @@ -181,7 +181,7 @@ async def get_transform_stats(

    Get usage information for transforms.

    - ``_ + ``_ :param transform_id: Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using @@ -269,7 +269,7 @@ async def preview_transform( types of the source index and the transform aggregations.

    - ``_ + ``_ :param transform_id: Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform configuration details in @@ -406,7 +406,7 @@ async def put_transform( give users any privileges on .data-frame-internal* indices.

    - ``_ + ``_ :param transform_id: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -512,7 +512,7 @@ async def reset_transform( If the destination index was created by the transform, it is deleted.

    - ``_ + ``_ :param transform_id: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -572,7 +572,7 @@ async def schedule_now_transform( is called again in the meantime.

    - ``_ + ``_ :param transform_id: Identifier for the transform. :param timeout: Controls the time to wait for the scheduling to take place @@ -635,7 +635,7 @@ async def start_transform( destination indices, the transform fails when it attempts unauthorized operations.

    - ``_ + ``_ :param transform_id: Identifier for the transform. :param from_: Restricts the set of transformed entities to those changed after @@ -693,7 +693,7 @@ async def stop_transform( Stops one or more transforms.

    - ``_ + ``_ :param transform_id: Identifier for the transform. To stop multiple transforms, use a comma-separated list or a wildcard expression. To stop all transforms, diff --git a/elasticsearch/_async/client/xpack.py b/elasticsearch/_async/client/xpack.py index c9a314fbc..b71476ae9 100644 --- a/elasticsearch/_async/client/xpack.py +++ b/elasticsearch/_async/client/xpack.py @@ -54,7 +54,7 @@ async def info( - ``_ + ``_ :param accept_enterprise: If this param is used it must be set to true :param categories: A comma-separated list of the information categories to include diff --git a/elasticsearch/_sync/client/__init__.py b/elasticsearch/_sync/client/__init__.py index bce9d43fb..69389fcff 100644 --- a/elasticsearch/_sync/client/__init__.py +++ b/elasticsearch/_sync/client/__init__.py @@ -729,7 +729,7 @@ def bulk( The other two shards that make up the index do not participate in the _bulk request at all.

    - ``_ + ``_ :param operations: :param index: The name of the data stream, index, or index alias to perform bulk @@ -854,7 +854,7 @@ def clear_scroll( Clear the search context and results for a scrolling search.

    - ``_ + ``_ :param scroll_id: The scroll IDs to clear. To clear all scroll IDs, use `_all`. """ @@ -911,7 +911,7 @@ def close_point_in_time( However, keeping points in time has a cost; close them as soon as they are no longer required for search requests.

    - ``_ + ``_ :param id: The ID of the point-in-time. """ @@ -995,7 +995,7 @@ def count( This means that replicas increase the scalability of the count.

    - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, @@ -1119,10 +1119,7 @@ def create( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, - if_primary_term: t.Optional[int] = None, - if_seq_no: t.Optional[int] = None, include_source_on_error: t.Optional[bool] = None, - op_type: t.Optional[t.Union[str, t.Literal["create", "index"]]] = None, pipeline: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ @@ -1197,7 +1194,7 @@ def create( The _shards section of the API response reveals the number of shard copies on which replication succeeded and failed.

    - ``_ + ``_ :param index: The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template @@ -1207,18 +1204,8 @@ def create( :param id: A unique identifier for the document. To automatically generate a document ID, use the `POST //_doc/` request format. :param document: - :param if_primary_term: Only perform the operation if the document has this primary - term. - :param if_seq_no: Only perform the operation if the document has this sequence - number. :param include_source_on_error: True or false if to include the document source in the error message in case of parsing errors. - :param op_type: Set to `create` to only index the document if it does not already - exist (put if absent). If a document with the specified `_id` already exists, - the indexing operation will fail. The behavior is the same as using the `/_create` - endpoint. If a document ID is specified, this paramater defaults to `index`. - Otherwise, it defaults to `create`. If the request targets a data stream, - an `op_type` of `create` is required. :param pipeline: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final @@ -1270,14 +1257,8 @@ def create( __query["filter_path"] = filter_path if human is not None: __query["human"] = human - if if_primary_term is not None: - __query["if_primary_term"] = if_primary_term - if if_seq_no is not None: - __query["if_seq_no"] = if_seq_no if include_source_on_error is not None: __query["include_source_on_error"] = include_source_on_error - if op_type is not None: - __query["op_type"] = op_type if pipeline is not None: __query["pipeline"] = pipeline if pretty is not None: @@ -1364,7 +1345,7 @@ def delete( It then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group.

    - ``_ + ``_ :param index: The name of the target index. :param id: A unique identifier for the document. @@ -1553,7 +1534,7 @@ def delete_by_query( The get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself.

    - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, @@ -1750,7 +1731,7 @@ def delete_by_query_rethrottle( Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts.

    - ``_ + ``_ :param task_id: The ID for the task. :param requests_per_second: The throttle for this request in sub-requests per @@ -1800,7 +1781,7 @@ def delete_script( Deletes a stored script or search template.

    - ``_ + ``_ :param id: The identifier for the stored script or search template. :param master_timeout: The period to wait for a connection to the master node. @@ -1884,7 +1865,7 @@ def exists( Elasticsearch cleans up deleted documents in the background as you continue to index more data.

    - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases. It supports wildcards (`*`). @@ -2007,7 +1988,7 @@ def exists_source(

    A document's source is not available if it is disabled in the mapping.

    - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases. It supports wildcards (`*`). @@ -2113,7 +2094,7 @@ def explain( It computes a score explanation for a query and a specific document.

    - ``_ + ``_ :param index: Index names that are used to limit the request. Only a single index name can be provided to this parameter. @@ -2248,7 +2229,7 @@ def field_caps( For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the keyword family.

    - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams @@ -2409,7 +2390,7 @@ def get( Elasticsearch cleans up deleted documents in the background as you continue to index more data.

    - ``_ + ``_ :param index: The name of the index that contains the document. :param id: A unique document identifier. @@ -2516,7 +2497,7 @@ def get_script( Retrieves a stored script or search template.

    - ``_ + ``_ :param id: The identifier for the stored script or search template. :param master_timeout: The period to wait for the master node. If the master @@ -2565,7 +2546,7 @@ def get_script_context(

    Get a list of supported script contexts and their methods.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_script_context" @@ -2604,7 +2585,7 @@ def get_script_languages(

    Get a list of available script types, languages, and contexts.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_script_language" @@ -2650,7 +2631,6 @@ def get_source( source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None, source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None, source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None, - stored_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, version: t.Optional[int] = None, version_type: t.Optional[ t.Union[str, t.Literal["external", "external_gte", "force", "internal"]] @@ -2669,7 +2649,7 @@ def get_source( - ``_ + ``_ :param index: The name of the index that contains the document. :param id: A unique document identifier. @@ -2687,8 +2667,6 @@ def get_source( the response. :param source_includes: A comma-separated list of source fields to include in the response. - :param stored_fields: A comma-separated list of stored fields to return as part - of a hit. :param version: The version number for concurrency control. It must match the current version of the document for the request to succeed. :param version_type: The version type. @@ -2722,8 +2700,6 @@ def get_source( __query["_source_excludes"] = source_excludes if source_includes is not None: __query["_source_includes"] = source_includes - if stored_fields is not None: - __query["stored_fields"] = stored_fields if version is not None: __query["version"] = version if version_type is not None: @@ -2769,7 +2745,7 @@ def health_report( When setting up automated polling of the API for health status, set verbose to false to disable the more expensive analysis logic.

    - ``_ + ``_ :param feature: A feature of the cluster, as returned by the top-level health report API. @@ -2832,6 +2808,7 @@ def index( t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, require_alias: t.Optional[bool] = None, + require_data_stream: t.Optional[bool] = None, routing: t.Optional[str] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, version: t.Optional[int] = None, @@ -2934,7 +2911,7 @@ def index( - ``_ + ``_ :param index: The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template @@ -2967,6 +2944,8 @@ def index( this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. :param require_alias: If `true`, the destination must be an index alias. + :param require_data_stream: If `true`, the request's actions must target a data + stream (existing or to be created). :param routing: A custom value that is used to route operations to a specific shard. :param timeout: The period the request waits for the following operations: automatic @@ -3028,6 +3007,8 @@ def index( __query["refresh"] = refresh if require_alias is not None: __query["require_alias"] = require_alias + if require_data_stream is not None: + __query["require_data_stream"] = require_data_stream if routing is not None: __query["routing"] = routing if timeout is not None: @@ -3063,10 +3044,11 @@ def info( .. raw:: html
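
The change to the index call above adds a `require_data_stream` flag. A sketch of passing it when writing to a data stream; the target name and document are placeholders:

```python
from datetime import datetime, timezone

from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")  # placeholders
# The request fails if "logs-myapp-default" does not resolve to a data stream.
client.index(
    index="logs-myapp-default",
    document={
        "@timestamp": datetime.now(timezone.utc).isoformat(),
        "message": "hello",
    },
    require_data_stream=True,  # new flag added in this change
)
client.close()
```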

    Get cluster info. - Get basic build, version, and cluster information.

    + Get basic build, version, and cluster information. + ::: In Serverless, this API is retained for backward compatibility only. Some response fields, such as the version number, should be ignored.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/" @@ -3140,7 +3122,7 @@ def knn_search( - ``_ + ``_ :param index: A comma-separated list of index names to search; use `_all` or to perform the operation on all indices. @@ -3256,7 +3238,7 @@ def mget( You can include the stored_fields query parameter in the request URI to specify the defaults to use when there are no per-document instructions.

    - ``_ + ``_ :param index: Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index. @@ -3391,7 +3373,7 @@ def msearch( When sending requests to this endpoint the Content-Type header should be set to application/x-ndjson.

    - ``_ + ``_ :param searches: :param index: Comma-separated list of data streams, indices, and index aliases @@ -3537,7 +3519,7 @@ def msearch_template( - ``_ + ``_ :param search_templates: :param index: A comma-separated list of data streams, indices, and aliases to @@ -3642,7 +3624,7 @@ def mtermvectors( The mapping used is determined by the specified _index.

    - ``_ + ``_ :param index: The name of the index that contains the documents. :param docs: An array of existing or artificial documents. @@ -3783,7 +3765,7 @@ def open_point_in_time( You can check how many point-in-times (that is, search contexts) are open with the nodes stats API.

    - ``_ + ``_ :param index: A comma-separated list of index names to open point in time; use `_all` or empty string to perform the operation on all indices @@ -3796,8 +3778,7 @@ def open_point_in_time( :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated - values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, - `hidden`, `none`. + values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param index_filter: Filter indices if the provided query rewrites to `match_none` @@ -3881,7 +3862,7 @@ def put_script( Creates or updates a stored script or search template.
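
A sketch of the point-in-time flow described above: open a PIT, search against it, and close it; the index name and keep-alive are illustrative:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")  # placeholders
pit = client.open_point_in_time(index="my-index", keep_alive="1m")
try:
    resp = client.search(
        query={"match_all": {}},
        pit={"id": pit["id"], "keep_alive": "1m"},  # searches a frozen view of the index
        size=100,
    )
    print(resp["hits"]["total"])
finally:
    client.close_point_in_time(id=pit["id"])
client.close()
```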

    - ``_ + ``_ :param id: The identifier for the stored script or search template. It must be unique within the cluster. @@ -3971,7 +3952,7 @@ def rank_eval(

    Evaluate the quality of ranked search results over a set of typical search queries.

    - ``_ + ``_ :param requests: A set of typical search requests, together with their provided ratings. @@ -4203,7 +4184,7 @@ def reindex( It is not possible to configure SSL in the body of the reindex request.

    - ``_ + ``_ :param dest: The destination you are copying to. :param source: The source you are copying from. @@ -4327,7 +4308,7 @@ def reindex_rethrottle( This behavior prevents scroll timeouts.

    - ``_ + ``_ :param task_id: The task identifier, which can be found by using the tasks API. :param requests_per_second: The throttle for this request in sub-requests per @@ -4383,7 +4364,7 @@ def render_search_template(

    Render a search template as a search request body.

    - ``_ + ``_ :param id: The ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required. @@ -4477,7 +4458,7 @@ def scripts_painless_execute(

    Each context requires a script, but additional parameters depend on the context you're using for that script.

    - ``_ + ``_ :param context: The context that the script should run in. NOTE: Result ordering in the field contexts is not guaranteed. @@ -4550,7 +4531,7 @@ def scroll(

    IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.

    - ``_ + ``_ :param scroll_id: The scroll ID of the search. :param rest_total_hits_as_int: If true, the API response’s hit.total property @@ -4756,7 +4737,7 @@ def search( This situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.
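
A sketch of a scrolling search as described above, paging until no hits remain and then clearing the scroll; the index and page size are illustrative:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="...")  # placeholders
resp = client.search(index="my-index", scroll="1m", size=500, query={"match_all": {}})
scroll_id = resp["_scroll_id"]
try:
    while resp["hits"]["hits"]:
        for hit in resp["hits"]["hits"]:
            print(hit["_id"])  # process each hit here
        resp = client.scroll(scroll_id=scroll_id, scroll="1m")
        scroll_id = resp["_scroll_id"]
finally:
    client.clear_scroll(scroll_id=scroll_id)
client.close()
```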

    - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, @@ -5507,7 +5488,7 @@ def search_mvt( Elasticsearch uses the H3 resolution that is closest to the corresponding geotile density.

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, or aliases to search :param field: Field containing geospatial data to return @@ -5681,7 +5662,7 @@ def search_shards(

    If the Elasticsearch security features are enabled, you must have the view_index_metadata or manage index privilege for the target data stream, index, or alias.

    - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, @@ -5694,7 +5675,7 @@ def search_shards( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param local: If `true`, the request retrieves information from the local node @@ -5792,7 +5773,7 @@ def search_template(

    Run a search with a search template.

    - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). @@ -5806,8 +5787,7 @@ def search_template( :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated - values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, - `hidden`, `none`. + values, such as `open,hidden`. :param explain: If `true`, returns detailed information about score calculation as part of each hit. If you specify both this and the `explain` query parameter, the API uses only the query parameter. @@ -5935,7 +5915,7 @@ def terms_enum( - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and index aliases to search. Wildcard (`*`) expressions are supported. To search all data streams @@ -6084,7 +6064,7 @@ def termvectors( Use routing only to hit a particular shard.

    - ``_ + ``_ :param index: The name of the index that contains the document. :param id: A unique identifier for the document. @@ -6255,7 +6235,7 @@ def update( In addition to _source, you can access the following variables through the ctx map: _index, _type, _id, _version, _routing, and _now (the current timestamp).

    - ``_ + ``_ :param index: The name of the target index. By default, the index is created automatically if it doesn't exist. @@ -6493,7 +6473,7 @@ def update_by_query( This API enables you to only modify the source of matching documents; you cannot move them.

    - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, @@ -6518,8 +6498,7 @@ def update_by_query( :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated - values, such as `open,hidden`. Valid values are: `all`, `open`, `closed`, - `hidden`, `none`. + values, such as `open,hidden`. :param from_: Skips the specified number of documents. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. @@ -6713,7 +6692,7 @@ def update_by_query_rethrottle( Rethrottling that speeds up the query takes effect immediately but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts.

    - ``_ + ``_ :param task_id: The ID for the task. :param requests_per_second: The throttle for this request in sub-requests per diff --git a/elasticsearch/_sync/client/async_search.py b/elasticsearch/_sync/client/async_search.py index 3759ab575..ff95875ad 100644 --- a/elasticsearch/_sync/client/async_search.py +++ b/elasticsearch/_sync/client/async_search.py @@ -44,7 +44,7 @@ def delete( If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the cancel_task cluster privilege.

    - ``_ + ``_ :param id: A unique identifier for the async search. """ @@ -94,7 +94,7 @@ def get( If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it.

    - ``_ + ``_ :param id: A unique identifier for the async search. :param keep_alive: The length of time that the async search should be available @@ -164,7 +164,7 @@ def status( - ``_ + ``_ :param id: A unique identifier for the async search. :param keep_alive: The length of time that the async search needs to be available. @@ -281,7 +281,6 @@ def submit( ] = None, lenient: t.Optional[bool] = None, max_concurrent_shard_requests: t.Optional[int] = None, - min_compatible_shard_node: t.Optional[str] = None, min_score: t.Optional[float] = None, pit: t.Optional[t.Mapping[str, t.Any]] = None, post_filter: t.Optional[t.Mapping[str, t.Any]] = None, @@ -346,7 +345,7 @@ def submit( The maximum allowed size for a stored async search response can be set by changing the search.max_async_search_response_size cluster level setting.

    - ``_ + ``_ :param index: A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices @@ -401,7 +400,6 @@ def submit( per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests - :param min_compatible_shard_node: :param min_score: Minimum _score for matching documents. Documents with a lower _score are not included in search results and results collected by aggregations. :param pit: Limits the search to a point in time (PIT). If you provide a PIT, @@ -526,8 +524,6 @@ def submit( __query["lenient"] = lenient if max_concurrent_shard_requests is not None: __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests - if min_compatible_shard_node is not None: - __query["min_compatible_shard_node"] = min_compatible_shard_node if preference is not None: __query["preference"] = preference if pretty is not None: diff --git a/elasticsearch/_sync/client/autoscaling.py b/elasticsearch/_sync/client/autoscaling.py index 6a3768a98..53cfe405a 100644 --- a/elasticsearch/_sync/client/autoscaling.py +++ b/elasticsearch/_sync/client/autoscaling.py @@ -44,7 +44,7 @@ def delete_autoscaling_policy(
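Purely as a hedged usage sketch of the `async_search.submit` change above (the client address, index name, and query are assumptions, not part of this patch), a call simply no longer passes `min_compatible_shard_node`:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    # Submit an async search; `min_compatible_shard_node` is no longer accepted.
    resp = client.async_search.submit(
        index="my-index",                    # assumed index name
        query={"match_all": {}},
        wait_for_completion_timeout="1s",
        keep_on_completion=True,             # keep results so they can be fetched by id
    )
    search_id = resp["id"]
    print(client.async_search.status(id=search_id))
    client.async_search.delete(id=search_id)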

    NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

    - ``_ + ``_ :param name: the name of the autoscaling policy :param master_timeout: Period to wait for a connection to the master node. If @@ -104,7 +104,7 @@ def get_autoscaling_capacity( Do not use this information to make autoscaling decisions.

    - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and @@ -151,7 +151,7 @@ def get_autoscaling_policy(

    NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

    - ``_ + ``_ :param name: the name of the autoscaling policy :param master_timeout: Period to wait for a connection to the master node. If @@ -206,7 +206,7 @@ def put_autoscaling_policy(

    NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

    - ``_ + ``_ :param name: the name of the autoscaling policy :param policy: diff --git a/elasticsearch/_sync/client/cat.py b/elasticsearch/_sync/client/cat.py index 2ca151d5a..a84d9be61 100644 --- a/elasticsearch/_sync/client/cat.py +++ b/elasticsearch/_sync/client/cat.py @@ -51,7 +51,6 @@ def aliases( help: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, @@ -65,7 +64,7 @@ def aliases(

    IMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.

    - ``_ + ``_ :param name: A comma-separated list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. @@ -82,10 +81,6 @@ def aliases( the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - :param master_timeout: The period to wait for a connection to the master node. - If the master node is not available before the timeout expires, the request - fails and returns an error. To indicated that the request should never timeout, - you can set it to `-1`. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. @@ -115,8 +110,6 @@ def aliases( __query["human"] = human if local is not None: __query["local"] = local - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: @@ -161,7 +154,7 @@ def allocation(
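As an illustrative sketch of the `cat.aliases` change above (the alias pattern is an assumption; `client` as in the earlier sketch), the call simply omits the removed `master_timeout` argument:

    # Only local / coordinating-node cluster state is consulted; `master_timeout`
    # is no longer a valid keyword argument here.
    rows = client.cat.aliases(name="logs-*", h=["alias", "index"], format="json")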

    IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.

    - ``_ + ``_ :param node_id: A comma-separated list of node identifiers or names used to limit the returned information. @@ -250,7 +243,7 @@ def component_templates( They are not intended for use by applications. For application consumption, use the get component template API.

    - ``_ + ``_ :param name: The name of the component template. It accepts wildcard expressions. If it is omitted, all component templates are returned. @@ -334,7 +327,7 @@ def count( They are not intended for use by applications. For application consumption, use the count API.

    - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. It supports wildcards (`*`). To target all data streams @@ -412,7 +405,7 @@ def fielddata( They are not intended for use by applications. For application consumption, use the nodes stats API.

    - ``_ + ``_ :param fields: Comma-separated list of fields used to limit returned information. To retrieve all fields, omit this parameter. @@ -498,7 +491,7 @@ def health( You also can use the API to track the recovery of a large cluster over a longer period of time.

    - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -556,7 +549,7 @@ def help(self) -> TextApiResponse:

    Get help for the CAT APIs.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_cat" @@ -591,7 +584,9 @@ def indices( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[t.Union[str, t.Sequence[str]]] = None, - health: t.Optional[t.Union[str, t.Literal["green", "red", "yellow"]]] = None, + health: t.Optional[ + t.Union[str, t.Literal["green", "red", "unavailable", "unknown", "yellow"]] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, include_unloaded_segments: t.Optional[bool] = None, @@ -623,7 +618,7 @@ def indices( They are not intended for use by applications. For application consumption, use an index endpoint.

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -721,7 +716,7 @@ def master(
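A hedged sketch of the widened `health` literal for `cat.indices` shown above (index pattern assumed; `client` as before):

    # "unavailable" and "unknown" now type-check alongside "green", "yellow", "red".
    yellow = client.cat.indices(index="logs-*", health="yellow", bytes="mb", format="json")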

    IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.

    - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -899,7 +894,7 @@ def ml_data_frame_analytics( application consumption, use the get data frame analytics jobs statistics API.

    - ``_ + ``_ :param id: The ID of the data frame analytics to fetch :param allow_no_match: Whether to ignore if a wildcard expression matches no @@ -1067,7 +1062,7 @@ def ml_datafeeds( application consumption, use the get datafeed statistics API.

    - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. @@ -1433,7 +1428,7 @@ def ml_jobs( application consumption, use the get anomaly detection job statistics API.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param allow_no_match: Specifies what to do when the request: * Contains wildcard @@ -1618,7 +1613,7 @@ def ml_trained_models( application consumption, use the get trained models statistics API.

    - ``_ + ``_ :param model_id: A unique identifier for the trained model. :param allow_no_match: Specifies what to do when the request: contains wildcard @@ -1711,7 +1706,7 @@ def nodeattrs( IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.

    - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -1774,7 +1769,200 @@ def nodes( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, full_id: t.Optional[t.Union[bool, str]] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, + h: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[ + str, + t.Literal[ + "build", + "completion.size", + "cpu", + "disk.avail", + "disk.total", + "disk.used", + "disk.used_percent", + "fielddata.evictions", + "fielddata.memory_size", + "file_desc.current", + "file_desc.max", + "file_desc.percent", + "flush.total", + "flush.total_time", + "get.current", + "get.exists_time", + "get.exists_total", + "get.missing_time", + "get.missing_total", + "get.time", + "get.total", + "heap.current", + "heap.max", + "heap.percent", + "http_address", + "id", + "indexing.delete_current", + "indexing.delete_time", + "indexing.delete_total", + "indexing.index_current", + "indexing.index_failed", + "indexing.index_failed_due_to_version_conflict", + "indexing.index_time", + "indexing.index_total", + "ip", + "jdk", + "load_15m", + "load_1m", + "load_5m", + "mappings.total_count", + "mappings.total_estimated_overhead_in_bytes", + "master", + "merges.current", + "merges.current_docs", + "merges.current_size", + "merges.total", + "merges.total_docs", + "merges.total_size", + "merges.total_time", + "name", + "node.role", + "pid", + "port", + "query_cache.evictions", + "query_cache.hit_count", + "query_cache.memory_size", + "query_cache.miss_count", + "ram.current", + "ram.max", + "ram.percent", + "refresh.time", + "refresh.total", + "request_cache.evictions", + "request_cache.hit_count", + "request_cache.memory_size", + "request_cache.miss_count", + "script.cache_evictions", + "script.compilations", + "search.fetch_current", + "search.fetch_time", + "search.fetch_total", + "search.open_contexts", + "search.query_current", + "search.query_time", + "search.query_total", + "search.scroll_current", + "search.scroll_time", + "search.scroll_total", + "segments.count", + "segments.fixed_bitset_memory", + "segments.index_writer_memory", + "segments.memory", + "segments.version_map_memory", + "shard_stats.total_count", + "suggest.current", + "suggest.time", + "suggest.total", + "uptime", + "version", + ], + ] + ], + t.Union[ + str, + t.Literal[ + "build", + "completion.size", + "cpu", + "disk.avail", + "disk.total", + "disk.used", + "disk.used_percent", + "fielddata.evictions", + "fielddata.memory_size", + "file_desc.current", + "file_desc.max", + "file_desc.percent", + "flush.total", + "flush.total_time", + "get.current", + "get.exists_time", + "get.exists_total", + "get.missing_time", + "get.missing_total", + "get.time", + "get.total", + "heap.current", + "heap.max", + "heap.percent", + "http_address", + "id", + "indexing.delete_current", + "indexing.delete_time", + "indexing.delete_total", + "indexing.index_current", + "indexing.index_failed", + "indexing.index_failed_due_to_version_conflict", + "indexing.index_time", + "indexing.index_total", + "ip", + "jdk", + "load_15m", + "load_1m", + "load_5m", + "mappings.total_count", + "mappings.total_estimated_overhead_in_bytes", + "master", + "merges.current", + "merges.current_docs", + "merges.current_size", + "merges.total", + "merges.total_docs", + "merges.total_size", + "merges.total_time", + "name", + "node.role", + "pid", + "port", + "query_cache.evictions", + "query_cache.hit_count", + 
"query_cache.memory_size", + "query_cache.miss_count", + "ram.current", + "ram.max", + "ram.percent", + "refresh.time", + "refresh.total", + "request_cache.evictions", + "request_cache.hit_count", + "request_cache.memory_size", + "request_cache.miss_count", + "script.cache_evictions", + "script.compilations", + "search.fetch_current", + "search.fetch_time", + "search.fetch_total", + "search.open_contexts", + "search.query_current", + "search.query_time", + "search.query_total", + "search.scroll_current", + "search.scroll_time", + "search.scroll_total", + "segments.count", + "segments.fixed_bitset_memory", + "segments.index_writer_memory", + "segments.memory", + "segments.version_map_memory", + "shard_stats.total_count", + "suggest.current", + "suggest.time", + "suggest.total", + "uptime", + "version", + ], + ], + ] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, include_unloaded_segments: t.Optional[bool] = None, @@ -1794,23 +1982,24 @@ def nodes( IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.

    - ``_ + ``_ :param bytes: The unit used to display byte values. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param full_id: If `true`, return the full node ID. If `false`, return the shortened node ID. - :param h: List of columns to appear in the response. Supports simple wildcards. + :param h: A comma-separated list of column names to display. It supports simple + wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param include_unloaded_segments: If true, the response includes information from segments that are not loaded into memory. - :param master_timeout: Period to wait for a connection to the master node. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. - :param time: Unit used to display time values. + :param master_timeout: The period to wait for a connection to the master node. + :param s: A comma-separated list of column names or aliases that determines the + sort order. Sorting defaults to ascending and can be changed by setting `:asc` + or `:desc` as a suffix to the column name. + :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} @@ -1881,7 +2070,7 @@ def pending_tasks( IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API.
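The newly enumerated column names for the `cat.nodes` `h` parameter can be passed as a list, and `s` still accepts `:asc`/`:desc` suffixes; a non-authoritative sketch (`client` as in the earlier sketch):

    resp = client.cat.nodes(
        h=["name", "node.role", "heap.percent", "cpu", "load_1m", "master"],
        s=["cpu:desc"],   # busiest nodes first
        full_id=True,     # full node IDs instead of the shortened form
        format="json",
    )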

    - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -1961,7 +2150,7 @@ def plugins( IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.

    - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -2029,7 +2218,74 @@ def recovery( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, + h: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[ + str, + t.Literal[ + "bytes", + "bytes_percent", + "bytes_recovered", + "bytes_total", + "files", + "files_percent", + "files_recovered", + "files_total", + "index", + "repository", + "shard", + "snapshot", + "source_host", + "source_node", + "stage", + "start_time", + "start_time_millis", + "stop_time", + "stop_time_millis", + "target_host", + "target_node", + "time", + "translog_ops", + "translog_ops_percent", + "translog_ops_recovered", + "type", + ], + ] + ], + t.Union[ + str, + t.Literal[ + "bytes", + "bytes_percent", + "bytes_recovered", + "bytes_total", + "files", + "files_percent", + "files_recovered", + "files_total", + "index", + "repository", + "shard", + "snapshot", + "source_host", + "source_node", + "stage", + "start_time", + "start_time_millis", + "stop_time", + "stop_time_millis", + "target_host", + "target_node", + "time", + "translog_ops", + "translog_ops_percent", + "translog_ops_recovered", + "type", + ], + ], + ] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, @@ -2049,7 +2305,7 @@ def recovery( IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API.

    - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -2060,13 +2316,14 @@ def recovery( shard recoveries. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. - :param h: List of columns to appear in the response. Supports simple wildcards. + :param h: A comma-separated list of column names to display. It supports simple + wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. - :param time: Unit used to display time values. + :param s: A comma-separated list of column names or aliases that determines the + sort order. Sorting defaults to ascending and can be changed by setting `:asc` + or `:desc` as a suffix to the column name. + :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] @@ -2137,7 +2394,7 @@ def repositories( IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API.

    - ``_ + ``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. @@ -2200,7 +2457,52 @@ def segments( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, + h: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[ + str, + t.Literal[ + "committed", + "compound", + "docs.count", + "docs.deleted", + "generation", + "id", + "index", + "ip", + "prirep", + "searchable", + "segment", + "shard", + "size", + "size.memory", + "version", + ], + ] + ], + t.Union[ + str, + t.Literal[ + "committed", + "compound", + "docs.count", + "docs.deleted", + "generation", + "id", + "index", + "ip", + "prirep", + "searchable", + "segment", + "shard", + "size", + "size.memory", + "version", + ], + ], + ] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, @@ -2218,7 +2520,7 @@ def segments( IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API.

    - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -2226,7 +2528,8 @@ def segments( :param bytes: The unit used to display byte values. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. - :param h: List of columns to appear in the response. Supports simple wildcards. + :param h: A comma-separated list of columns names to display. It supports simple + wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param local: If `true`, the request computes the list of selected nodes from @@ -2234,9 +2537,9 @@ def segments( from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. + :param s: A comma-separated list of column names or aliases that determines the + sort order. Sorting defaults to ascending and can be changed by setting `:asc` + or `:desc` as a suffix to the column name. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] @@ -2292,7 +2595,162 @@ def shards( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, + h: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[ + str, + t.Literal[ + "completion.size", + "dataset.size", + "dense_vector.value_count", + "docs", + "dsparse_vector.value_count", + "fielddata.evictions", + "fielddata.memory_size", + "flush.total", + "flush.total_time", + "get.current", + "get.exists_time", + "get.exists_total", + "get.missing_time", + "get.missing_total", + "get.time", + "get.total", + "id", + "index", + "indexing.delete_current", + "indexing.delete_time", + "indexing.delete_total", + "indexing.index_current", + "indexing.index_failed", + "indexing.index_failed_due_to_version_conflict", + "indexing.index_time", + "indexing.index_total", + "ip", + "merges.current", + "merges.current_docs", + "merges.current_size", + "merges.total", + "merges.total_docs", + "merges.total_size", + "merges.total_time", + "node", + "prirep", + "query_cache.evictions", + "query_cache.memory_size", + "recoverysource.type", + "refresh.time", + "refresh.total", + "search.fetch_current", + "search.fetch_time", + "search.fetch_total", + "search.open_contexts", + "search.query_current", + "search.query_time", + "search.query_total", + "search.scroll_current", + "search.scroll_time", + "search.scroll_total", + "segments.count", + "segments.fixed_bitset_memory", + "segments.index_writer_memory", + "segments.memory", + "segments.version_map_memory", + "seq_no.global_checkpoint", + "seq_no.local_checkpoint", + "seq_no.max", + "shard", + "state", + "store", + "suggest.current", + "suggest.time", + "suggest.total", + "sync_id", + "unassigned.at", + "unassigned.details", + "unassigned.for", + "unassigned.reason", + ], + ] + ], + t.Union[ + str, + t.Literal[ + "completion.size", + "dataset.size", + "dense_vector.value_count", + "docs", + "dsparse_vector.value_count", + "fielddata.evictions", + 
"fielddata.memory_size", + "flush.total", + "flush.total_time", + "get.current", + "get.exists_time", + "get.exists_total", + "get.missing_time", + "get.missing_total", + "get.time", + "get.total", + "id", + "index", + "indexing.delete_current", + "indexing.delete_time", + "indexing.delete_total", + "indexing.index_current", + "indexing.index_failed", + "indexing.index_failed_due_to_version_conflict", + "indexing.index_time", + "indexing.index_total", + "ip", + "merges.current", + "merges.current_docs", + "merges.current_size", + "merges.total", + "merges.total_docs", + "merges.total_size", + "merges.total_time", + "node", + "prirep", + "query_cache.evictions", + "query_cache.memory_size", + "recoverysource.type", + "refresh.time", + "refresh.total", + "search.fetch_current", + "search.fetch_time", + "search.fetch_total", + "search.open_contexts", + "search.query_current", + "search.query_time", + "search.query_total", + "search.scroll_current", + "search.scroll_time", + "search.scroll_total", + "segments.count", + "segments.fixed_bitset_memory", + "segments.index_writer_memory", + "segments.memory", + "segments.version_map_memory", + "seq_no.global_checkpoint", + "seq_no.local_checkpoint", + "seq_no.max", + "shard", + "state", + "store", + "suggest.current", + "suggest.time", + "suggest.total", + "sync_id", + "unassigned.at", + "unassigned.details", + "unassigned.for", + "unassigned.reason", + ], + ], + ] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, @@ -2312,7 +2770,7 @@ def shards( IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.

    - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -2323,11 +2781,11 @@ def shards( :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. - :param master_timeout: Period to wait for a connection to the master node. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. - :param time: Unit used to display time values. + :param master_timeout: The period to wait for a connection to the master node. + :param s: A comma-separated list of column names or aliases that determines the + sort order. Sorting defaults to ascending and can be changed by setting `:asc` + or `:desc` as a suffix to the column name. + :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] @@ -2380,7 +2838,48 @@ def snapshots( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, + h: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[ + str, + t.Literal[ + "duration", + "end_epoch", + "end_time", + "failed_shards", + "id", + "indices", + "reason", + "repository", + "start_epoch", + "start_time", + "status", + "successful_shards", + "total_shards", + ], + ] + ], + t.Union[ + str, + t.Literal[ + "duration", + "end_epoch", + "end_time", + "failed_shards", + "id", + "indices", + "reason", + "repository", + "start_epoch", + "start_time", + "status", + "successful_shards", + "total_shards", + ], + ], + ] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, @@ -2401,14 +2900,15 @@ def snapshots( IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API.
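Similarly, a hedged sketch of `cat.shards` with the enumerated `h` columns documented above (the index pattern is an assumption):

    resp = client.cat.shards(
        index="logs-*",
        h=["index", "shard", "prirep", "state", "docs", "store", "node"],
        s=["store:desc"],
        format="json",
    )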

    - ``_ + ``_ :param repository: A comma-separated list of snapshot repositories used to limit the request. Accepts wildcard expressions. `_all` returns all repositories. If any repository fails during the request, Elasticsearch returns an error. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. - :param h: List of columns to appear in the response. Supports simple wildcards. + :param h: A comma-separated list of column names to display. It supports simple + wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param ignore_unavailable: If `true`, the response does not include information @@ -2494,7 +2994,7 @@ def tasks( IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API.

    - ``_ + ``_ :param actions: The task action names, which are used to limit the response. :param detailed: If `true`, the response includes detailed information about @@ -2588,7 +3088,7 @@ def templates( IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API.

    - ``_ + ``_ :param name: The name of the template to return. Accepts wildcard expressions. If omitted, all templates are returned. @@ -2655,7 +3155,62 @@ def thread_pool( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, - h: t.Optional[t.Union[str, t.Sequence[str]]] = None, + h: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[ + str, + t.Literal[ + "active", + "completed", + "core", + "ephemeral_id", + "host", + "ip", + "keep_alive", + "largest", + "max", + "name", + "node_id", + "node_name", + "pid", + "pool_size", + "port", + "queue", + "queue_size", + "rejected", + "size", + "type", + ], + ] + ], + t.Union[ + str, + t.Literal[ + "active", + "completed", + "core", + "ephemeral_id", + "host", + "ip", + "keep_alive", + "largest", + "max", + "name", + "node_id", + "node_name", + "pid", + "pool_size", + "port", + "queue", + "queue_size", + "rejected", + "size", + "type", + ], + ], + ] + ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, @@ -2676,7 +3231,7 @@ def thread_pool( IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.

    - ``_ + ``_ :param thread_pool_patterns: A comma-separated list of thread pool names used to limit the request. Accepts wildcard expressions. @@ -2689,10 +3244,10 @@ def thread_pool( the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. - :param master_timeout: Period to wait for a connection to the master node. - :param s: List of columns that determine how the table should be sorted. Sorting - defaults to ascending and can be changed by setting `:asc` or `:desc` as - a suffix to the column name. + :param master_timeout: The period to wait for a connection to the master node. + :param s: A comma-separated list of column names or aliases that determines the + sort order. Sorting defaults to ascending and can be changed by setting `:asc` + or `:desc` as a suffix to the column name. :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ @@ -2933,7 +3488,7 @@ def transforms( application consumption, use the get transform statistics API.
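And a sketch of `cat.thread_pool` with the enumerated `h` columns and the reworded `s` parameter (the pool names are assumptions):

    resp = client.cat.thread_pool(
        thread_pool_patterns="search,write",
        h=["node_name", "name", "active", "queue", "rejected"],
        s=["rejected:desc"],
        format="json",
    )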

    - ``_ + ``_ :param transform_id: A transform identifier or a wildcard expression. If you do not specify one of these options, the API returns information for all diff --git a/elasticsearch/_sync/client/ccr.py b/elasticsearch/_sync/client/ccr.py index ab9fe911b..cdd850fb1 100644 --- a/elasticsearch/_sync/client/ccr.py +++ b/elasticsearch/_sync/client/ccr.py @@ -43,7 +43,7 @@ def delete_auto_follow_pattern(

    Delete a collection of cross-cluster replication auto-follow patterns.

    - ``_ + ``_ :param name: The auto-follow pattern collection to delete. :param master_timeout: The period to wait for a connection to the master node. @@ -130,7 +130,7 @@ def follow( When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index.

    - ``_ + ``_ :param index: The name of the follower index. :param leader_index: The name of the index in the leader cluster to follow. @@ -259,7 +259,7 @@ def follow_info( For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused.

    - ``_ + ``_ :param index: A comma-delimited list of follower index patterns. :param master_timeout: The period to wait for a connection to the master node. @@ -311,7 +311,7 @@ def follow_stats( The API returns shard-level stats about the "following tasks" associated with each shard for the specified indices.

    - ``_ + ``_ :param index: A comma-delimited list of index patterns. :param timeout: The period to wait for a response. If no response is received @@ -380,7 +380,7 @@ def forget_follower( The only purpose of this API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked.

    - ``_ + ``_ :param index: the name of the leader index for which specified follower retention leases should be removed @@ -445,7 +445,7 @@ def get_auto_follow_pattern(

    Get cross-cluster replication auto-follow patterns.

    - ``_ + ``_ :param name: The auto-follow pattern collection that you want to retrieve. If you do not specify a name, the API returns information for all collections. @@ -505,7 +505,7 @@ def pause_auto_follow_pattern( Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim.

    - ``_ + ``_ :param name: The name of the auto-follow pattern to pause. :param master_timeout: The period to wait for a connection to the master node. @@ -559,7 +559,7 @@ def pause_follow( You can pause and resume a follower index to change the configuration of the following task.

    - ``_ + ``_ :param index: The name of the follower index. :param master_timeout: The period to wait for a connection to the master node. @@ -648,7 +648,7 @@ def put_auto_follow_pattern( NOTE: Follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns.

    - ``_ + ``_ :param name: The name of the collection of auto-follow patterns. :param remote_cluster: The remote cluster containing the leader indices to match @@ -782,7 +782,7 @@ def resume_auto_follow_pattern( Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim.

    - ``_ + ``_ :param name: The name of the auto-follow pattern to resume. :param master_timeout: The period to wait for a connection to the master node. @@ -860,7 +860,7 @@ def resume_follow( When this API returns, the follower index will resume fetching operations from the leader index.

    - ``_ + ``_ :param index: The name of the follow index to resume following. :param master_timeout: Period to wait for a connection to the master node. @@ -951,7 +951,7 @@ def stats(

    This API returns stats about auto-following and the same shard-level stats as the get follower stats API.

    - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request @@ -1009,7 +1009,7 @@ def unfollow( - ``_ + ``_ :param index: The name of the follower index. :param master_timeout: The period to wait for a connection to the master node. diff --git a/elasticsearch/_sync/client/cluster.py b/elasticsearch/_sync/client/cluster.py index 77b11fe93..77ced5e60 100644 --- a/elasticsearch/_sync/client/cluster.py +++ b/elasticsearch/_sync/client/cluster.py @@ -54,7 +54,7 @@ def allocation_explain( This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise.

    - ``_ + ``_ :param current_node: Specifies the node ID or the name of the node to only explain a shard that is currently located on the specified node. @@ -130,7 +130,7 @@ def delete_component_template( Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.

    - ``_ + ``_ :param name: Comma-separated list or wildcard expression of component template names used to limit the request. @@ -239,7 +239,7 @@ def exists_component_template( Returns information about whether a particular component template exists.

    - ``_ + ``_ :param name: Comma-separated list of component template names used to limit the request. Wildcard (*) expressions are supported. @@ -290,6 +290,7 @@ def get_component_template( local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, + settings_filter: t.Optional[t.Union[str, t.Sequence[str]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html @@ -298,7 +299,7 @@ def get_component_template( Get information about component templates.

    - ``_ + ``_ :param name: Comma-separated list of component template names used to limit the request. Wildcard (`*`) expressions are supported. @@ -310,6 +311,8 @@ def get_component_template( :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. + :param settings_filter: Filter out results, for example to filter out sensitive + information. Supports wildcards or full settings keys """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: @@ -335,6 +338,8 @@ def get_component_template( __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty + if settings_filter is not None: + __query["settings_filter"] = settings_filter __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", @@ -365,7 +370,7 @@ def get_settings( By default, it returns only settings that have been explicitly defined.
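A hedged sketch of the new `settings_filter` query parameter on `cluster.get_component_template` (the template name and filter pattern are assumptions, and the server must be recent enough to accept the parameter):

    # Wildcard patterns and full settings keys are both accepted by the filter.
    resp = client.cluster.get_component_template(
        name="logs-settings",
        settings_filter="index.routing.allocation.*",
    )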

    - ``_ + ``_ :param flat_settings: If `true`, returns settings in flat format. :param include_defaults: If `true`, returns default cluster settings from the @@ -441,7 +446,7 @@ def health( wait_for_no_relocating_shards: t.Optional[bool] = None, wait_for_nodes: t.Optional[t.Union[int, str]] = None, wait_for_status: t.Optional[ - t.Union[str, t.Literal["green", "red", "yellow"]] + t.Union[str, t.Literal["green", "red", "unavailable", "unknown", "yellow"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -457,7 +462,7 @@ def health( The cluster status is controlled by the worst index status.

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported. To target @@ -565,7 +570,7 @@ def info( Returns basic information about the cluster.
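For the widened `wait_for_status` literal on `cluster.health`, a minimal sketch (a readiness wait typically still targets `yellow` or `green`; `client` as in the earlier sketch):

    health = client.cluster.health(wait_for_status="yellow", timeout="30s")
    print(health["status"], health["number_of_nodes"])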

    - ``_ + ``_ :param target: Limits the information returned to the specific target. Supports a comma-separated list, such as http,ingest. @@ -614,7 +619,7 @@ def pending_tasks( However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API.

    - ``_ + ``_ :param local: If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. @@ -731,6 +736,7 @@ def put_component_template( *, name: str, template: t.Optional[t.Mapping[str, t.Any]] = None, + cause: t.Optional[str] = None, create: t.Optional[bool] = None, deprecated: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, @@ -761,7 +767,7 @@ def put_component_template( To be applied, a component template must be included in an index template's composed_of list.

    - ``_ + ``_ :param name: Name of the component template to create. Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; @@ -774,6 +780,7 @@ def put_component_template( update settings API. :param template: The template to be applied which includes mappings, settings, or aliases configuration. + :param cause: User-defined reason for creating the component template. :param create: If `true`, this request cannot replace or update existing component templates. :param deprecated: Marks this index template as deprecated. When creating or @@ -798,6 +805,8 @@ def put_component_template( __path = f'/_component_template/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} + if cause is not None: + __query["cause"] = cause if create is not None: __query["create"] = create if error_trace is not None: @@ -866,13 +875,13 @@ def put_settings( If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration.
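A sketch of the new `cause` audit string on `cluster.put_component_template` (the template name and body are assumptions):

    # `cause` is a free-form reason string forwarded as a query parameter.
    client.cluster.put_component_template(
        name="my-shared-settings",
        template={"settings": {"index": {"number_of_shards": 1}}},
        cause="bootstrap shared index settings",
        create=True,   # fail instead of overwriting an existing template
    )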

    - ``_ + ``_ :param flat_settings: Return settings in flat format (default: false) :param master_timeout: Explicit operation timeout for connection to master node - :param persistent: + :param persistent: The settings that persist after the cluster restarts. :param timeout: Explicit operation timeout - :param transient: + :param transient: The settings that do not persist after the cluster restarts. """ __path_parts: t.Dict[str, str] = {} __path = "/_cluster/settings" @@ -928,11 +937,11 @@ def remote_info( This API returns information that reflects current state on the local cluster. The connected field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it. Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster. - To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the resolve cluster endpoint.

    + To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the /_resolve/cluster endpoint.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_remote/info" @@ -989,7 +998,7 @@ def reroute(

    Once the problem has been corrected, allocation can be manually retried by calling the reroute API with the ?retry_failed URI query parameter, which will attempt a single retry round for these shards.

    - ``_ + ``_ :param commands: Defines the commands to perform. :param dry_run: If true, then the request simulates the operation. It will calculate @@ -1094,7 +1103,7 @@ def state( Instead, obtain the information you require using other more stable cluster APIs.

    - ``_ + ``_ :param metric: Limit the information returned to the specified metrics :param index: A comma-separated list of index names; use `_all` or empty string @@ -1182,7 +1191,7 @@ def stats( Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, os, jvm versions, memory usage, cpu and installed plugins).

    - ``_ + ``_ :param node_id: Comma-separated list of node filters used to limit returned information. Defaults to all nodes in the cluster. diff --git a/elasticsearch/_sync/client/connector.py b/elasticsearch/_sync/client/connector.py index 76c7d8735..e1726c4f0 100644 --- a/elasticsearch/_sync/client/connector.py +++ b/elasticsearch/_sync/client/connector.py @@ -49,7 +49,7 @@ def check_in(

    Update the last_seen field in the connector and set it to the current timestamp.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be checked in """ @@ -98,7 +98,7 @@ def delete( These need to be removed manually.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be deleted :param delete_sync_jobs: A flag indicating if associated sync jobs should be @@ -147,7 +147,7 @@ def get(

    Get the details about a connector.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector """ @@ -247,7 +247,7 @@ def last_sync( This action is used for analytics and monitoring.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param last_access_control_sync_error: @@ -346,7 +346,7 @@ def list(

    Get information about all connectors.

    - ``_ + ``_ :param connector_name: A comma-separated list of connector names to fetch connector documents for @@ -427,7 +427,7 @@ def post( Self-managed connectors (Connector clients) are self-managed on your infrastructure.

    - ``_ + ``_ :param description: :param index_name: @@ -509,7 +509,7 @@ def put(

    Create or update a connector.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be created or updated. ID is auto-generated if not provided. @@ -584,7 +584,7 @@ def sync_job_cancel( The connector service is then responsible for setting the status of connector sync jobs to cancelled.

    - ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job """ @@ -635,7 +635,7 @@ def sync_job_check_in( This service runs automatically on Elastic Cloud for Elastic managed connectors.

    - ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job to be checked in. @@ -695,7 +695,7 @@ def sync_job_claim( This service runs automatically on Elastic Cloud for Elastic managed connectors.

    - ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job. :param worker_hostname: The host name of the current system that will run the @@ -757,7 +757,7 @@ def sync_job_delete( This is a destructive action that is not recoverable.

    - ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job to be deleted @@ -811,7 +811,7 @@ def sync_job_error( This service runs automatically on Elastic Cloud for Elastic managed connectors.

    - ``_ + ``_ :param connector_sync_job_id: The unique identifier for the connector sync job. :param error: The error for the connector sync job error field. @@ -865,7 +865,7 @@ def sync_job_get(

    Get a connector sync job.

    - ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job """ @@ -938,7 +938,7 @@ def sync_job_list(

    Get information about all stored connector sync jobs listed by their creation date in ascending order.

    - ``_ + ``_ :param connector_id: A connector id to fetch connector sync jobs for :param from_: Starting offset (default: 0) @@ -1004,7 +1004,7 @@ def sync_job_post(

    Create a connector sync job document in the internal index and initialize its counters and timestamps with default values.

    - ``_ + ``_ :param id: The id of the associated connector :param job_type: @@ -1080,7 +1080,7 @@ def sync_job_update_stats( This service runs automatically on Elastic Cloud for Elastic managed connectors.

    - ``_ + ``_ :param connector_sync_job_id: The unique identifier of the connector sync job. :param deleted_document_count: The number of documents the sync job deleted. @@ -1163,7 +1163,7 @@ def update_active_filtering(

    Activates the valid draft filtering for a connector.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated """ @@ -1216,7 +1216,7 @@ def update_api_key_id( Self-managed connectors (connector clients) do not use this field.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param api_key_id: @@ -1275,7 +1275,7 @@ def update_configuration(

    Update the configuration field in the connector document.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param configuration: @@ -1335,7 +1335,7 @@ def update_error( Otherwise, if the error is reset to null, the connector status is updated to connected.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param error: @@ -1403,7 +1403,7 @@ def update_features( This service runs automatically on Elastic Cloud for Elastic managed connectors.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated. :param features: @@ -1464,7 +1464,7 @@ def update_filtering( The filtering property is used to configure sync rules (both basic and advanced) for a connector.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param advanced_snippet: @@ -1525,7 +1525,7 @@ def update_filtering_validation(

    Update the draft filtering validation info for a connector.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param validation: @@ -1582,7 +1582,7 @@ def update_index_name(

    Update the index_name field of a connector, specifying the index where the data ingested by the connector is stored.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param index_name: @@ -1639,7 +1639,7 @@ def update_name(

    Update the connector name and description.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param description: @@ -1696,7 +1696,7 @@ def update_native(

    Update the connector is_native flag.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param is_native: @@ -1753,7 +1753,7 @@ def update_pipeline(

    When you create a new connector, the configuration of an ingest pipeline is populated with default settings.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param pipeline: @@ -1809,7 +1809,7 @@ def update_scheduling(

    Update the connector scheduling.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param scheduling: @@ -1865,7 +1865,7 @@ def update_service_type(

    Update the connector service type.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param service_type: @@ -1928,7 +1928,7 @@ def update_status(

    Update the connector status.

    - ``_ + ``_ :param connector_id: The unique identifier of the connector to be updated :param status: diff --git a/elasticsearch/_sync/client/dangling_indices.py b/elasticsearch/_sync/client/dangling_indices.py index 28f228ca6..dd1cee4c3 100644 --- a/elasticsearch/_sync/client/dangling_indices.py +++ b/elasticsearch/_sync/client/dangling_indices.py @@ -46,7 +46,7 @@ def delete_dangling_index( For example, this can happen if you delete more than cluster.indices.tombstones.size indices while an Elasticsearch node is offline.

    - ``_ + ``_ :param index_uuid: The UUID of the index to delete. Use the get dangling indices API to find the UUID. @@ -107,7 +107,7 @@ def import_dangling_index( For example, this can happen if you delete more than cluster.indices.tombstones.size indices while an Elasticsearch node is offline.

    - ``_ + ``_ :param index_uuid: The UUID of the index to import. Use the get dangling indices API to locate the UUID. @@ -168,7 +168,7 @@ def list_dangling_indices(

    Use this API to list dangling indices, which you can then import or delete.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_dangling" diff --git a/elasticsearch/_sync/client/enrich.py b/elasticsearch/_sync/client/enrich.py index 90fc153b8..c713b62e9 100644 --- a/elasticsearch/_sync/client/enrich.py +++ b/elasticsearch/_sync/client/enrich.py @@ -43,7 +43,7 @@ def delete_policy( Deletes an existing enrich policy and its enrich index.

    - ``_ + ``_ :param name: Enrich policy to delete. :param master_timeout: Period to wait for a connection to the master node. @@ -92,7 +92,7 @@ def execute_policy( Create the enrich index for an existing enrich policy.

    - ``_ + ``_ :param name: Enrich policy to execute. :param master_timeout: Period to wait for a connection to the master node. @@ -144,7 +144,7 @@ def get_policy( Returns information about an enrich policy.

    - ``_ + ``_ :param name: Comma-separated list of enrich policy names used to limit the request. To return information for all enrich policies, omit this parameter. @@ -202,7 +202,7 @@ def put_policy( Creates an enrich policy.

    - ``_ + ``_ :param name: Name of the enrich policy to create or update. :param geo_match: Matches enrich data to incoming documents based on a `geo_shape` @@ -263,7 +263,7 @@ def stats( Returns enrich coordinator statistics and information about enrich policies that are currently executing.

    - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. """ diff --git a/elasticsearch/_sync/client/eql.py b/elasticsearch/_sync/client/eql.py index 7df6bf4b4..6c9d60365 100644 --- a/elasticsearch/_sync/client/eql.py +++ b/elasticsearch/_sync/client/eql.py @@ -93,7 +93,7 @@ def get( Get the current status and available results for an async EQL search or a stored synchronous EQL search.

    - ``_ + ``_ :param id: Identifier for the search. :param keep_alive: Period for which the search and its results are stored on @@ -147,7 +147,7 @@ def get_status( Get the current status for an async EQL search or a stored synchronous EQL search without returning results.

    - ``_ + ``_ :param id: Identifier for the search. """ @@ -204,6 +204,7 @@ def search( allow_partial_search_results: t.Optional[bool] = None, allow_partial_sequence_results: t.Optional[bool] = None, case_sensitive: t.Optional[bool] = None, + ccs_minimize_roundtrips: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, event_category_field: t.Optional[str] = None, expand_wildcards: t.Optional[ @@ -246,11 +247,13 @@ def search( EQL assumes each document in a data stream or index corresponds to an event.

    - ``_ + ``_ :param index: The name of the index to scope the operation :param query: EQL query you wish to run. - :param allow_no_indices: + :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves + into no concrete indices. (This includes `_all` string or when no indices + have been specified) :param allow_partial_search_results: Allow query execution also in case of shard failures. If true, the query will keep running and will return results based on the available shards. For sequences, the behavior can be further refined @@ -261,9 +264,12 @@ def search( If false, the sequence query will return successfully, but will always have empty results. :param case_sensitive: + :param ccs_minimize_roundtrips: Indicates whether network round-trips should + be minimized as part of cross-cluster search requests execution :param event_category_field: Field containing the event classification, such as process, file, or network. - :param expand_wildcards: + :param expand_wildcards: Whether to expand wildcard expression to concrete indices + that are open, closed or both. :param fetch_size: Maximum number of events to search at a time for sequence queries. :param fields: Array of wildcard (*) patterns. The response returns values for @@ -298,6 +304,8 @@ def search( __body: t.Dict[str, t.Any] = body if body is not None else {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices + if ccs_minimize_roundtrips is not None: + __query["ccs_minimize_roundtrips"] = ccs_minimize_roundtrips if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: diff --git a/elasticsearch/_sync/client/esql.py b/elasticsearch/_sync/client/esql.py index 410276d83..e34a26fb8 100644 --- a/elasticsearch/_sync/client/esql.py +++ b/elasticsearch/_sync/client/esql.py @@ -31,6 +31,8 @@ class EsqlClient(NamespacedClient): "columnar", "filter", "include_ccs_metadata", + "keep_alive", + "keep_on_completion", "locale", "params", "profile", @@ -84,13 +86,15 @@ def async_query(
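A hedged sketch of the new `ccs_minimize_roundtrips` flag on `eql.search` (the cross-cluster index pattern and EQL query text are assumptions; `client` as in the earlier sketch):

    resp = client.eql.search(
        index="remote_cluster:logs-endpoint-*",
        query='process where process.name == "regsvr32.exe"',
        ccs_minimize_roundtrips=True,   # sent as a query-string parameter
    )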

    The API accepts the same parameters and request body as the synchronous query API, along with additional async-related properties.
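    A sketch of the async submission path, assuming a configured client; `keep_alive` and `keep_on_completion` are now carried in the request body, which the generated method handles for the caller:

        resp = client.esql.async_query(
            query="FROM logs-* | STATS events = COUNT(*) BY host.name | LIMIT 10",
            keep_on_completion=True,  # retain results even if the query finishes early
            keep_alive="2h",          # how long stored results are kept
        )
        if resp["is_running"]:
            query_id = resp["id"]  # poll later with async_query_get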

    - ``_ + ``_ :param query: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. :param allow_partial_results: If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other - clusters and shards. + clusters and shards. If `false`, the query will fail if there are any failures. + To override the default behavior, you can set the `esql.query.allow_partial_results` + cluster setting to `false`. :param columnar: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one @@ -153,10 +157,6 @@ def async_query( __query["format"] = format if human is not None: __query["human"] = human - if keep_alive is not None: - __query["keep_alive"] = keep_alive - if keep_on_completion is not None: - __query["keep_on_completion"] = keep_on_completion if pretty is not None: __query["pretty"] = pretty if not __body: @@ -168,6 +168,10 @@ def async_query( __body["filter"] = filter if include_ccs_metadata is not None: __body["include_ccs_metadata"] = include_ccs_metadata + if keep_alive is not None: + __body["keep_alive"] = keep_alive + if keep_on_completion is not None: + __body["keep_on_completion"] = keep_on_completion if locale is not None: __body["locale"] = locale if params is not None: @@ -212,7 +216,7 @@ def async_query_delete( - ``_ + ``_ :param id: The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the @@ -250,6 +254,14 @@ def async_query_get( drop_null_columns: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + format: t.Optional[ + t.Union[ + str, + t.Literal[ + "arrow", "cbor", "csv", "json", "smile", "tsv", "txt", "yaml" + ], + ] + ] = None, human: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, @@ -265,7 +277,7 @@ def async_query_get( If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can retrieve the results using this API.

    - ``_ + ``_ :param id: The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the @@ -275,6 +287,7 @@ def async_query_get( will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. + :param format: A short version of the Accept header, for example `json` or `yaml`. :param keep_alive: The period for which the query and its results are stored in the cluster. When this period expires, the query and its results are deleted, even if the query is still ongoing. @@ -295,6 +308,8 @@ def async_query_get( __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path + if format is not None: + __query["format"] = format if human is not None: __query["human"] = human if keep_alive is not None: @@ -332,7 +347,7 @@ def async_query_stop( If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can stop it.
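    A sketch of polling and stopping an async ES|QL query; `query_id` is assumed to come from a previous `async_query` call, and the new `format` parameter is shown:

        # Fetch whatever is available so far, rendered as JSON.
        partial = client.esql.async_query_get(id=query_id, format="json", keep_alive="5m")

        # Stop execution early; results collected up to this point are returned.
        final = client.esql.async_query_stop(id=query_id)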

    - ``_ + ``_ :param id: The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the @@ -420,13 +435,15 @@ def query( Get search results for an ES|QL (Elasticsearch query language) query.

    - ``_ + ``_ :param query: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. :param allow_partial_results: If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other - clusters and shards. + clusters and shards. If `false`, the query will fail if there are any failures. + To override the default behavior, you can set the `esql.query.allow_partial_results` + cluster setting to `false`. :param columnar: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one diff --git a/elasticsearch/_sync/client/features.py b/elasticsearch/_sync/client/features.py index 70019a24a..a4e1964c4 100644 --- a/elasticsearch/_sync/client/features.py +++ b/elasticsearch/_sync/client/features.py @@ -48,7 +48,7 @@ def get_features( In order for a feature state to be listed in this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node.

    - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. """ @@ -102,7 +102,7 @@ def reset_features(

    IMPORTANT: The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes.

    - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. """ diff --git a/elasticsearch/_sync/client/fleet.py b/elasticsearch/_sync/client/fleet.py index ba0285e46..4ee9edc2f 100644 --- a/elasticsearch/_sync/client/fleet.py +++ b/elasticsearch/_sync/client/fleet.py @@ -53,7 +53,7 @@ def global_checkpoints( This API is designed for internal use by the Fleet server project.

    - ``_ + ``_ :param index: A single index or index alias that resolves to a single index. :param checkpoints: A comma separated list of previous global checkpoints. When @@ -138,12 +138,12 @@ def msearch( """ .. raw:: html -

    Executes several fleet searches with a single API request. - The API follows the same structure as the multi search API. However, similar to the fleet search API, it - supports the wait_for_checkpoints parameter.

    +

    Executes several fleet searches with a single API request.

    +

    The API follows the same structure as the multi search (_msearch) API. + However, similar to the fleet search API, it supports the wait_for_checkpoints parameter.
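    A rough sketch of a fleet multi search, assuming `searches` follows the same header/body pairing as `_msearch`; the index name and the `wait_for_checkpoints` value are illustrative only:

        resp = client.fleet.msearch(
            index="my-index",
            searches=[
                {},  # header for the first search
                {"query": {"match_all": {}}},
                {},  # header for the second search
                {"query": {"term": {"status": "open"}}},
            ],
            wait_for_checkpoints=[0],  # assumed: wait for this global checkpoint first
        )
        for item in resp["responses"]:
            print(item["status"])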

    - ``_ + ``_ :param searches: :param index: A single target to search. If the target is an index alias, it @@ -154,9 +154,9 @@ def msearch( example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. :param allow_partial_search_results: If true, returns partial results if there - are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). - If false, returns an error with no partial results. Defaults to the configured - cluster setting `search.default_allow_partial_results` which is true by default. + are shard request timeouts or shard failures. If false, returns an error + with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` + which is true by default. :param ccs_minimize_roundtrips: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. :param expand_wildcards: Type of index that wildcard expressions can match. If @@ -392,7 +392,7 @@ def search( after provided checkpoint has been processed and is visible for searches inside of Elasticsearch.

    - ``_ + ``_ :param index: A single target to search. If the target is an index alias, it must resolve to a single index. @@ -400,9 +400,9 @@ def search( :param aggs: :param allow_no_indices: :param allow_partial_search_results: If true, returns partial results if there - are shard request timeouts or [shard failures](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-replication.html#shard-failures). - If false, returns an error with no partial results. Defaults to the configured - cluster setting `search.default_allow_partial_results` which is true by default. + are shard request timeouts or shard failures. If false, returns an error + with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results` + which is true by default. :param analyze_wildcard: :param analyzer: :param batched_reduce_size: diff --git a/elasticsearch/_sync/client/graph.py b/elasticsearch/_sync/client/graph.py index 127b6172c..bd19e233e 100644 --- a/elasticsearch/_sync/client/graph.py +++ b/elasticsearch/_sync/client/graph.py @@ -55,7 +55,7 @@ def explore( You can exclude vertices that have already been returned.

    - ``_ + ``_ :param index: Name of the index. :param connections: Specifies one or more fields from which you want to extract terms diff --git a/elasticsearch/_sync/client/ilm.py b/elasticsearch/_sync/client/ilm.py index f3dd02684..2e40f6d21 100644 --- a/elasticsearch/_sync/client/ilm.py +++ b/elasticsearch/_sync/client/ilm.py @@ -44,7 +44,7 @@ def delete_lifecycle( You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error.

    - ``_ + ``_ :param name: Identifier for the policy. :param master_timeout: Period to wait for a connection to the master node. If @@ -102,7 +102,7 @@ def explain_lifecycle(

    The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and information about any failures.
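    A small sketch, assuming an ILM-managed index named `my-index-000001`:

        resp = client.ilm.explain_lifecycle(index="my-index-000001")
        for name, info in resp["indices"].items():
            # Current phase/step plus any failure details, as described above.
            print(name, info.get("phase"), info.get("step"))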

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to target. Supports wildcards (`*`). To target all data streams and indices, use `*` @@ -163,7 +163,7 @@ def get_lifecycle(

    Get lifecycle policies.

    - ``_ + ``_ :param name: Identifier for the policy. :param master_timeout: Period to wait for a connection to the master node. If @@ -218,7 +218,7 @@ def get_status(

    Get the current index lifecycle management status.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ilm/status" @@ -274,7 +274,7 @@ def migrate_to_data_tiers( Use the stop ILM and get ILM status APIs to wait until the reported operation mode is STOPPED.

    - ``_ + ``_ :param dry_run: If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration. This provides @@ -347,7 +347,7 @@ def move_to_step( An index cannot move to a step that is not part of its policy.

    - ``_ + ``_ :param index: The name of the index whose lifecycle step is to change :param current_step: The step that the index is expected to be in. @@ -415,7 +415,7 @@ def put_lifecycle(

    NOTE: Only the latest version of the policy is stored, you cannot revert to previous versions.
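    A minimal policy sketch; the policy name and phase thresholds are placeholders:

        client.ilm.put_lifecycle(
            name="logs-policy",
            policy={
                "phases": {
                    "hot": {
                        "actions": {"rollover": {"max_age": "7d", "max_primary_shard_size": "50gb"}}
                    },
                    "delete": {"min_age": "30d", "actions": {"delete": {}}},
                }
            },
        )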

    - ``_ + ``_ :param name: Identifier for the policy. :param master_timeout: Period to wait for a connection to the master node. If @@ -479,7 +479,7 @@ def remove_policy( It also stops managing the indices.

    - ``_ + ``_ :param index: The name of the index to remove policy on """ @@ -525,7 +525,7 @@ def retry( Use the explain lifecycle state API to determine whether an index is in the ERROR step.

    - ``_ + ``_ :param index: The name of the indices (comma-separated) whose failed lifecycle step is to be retried @@ -573,7 +573,7 @@ def start( Restarting ILM is necessary only when it has been stopped using the stop ILM API.

    - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and @@ -627,7 +627,7 @@ def stop( Use the get ILM status API to check whether ILM is running.

    - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and diff --git a/elasticsearch/_sync/client/indices.py b/elasticsearch/_sync/client/indices.py index a25c3bc0b..90dcf60f4 100644 --- a/elasticsearch/_sync/client/indices.py +++ b/elasticsearch/_sync/client/indices.py @@ -62,7 +62,7 @@ def add_block( Index blocks limit the operations allowed on an index by blocking specific operation types.

    - ``_ + ``_ :param index: A comma-separated list or wildcard expression of index names used to limit the request. By default, you must explicitly name the indices you @@ -173,7 +173,7 @@ def analyze( The _analyze endpoint without a specified index will always use 10000 as its limit.
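    A quick sketch of the limit behaviour described above: with no index specified, the `_analyze` call below is capped at 10000 tokens:

        resp = client.indices.analyze(
            analyzer="standard",
            text="The quick brown fox jumped over the lazy dog",
        )
        print([t["token"] for t in resp["tokens"]])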

    - ``_ + ``_ :param index: Index used to derive the analyzer. If specified, the `analyzer` or field parameter overrides this value. If no index is specified or the @@ -265,7 +265,7 @@ def cancel_migrate_reindex(

    Cancel a migration reindex attempt for a data stream or index.

    - ``_ + ``_ :param index: The index or data stream name """ @@ -327,7 +327,7 @@ def clear_cache( To clear the cache only of specific fields, use the fields parameter.

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -338,7 +338,7 @@ def clear_cache( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param fielddata: If `true`, clears the fields cache. Use the `fields` parameter to clear the cache of specific fields only. :param fields: Comma-separated list of field names used to limit the `fielddata` @@ -449,7 +449,7 @@ def clone(

    Because the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well.

    - ``_ + ``_ :param index: Name of the source index to clone. :param target: Name of the target index to create. @@ -553,7 +553,7 @@ def close( Closing indices can be turned off with the cluster settings API by setting cluster.indices.close.enable to false.

    - ``_ + ``_ :param index: Comma-separated list or wildcard expression of index names used to limit the request. @@ -563,7 +563,7 @@ def close( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param master_timeout: Period to wait for a connection to the master node. If @@ -654,7 +654,7 @@ def create( Note that changing this setting will also affect the wait_for_active_shards value on all subsequent write operations.

    - ``_ + ``_ :param index: Name of the index you wish to create. :param aliases: Aliases for the index. @@ -731,7 +731,7 @@ def create_data_stream(

    You must have a matching index template with data stream enabled.

    - ``_ + ``_ :param name: Name of the data stream, which must meet the following criteria: Lowercase only; Cannot include `\\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, @@ -794,7 +794,7 @@ def create_from(

    Copy the mappings and settings from the source index to a destination index while allowing request settings and mappings to override the source values.

    - ``_ + ``_ :param source: The source index or data stream name :param dest: The destination index or data stream name @@ -861,7 +861,7 @@ def data_streams_stats(

    Get statistics for one or more data streams.

    - ``_ + ``_ :param name: Comma-separated list of data streams used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams in a @@ -930,7 +930,7 @@ def delete( You can then use the delete index API to delete the previous write index.

    - ``_ + ``_ :param index: Comma-separated list of indices to delete. You cannot specify index aliases. By default, this parameter does not support wildcards (`*`) or `_all`. @@ -942,7 +942,7 @@ def delete( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param master_timeout: Period to wait for a connection to the master node. If @@ -1004,7 +1004,7 @@ def delete_alias( Removes a data stream or index from an alias.

    - ``_ + ``_ :param index: Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). @@ -1072,7 +1072,7 @@ def delete_data_lifecycle( Removes the data stream lifecycle from a data stream, rendering it not managed by the data stream lifecycle.

    - ``_ + ``_ :param name: A comma-separated list of data streams of which the data stream lifecycle will be deleted; use `*` to get all data streams @@ -1136,7 +1136,7 @@ def delete_data_stream( Deletes one or more data streams and their backing indices.

    - ``_ + ``_ :param name: Comma-separated list of data streams to delete. Wildcard (`*`) expressions are supported. @@ -1194,7 +1194,7 @@ def delete_index_template( existing templates.

    - ``_ + ``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. @@ -1246,10 +1246,11 @@ def delete_template( """ .. raw:: html -

    Delete a legacy index template.

    +

    Delete a legacy index template. + IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.

    - ``_ + ``_ :param name: The name of the legacy index template to delete. Wildcard (`*`) expressions are supported. @@ -1321,7 +1322,7 @@ def disk_usage( The stored size of the _id field is likely underestimated while the _source field is overestimated.

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. It’s recommended to execute this API with a single @@ -1404,7 +1405,7 @@ def downsample( The source index must be read only (index.blocks.write: true).

    - ``_ + ``_ :param index: Name of the time series index to downsample. :param target_index: Name of the index to create. @@ -1476,7 +1477,7 @@ def exists( Check if one or more indices, index aliases, or data streams exist.

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases. Supports wildcards (`*`). @@ -1486,7 +1487,7 @@ def exists( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param flat_settings: If `true`, returns settings in flat format. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. @@ -1570,7 +1571,7 @@ def exists_alias( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. :param local: If `true`, the request retrieves information from the local node @@ -1697,7 +1698,7 @@ def exists_template(

    IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.

    - ``_ + ``_ :param name: A comma-separated list of index template names used to limit the request. Wildcard (`*`) expressions are supported. @@ -1755,7 +1756,7 @@ def explain_data_lifecycle( Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution.

    - ``_ + ``_ :param index: The name of the index to explain :param include_defaults: indicates if the API should return the default values @@ -1822,7 +1823,7 @@ def field_usage_stats( A given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times.

    - ``_ + ``_ :param index: Comma-separated list or wildcard expression of index names used to limit the request. @@ -1907,7 +1908,7 @@ def flush( If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called.

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to flush. Supports wildcards (`*`). To flush all data streams and indices, omit this @@ -1918,7 +1919,7 @@ def flush( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param force: If `true`, the request forces a flush even if there are no changes to commit to the index. :param ignore_unavailable: If `false`, the request returns an error if it targets @@ -2032,7 +2033,7 @@ def forcemerge( - ``_ + ``_ :param index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices @@ -2130,7 +2131,7 @@ def get( stream’s backing indices.

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard expressions (*) are supported. @@ -2223,7 +2224,7 @@ def get_alias( Retrieves information for one or more data stream or index aliases.

    - ``_ + ``_ :param index: Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). To target all data streams and indices, @@ -2236,7 +2237,7 @@ def get_alias( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param local: If `true`, the request retrieves information from the local node @@ -2309,14 +2310,13 @@ def get_data_lifecycle(

    Get the data stream lifecycle configuration of one or more data streams.

    - ``_ + ``_ :param name: Comma-separated list of data streams to limit the request. Supports wildcards (`*`). To target all data streams, omit this parameter or use `*` or `_all`. :param expand_wildcards: Type of data stream that wildcard patterns can match. - Supports comma-separated values, such as `open,hidden`. Valid values are: - `all`, `open`, `closed`, `hidden`, `none`. + Supports comma-separated values, such as `open,hidden`. :param include_defaults: If `true`, return all default settings in the response. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and @@ -2367,7 +2367,7 @@ def get_data_lifecycle_stats( Get statistics about the data streams that are managed by a data stream lifecycle.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_lifecycle/stats" @@ -2418,7 +2418,7 @@ def get_data_stream(

    Get information about one or more data streams.

    - ``_ + ``_ :param name: Comma-separated list of data stream names used to limit the request. Wildcard (`*`) expressions are supported. If omitted, all data streams are @@ -2499,7 +2499,7 @@ def get_field_mapping(

    This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields.

    - ``_ + ``_ :param fields: Comma-separated list or wildcard expression of fields used to limit returned information. Supports wildcards (`*`). @@ -2512,7 +2512,7 @@ def get_field_mapping( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param include_defaults: If `true`, return all default settings in the response. @@ -2580,7 +2580,7 @@ def get_index_template( Get information about one or more index templates.

    - ``_ + ``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. @@ -2657,7 +2657,7 @@ def get_mapping( For data streams, the API retrieves mappings for the stream’s backing indices.

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -2668,7 +2668,7 @@ def get_mapping( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param local: If `true`, the request retrieves information from the local node @@ -2731,7 +2731,7 @@ def get_migrate_reindex_status(

    Get the status of a migration reindex attempt for a data stream or index.

    - ``_ + ``_ :param index: The index or data stream name. """ @@ -2791,7 +2791,7 @@ def get_settings( For data streams, it returns setting information for the stream's backing indices.

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -2878,12 +2878,12 @@ def get_template( """ .. raw:: html -

    Get index templates. +

    Get legacy index templates. Get information about one or more index templates.

    IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.

    - ``_ + ``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (`*`) expressions are supported. To return all index templates, @@ -2950,7 +2950,7 @@ def migrate_reindex( The persistent task ID is returned immediately and the reindexing work is completed in that task.

    - ``_ + ``_ :param reindex: """ @@ -3146,7 +3146,7 @@ def open(

    Because opening or closing an index allocates its shards, the wait_for_active_shards setting on index creation applies to the _open and _close index actions as well.

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). By default, you must explicitly @@ -3160,7 +3160,7 @@ def open( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param master_timeout: Period to wait for a connection to the master node. If @@ -3419,8 +3419,7 @@ def put_data_lifecycle( for this data stream. A data stream lifecycle that's disabled (enabled: `false`) will have no effect on the data stream. :param expand_wildcards: Type of data stream that wildcard patterns can match. - Supports comma-separated values, such as `open,hidden`. Valid values are: - `all`, `hidden`, `open`, `closed`, `none`. + Supports comma-separated values, such as `open,hidden`. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. @@ -3716,7 +3715,7 @@ def put_mapping( Instead, add an alias field to create an alternate field name.

    - ``_ + ``_ :param index: A comma-separated list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. @@ -3732,7 +3731,7 @@ def put_mapping( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param field_names: Control whether field names are enabled for the index. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. @@ -3850,8 +3849,34 @@ def put_settings( Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default.

    To revert a setting to the default value, use a null value. - The list of per-index settings that can be updated dynamically on live indices can be found in index module documentation. + The list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation. To preserve existing settings from being updated, set the preserve_existing parameter to true.

    +

    There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example:

        {
          "number_of_replicas": 1
        }

    Or you can use an index setting object:

        {
          "index": {
            "number_of_replicas": 1
          }
        }

    Or you can use dot notation:

        {
          "index.number_of_replicas": 1
        }

    Or you can embed any of the aforementioned options in a settings object. For example:

        {
          "settings": {
            "index": {
              "number_of_replicas": 1
            }
          }
        }
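    From the Python client, each of the shapes above goes through the `settings` body parameter; the index name `my-index` is a placeholder:

        # Equivalent ways of updating the same dynamic setting.
        client.indices.put_settings(index="my-index", settings={"number_of_replicas": 1})
        client.indices.put_settings(index="my-index", settings={"index": {"number_of_replicas": 1}})
        client.indices.put_settings(index="my-index", settings={"index.number_of_replicas": 1})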

    NOTE: You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. @@ -3862,7 +3887,7 @@ def put_settings( To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it.

    - ``_ + ``_ :param settings: :param index: Comma-separated list of data streams, indices, and aliases used @@ -3971,7 +3996,7 @@ def put_template( """ .. raw:: html -

    Create or update an index template. +

    Create or update a legacy index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name.

    IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.

    @@ -3988,7 +4013,7 @@ def put_template( NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order.

    - ``_ + ``_ :param name: The name of the template :param aliases: Aliases for the index. @@ -4060,10 +4085,20 @@ def recovery( *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, active_only: t.Optional[bool] = None, + allow_no_indices: t.Optional[bool] = None, detailed: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, + expand_wildcards: t.Optional[ + t.Union[ + t.Sequence[ + t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] + ], + t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], + ] + ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -4090,14 +4125,23 @@ def recovery( This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API.
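    A sketch that exercises the wildcard-handling parameters added to `recovery` in this hunk; the pattern `logs-*` is a placeholder:

        recovery = client.indices.recovery(
            index="logs-*",
            active_only=True,        # only ongoing shard recoveries
            detailed=True,
            expand_wildcards="open,hidden",
            allow_no_indices=True,
            ignore_unavailable=True,
        ).body  # plain dict keyed by index name
        for index_name, info in recovery.items():
            print(index_name, len(info["shards"]))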

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param active_only: If `true`, the response only includes ongoing shard recoveries. + :param allow_no_indices: If `false`, the request returns an error if any wildcard + expression, index alias, or `_all` value targets only missing or closed indices. + This behavior applies even if the request targets other open indices. :param detailed: If `true`, the response includes detailed information about shard recoveries. + :param expand_wildcards: Type of index that wildcard patterns can match. If the + request can target data streams, this argument determines whether wildcard + expressions match hidden data streams. Supports comma-separated values, such + as `open,hidden`. + :param ignore_unavailable: If `false`, the request returns an error if it targets + a missing or closed index. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: @@ -4109,14 +4153,20 @@ def recovery( __query: t.Dict[str, t.Any] = {} if active_only is not None: __query["active_only"] = active_only + if allow_no_indices is not None: + __query["allow_no_indices"] = allow_no_indices if detailed is not None: __query["detailed"] = detailed if error_trace is not None: __query["error_trace"] = error_trace + if expand_wildcards is not None: + __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human + if ignore_unavailable is not None: + __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -4164,7 +4214,7 @@ def refresh( This option ensures the indexing operation waits for a periodic refresh before running the search.

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -4175,7 +4225,7 @@ def refresh( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. """ @@ -4248,7 +4298,7 @@ def reload_search_analyzers( This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future.

    - ``_ + ``_ :param index: A comma-separated list of index names to reload analyzers for :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves @@ -4355,7 +4405,7 @@ def resolve_cluster( If a connection was (re-)established, this will also cause the remote/info endpoint to now indicate a connected status.

    - ``_ + ``_ :param name: A comma-separated list of names or index patterns for the indices, aliases, and data streams to resolve. Resources on remote clusters can be @@ -4374,10 +4424,9 @@ def resolve_cluster( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. - NOTE: This option is only supported when specifying an index expression. - You will get an error if you specify index options to the `_resolve/cluster` - API endpoint that takes no index expression. + as `open,hidden`. NOTE: This option is only supported when specifying an + index expression. You will get an error if you specify index options to the + `_resolve/cluster` API endpoint that takes no index expression. :param ignore_throttled: If true, concrete, expanded, or aliased indices are ignored when frozen. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to @@ -4457,7 +4506,7 @@ def resolve_index( Multiple patterns and remote clusters are supported.

    - ``_ + ``_ :param name: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified @@ -4470,7 +4519,7 @@ def resolve_index( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. """ @@ -4559,7 +4608,7 @@ def rollover( If you roll over the alias on May 7, 2099, the new index's name is my-index-2099.05.07-000002.
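    A sketch of the naming behaviour described above; the alias and condition are placeholders:

        resp = client.indices.rollover(alias="my-alias", conditions={"max_age": "7d"})
        # new_index follows the incrementing/date-math convention explained above.
        print(resp["old_index"], "->", resp["new_index"])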

    - ``_ + ``_ :param alias: Name of the data stream or index alias to roll over. :param new_index: Name of the index to create. Supports date math. Data streams @@ -4674,7 +4723,7 @@ def segments( For data streams, the API returns information about the stream's backing indices.

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams @@ -4685,7 +4734,7 @@ def segments( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param verbose: If `true`, the request returns a verbose response. @@ -4766,7 +4815,7 @@ def shard_stores(

    By default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards.

    - ``_ + ``_ :param index: List of data streams, indices, and aliases used to limit the request. :param allow_no_indices: If false, the request returns an error if any wildcard @@ -4868,7 +4917,7 @@ def shrink( - ``_ + ``_ :param index: Name of the source index to shrink. :param target: Name of the target index to create. @@ -4949,7 +4998,7 @@ def simulate_index_template( Get the index configuration that would be applied to the specified index from an existing index template.

    - ``_ + ``_ :param name: Name of the index to simulate :param cause: User defined reason for dry-run creating the new template for simulation @@ -5039,7 +5088,7 @@ def simulate_template( Get the index configuration that would be applied by a particular index template.

    - ``_ + ``_ :param name: Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit this parameter and specify the template @@ -5211,7 +5260,7 @@ def split( - ``_ + ``_ :param index: Name of the source index to split. :param target: Name of the target index to create. @@ -5313,7 +5362,7 @@ def stats( Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.

    - ``_ + ``_ :param index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices @@ -5420,7 +5469,7 @@ def unfreeze( When a frozen index is unfrozen, the index goes through the normal recovery process and becomes writeable again.

    - ``_ + ``_ :param index: Identifier for the index. :param allow_no_indices: If `false`, the request returns an error if any wildcard @@ -5577,7 +5626,7 @@ def validate_query( Validates a query without running it.
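    A sketch against a placeholder index; `explain=True` asks for the detailed error information mentioned in the parameter list that follows:

        resp = client.indices.validate_query(
            index="my-index",
            query={"range": {"timestamp": {"gte": "now-1d"}}},
            explain=True,
        )
        if not resp["valid"]:
            print(resp["explanations"])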

    - ``_ + ``_ :param index: Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this @@ -5598,7 +5647,7 @@ def validate_query( :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such - as `open,hidden`. Valid values are: `all`, `open`, `closed`, `hidden`, `none`. + as `open,hidden`. :param explain: If `true`, the response returns detailed information if an error has occurred. :param ignore_unavailable: If `false`, the request returns an error if it targets diff --git a/elasticsearch/_sync/client/inference.py b/elasticsearch/_sync/client/inference.py index e15e6e226..1478883e8 100644 --- a/elasticsearch/_sync/client/inference.py +++ b/elasticsearch/_sync/client/inference.py @@ -47,7 +47,7 @@ def completion(

    Perform completion inference on the service

    - ``_ + ``_ :param inference_id: The inference Id :param input: Inference input. Either a string or an array of strings. @@ -123,7 +123,7 @@ def delete(

    Delete an inference endpoint

    - ``_ + ``_ :param inference_id: The inference identifier. :param task_type: The task type @@ -197,7 +197,7 @@ def get(

    Get an inference endpoint

    - ``_ + ``_ :param task_type: The task type :param inference_id: The inference Id @@ -235,7 +235,7 @@ def get( ) @_rewrite_parameters( - body_fields=("input", "query", "task_settings"), + body_fields=("input", "input_type", "query", "task_settings"), ) def inference( self, @@ -257,6 +257,7 @@ def inference( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, + input_type: t.Optional[str] = None, pretty: t.Optional[bool] = None, query: t.Optional[str] = None, task_settings: t.Optional[t.Any] = None, @@ -277,13 +278,22 @@ def inference( - ``_ + ``_ :param inference_id: The unique identifier for the inference endpoint. :param input: The text on which you want to perform the inference task. It can be a single string or an array. > info > Inference endpoints for the `completion` task type currently only support a single string as input. :param task_type: The type of inference task that the model performs. + :param input_type: Specifies the input data type for the text embedding model. + The `input_type` parameter only applies to Inference Endpoints with the `text_embedding` + task type. Possible values include: * `SEARCH` * `INGEST` * `CLASSIFICATION` + * `CLUSTERING` Not all services support all values. Unsupported values will + trigger a validation exception. Accepted values depend on the configured + inference service, refer to the relevant service-specific documentation for + more info. > info > The `input_type` parameter specified on the root level + of the request body will take precedence over the `input_type` parameter + specified in `task_settings`. :param query: The query input, which is required only for the `rerank` task. It is not required for other tasks. :param task_settings: Task settings for the individual inference request. These @@ -322,6 +332,8 @@ def inference( if not __body: if input is not None: __body["input"] = input + if input_type is not None: + __body["input_type"] = input_type if query is not None: __body["query"] = query if task_settings is not None: @@ -366,26 +378,45 @@ def put( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html -
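    A sketch of the body-level `input_type` introduced here, assuming a hypothetical `text_embedding` endpoint named `my-e5-endpoint`:

        resp = client.inference.inference(
            task_type="text_embedding",
            inference_id="my-e5-endpoint",
            input=["a few words to embed"],
            input_type="SEARCH",  # INGEST, CLASSIFICATION, CLUSTERING are accepted by some services
        )
        print(len(resp["text_embedding"][0]["embedding"]))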

    Create an inference endpoint. - When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    +

    Create an inference endpoint.

    IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.

    +

    The following integrations are available through the inference API. You can find the available task types next to the integration name:

    +
      +
    • AlibabaCloud AI Search (completion, rerank, sparse_embedding, text_embedding)
    • Amazon Bedrock (completion, text_embedding)
    • Anthropic (completion)
    • Azure AI Studio (completion, text_embedding)
    • Azure OpenAI (completion, text_embedding)
    • Cohere (completion, rerank, text_embedding)
    • DeepSeek (completion, chat_completion)
    • Elasticsearch (rerank, sparse_embedding, text_embedding - this service is for built-in models and models uploaded through Eland)
    • ELSER (sparse_embedding)
    • Google AI Studio (completion, text_embedding)
    • Google Vertex AI (rerank, text_embedding)
    • Hugging Face (chat_completion, completion, rerank, text_embedding)
    • Mistral (chat_completion, completion, text_embedding)
    • OpenAI (chat_completion, completion, text_embedding)
    • VoyageAI (text_embedding, rerank)
    • Watsonx inference integration (text_embedding)
    • JinaAI (text_embedding, rerank)
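    A sketch of creating one of the endpoints listed above, using the Elasticsearch service with a built-in model purely as an illustration; the new `timeout` query parameter is also shown:

        client.inference.put(
            task_type="text_embedding",
            inference_id="my-e5-endpoint",
            inference_config={
                "service": "elasticsearch",
                "service_settings": {
                    "model_id": ".multilingual-e5-small",
                    "num_allocations": 1,
                    "num_threads": 1,
                },
            },
            timeout="60s",  # how long to wait for the endpoint to be created
        )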
    - ``_ + ``_ :param inference_id: The inference Id :param inference_config: - :param task_type: The task type + :param task_type: The task type. Refer to the integration list in the API description + for the available task types. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") @@ -416,6 +447,8 @@ def put( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout __body = inference_config if inference_config is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] @@ -451,6 +484,7 @@ def put_alibabacloud( human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -458,14 +492,9 @@ def put_alibabacloud(

    Create an AlibabaCloud AI Search inference endpoint.

    Create an inference endpoint to perform an inference task with the alibabacloud-ai-search service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    - ``_ + ``_ :param task_type: The type of the inference task that the model will perform. :param alibabacloud_inference_id: The unique identifier of the inference endpoint. @@ -476,6 +505,8 @@ def put_alibabacloud( :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -502,6 +533,8 @@ def put_alibabacloud( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -547,25 +580,21 @@ def put_amazonbedrock( human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

    Create an Amazon Bedrock inference endpoint.

    -

    Creates an inference endpoint to perform an inference task with the amazonbedrock service.

    +

    Create an inference endpoint to perform an inference task with the amazonbedrock service.

    info You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    - ``_ + ``_ :param task_type: The type of the inference task that the model will perform. :param amazonbedrock_inference_id: The unique identifier of the inference endpoint. @@ -576,6 +605,8 @@ def put_amazonbedrock( :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -602,6 +633,8 @@ def put_amazonbedrock( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -647,6 +680,7 @@ def put_anthropic( human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -654,14 +688,9 @@ def put_anthropic(

    Create an Anthropic inference endpoint.

    Create an inference endpoint to perform an inference task with the anthropic service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    - ``_ + ``_ :param task_type: The task type. The only valid task type for the model to perform is `completion`. @@ -673,6 +702,8 @@ def put_anthropic( :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -699,6 +730,8 @@ def put_anthropic( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -744,6 +777,7 @@ def put_azureaistudio( human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -751,14 +785,9 @@ def put_azureaistudio(

    Create an Azure AI Studio inference endpoint.

    Create an inference endpoint to perform an inference task with the azureaistudio service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    - ``_ + ``_ :param task_type: The type of the inference task that the model will perform. :param azureaistudio_inference_id: The unique identifier of the inference endpoint. @@ -769,6 +798,8 @@ def put_azureaistudio( :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -795,6 +826,8 @@ def put_azureaistudio( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -840,6 +873,7 @@ def put_azureopenai( human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -853,14 +887,9 @@ def put_azureopenai(
  • GPT-3.5
  • The list of embeddings models that you can choose from in your deployment can be found in the Azure models documentation.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
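A comparable sketch for the Azure OpenAI service; again, only the method name, service value, and the new timeout parameter are taken from this hunk, while the service_settings keys (api_key, resource_name, deployment_id, api_version) are assumptions.

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    client.inference.put_azureopenai(
        task_type="text_embedding",
        azureopenai_inference_id="my-azure-openai-endpoint",
        service="azureopenai",
        service_settings={
            # assumed key names; consult the Azure OpenAI service docs
            "api_key": "<api-key>",
            "resource_name": "<resource-name>",
            "deployment_id": "<deployment-id>",
            "api_version": "<api-version>",
        },
        timeout="30s",
    )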

    - ``_ + ``_ :param task_type: The type of the inference task that the model will perform. NOTE: The `chat_completion` task type only supports streaming and only through @@ -873,6 +902,8 @@ def put_azureopenai( :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -899,6 +930,8 @@ def put_azureopenai( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -944,6 +977,7 @@ def put_cohere( human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -951,14 +985,9 @@ def put_cohere(

    Create a Cohere inference endpoint.

    Create an inference endpoint to perform an inference task with the cohere service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    - ``_ + ``_ :param task_type: The type of the inference task that the model will perform. :param cohere_inference_id: The unique identifier of the inference endpoint. @@ -969,6 +998,8 @@ def put_cohere( :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -993,6 +1024,8 @@ def put_cohere( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -1017,6 +1050,221 @@ def put_cohere( path_parts=__path_parts, ) + @_rewrite_parameters( + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), + ) + def put_custom( + self, + *, + task_type: t.Union[ + str, t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"] + ], + custom_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["custom"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

    Create a custom inference endpoint.

    +

The custom service gives more control over how to interact with external inference services that aren't explicitly supported through dedicated integrations. + The custom service gives you the ability to define the headers, url, query parameters, request body, and secrets. + The custom service supports template replacement, which lets you define templates that are replaced with the values associated with their keys. + Templates are portions of a string that start with ${ and end with }. + The parameters secret_parameters and task_settings are checked for keys for template replacement. Template replacement is supported in the request, headers, url, and query_parameters. + If the definition (key) is not found for a template, an error message is returned. + For example, given an endpoint definition like the following:

    +
    PUT _inference/text_embedding/test-text-embedding
    +          {
    +            "service": "custom",
    +            "service_settings": {
    +               "secret_parameters": {
    +                    "api_key": "<some api key>"
    +               },
    +               "url": "...endpoints.huggingface.cloud/v1/embeddings",
    +               "headers": {
    +                   "Authorization": "Bearer ${api_key}",
    +                   "Content-Type": "application/json"
    +               },
    +               "request": "{\\"input\\": ${input}}",
    +               "response": {
    +                   "json_parser": {
    +                       "text_embeddings":"$.data[*].embedding[*]"
    +                   }
    +               }
    +            }
    +          }
    +          
    +

To replace ${api_key}, the secret_parameters and task_settings are checked for a key named api_key.
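Expressed with the new put_custom method added in this patch, the endpoint definition above might be created like the sketch below. Only fields shown in the example above are used; the elided Hugging Face URL is kept as-is, and the inference id "test-text-embedding" comes from the example.

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    client.inference.put_custom(
        task_type="text_embedding",
        custom_inference_id="test-text-embedding",
        service="custom",
        service_settings={
            "secret_parameters": {"api_key": "<some api key>"},
            "url": "...endpoints.huggingface.cloud/v1/embeddings",  # elided in the source example
            "headers": {
                "Authorization": "Bearer ${api_key}",  # replaced from secret_parameters
                "Content-Type": "application/json",
            },
            "request": '{"input": ${input}}',  # ${input} is a pre-defined template
            "response": {
                "json_parser": {"text_embeddings": "$.data[*].embedding[*]"}
            },
        },
    )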

    +
    +

info: Templates should not be surrounded by quotes.

    +
    +

    Pre-defined templates:

    +
      +
    • ${input} refers to the array of input strings that comes from the input field of the subsequent inference requests.
    • +
    • ${input_type} refers to the input type translation values.
    • +
    • ${query} refers to the query field used specifically for reranking tasks.
    • +
    • ${top_n} refers to the top_n field available when performing rerank requests.
    • +
    • ${return_documents} refers to the return_documents field available when performing rerank requests.
    • +
    + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param custom_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `custom`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `custom` service. + :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if custom_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'custom_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "custom_inference_id": _quote(custom_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["custom_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_custom", + path_parts=__path_parts, + ) + + @_rewrite_parameters( + body_fields=("service", "service_settings", "chunking_settings"), + ) + def put_deepseek( + self, + *, + task_type: t.Union[str, t.Literal["chat_completion", "completion"]], + deepseek_inference_id: str, + service: t.Optional[t.Union[str, t.Literal["deepseek"]]] = None, + service_settings: t.Optional[t.Mapping[str, t.Any]] = None, + chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, + error_trace: t.Optional[bool] = None, + filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, + human: t.Optional[bool] = None, + pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, + body: t.Optional[t.Dict[str, t.Any]] = None, + ) -> ObjectApiResponse[t.Any]: + """ + .. raw:: html + +

    Create a DeepSeek inference endpoint.

    +

    Create an inference endpoint to perform an inference task with the deepseek service.
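A minimal sketch of calling the new put_deepseek method; task_type, service, and timeout follow the signature above, while the service_settings keys (api_key, model_id) are assumptions not shown in this diff.

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    client.inference.put_deepseek(
        task_type="chat_completion",
        deepseek_inference_id="my-deepseek-endpoint",
        service="deepseek",
        service_settings={
            "api_key": "<api-key>",   # assumed key name
            "model_id": "<model-id>", # assumed key name
        },
        timeout="30s",
    )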

    + + + ``_ + + :param task_type: The type of the inference task that the model will perform. + :param deepseek_inference_id: The unique identifier of the inference endpoint. + :param service: The type of service supported for the specified task type. In + this case, `deepseek`. + :param service_settings: Settings used to install the inference model. These + settings are specific to the `deepseek` service. + :param chunking_settings: The chunking configuration object. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. + """ + if task_type in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'task_type'") + if deepseek_inference_id in SKIP_IN_PATH: + raise ValueError("Empty value passed for parameter 'deepseek_inference_id'") + if service is None and body is None: + raise ValueError("Empty value passed for parameter 'service'") + if service_settings is None and body is None: + raise ValueError("Empty value passed for parameter 'service_settings'") + __path_parts: t.Dict[str, str] = { + "task_type": _quote(task_type), + "deepseek_inference_id": _quote(deepseek_inference_id), + } + __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["deepseek_inference_id"]}' + __query: t.Dict[str, t.Any] = {} + __body: t.Dict[str, t.Any] = body if body is not None else {} + if error_trace is not None: + __query["error_trace"] = error_trace + if filter_path is not None: + __query["filter_path"] = filter_path + if human is not None: + __query["human"] = human + if pretty is not None: + __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout + if not __body: + if service is not None: + __body["service"] = service + if service_settings is not None: + __body["service_settings"] = service_settings + if chunking_settings is not None: + __body["chunking_settings"] = chunking_settings + if not __body: + __body = None # type: ignore[assignment] + __headers = {"accept": "application/json"} + if __body is not None: + __headers["content-type"] = "application/json" + return self.perform_request( # type: ignore[return-value] + "PUT", + __path, + params=__query, + headers=__headers, + body=__body, + endpoint_id="inference.put_deepseek", + path_parts=__path_parts, + ) + @_rewrite_parameters( body_fields=( "service", @@ -1040,6 +1288,7 @@ def put_elasticsearch( human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -1062,7 +1311,7 @@ def put_elasticsearch( Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    - ``_ + ``_ :param task_type: The type of the inference task that the model will perform. :param elasticsearch_inference_id: The unique identifier of the inference endpoint. @@ -1074,6 +1323,8 @@ def put_elasticsearch( :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -1100,6 +1351,8 @@ def put_elasticsearch( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -1139,6 +1392,7 @@ def put_elser( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -1162,7 +1416,7 @@ def put_elser( Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    - ``_ + ``_ :param task_type: The type of the inference task that the model will perform. :param elser_inference_id: The unique identifier of the inference endpoint. @@ -1171,6 +1425,8 @@ def put_elser( :param service_settings: Settings used to install the inference model. These settings are specific to the `elser` service. :param chunking_settings: The chunking configuration object. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -1195,6 +1451,8 @@ def put_elser( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -1232,6 +1490,7 @@ def put_googleaistudio( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -1239,14 +1498,9 @@ def put_googleaistudio(

Create a Google AI Studio inference endpoint.

    Create an inference endpoint to perform an inference task with the googleaistudio service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    - ``_ + ``_ :param task_type: The type of the inference task that the model will perform. :param googleaistudio_inference_id: The unique identifier of the inference endpoint. @@ -1255,6 +1509,8 @@ def put_googleaistudio( :param service_settings: Settings used to install the inference model. These settings are specific to the `googleaistudio` service. :param chunking_settings: The chunking configuration object. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -1281,6 +1537,8 @@ def put_googleaistudio( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -1314,7 +1572,9 @@ def put_googleaistudio( def put_googlevertexai( self, *, - task_type: t.Union[str, t.Literal["rerank", "text_embedding"]], + task_type: t.Union[ + str, t.Literal["chat_completion", "completion", "rerank", "text_embedding"] + ], googlevertexai_inference_id: str, service: t.Optional[t.Union[str, t.Literal["googlevertexai"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, @@ -1324,6 +1584,7 @@ def put_googlevertexai( human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -1331,14 +1592,9 @@ def put_googlevertexai(

    Create a Google Vertex AI inference endpoint.

    Create an inference endpoint to perform an inference task with the googlevertexai service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    - ``_ + ``_ :param task_type: The type of the inference task that the model will perform. :param googlevertexai_inference_id: The unique identifier of the inference endpoint. @@ -1349,6 +1605,8 @@ def put_googlevertexai( :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -1375,6 +1633,8 @@ def put_googlevertexai( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -1400,12 +1660,19 @@ def put_googlevertexai( ) @_rewrite_parameters( - body_fields=("service", "service_settings", "chunking_settings"), + body_fields=( + "service", + "service_settings", + "chunking_settings", + "task_settings", + ), ) def put_hugging_face( self, *, - task_type: t.Union[str, t.Literal["text_embedding"]], + task_type: t.Union[ + str, t.Literal["chat_completion", "completion", "rerank", "text_embedding"] + ], huggingface_inference_id: str, service: t.Optional[t.Union[str, t.Literal["hugging_face"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, @@ -1414,17 +1681,22 @@ def put_hugging_face( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

    Create a Hugging Face inference endpoint.

    -

    Create an inference endpoint to perform an inference task with the hugging_face service.

    -

    You must first create an inference endpoint on the Hugging Face endpoint page to get an endpoint URL. - Select the model you want to use on the new endpoint creation page (for example intfloat/e5-small-v2), then select the sentence embeddings task under the advanced configuration section. - Create the endpoint and copy the URL after the endpoint initialization has been finished.

    -

    The following models are recommended for the Hugging Face service:

    +

    Create an inference endpoint to perform an inference task with the hugging_face service. + Supported tasks include: text_embedding, completion, and chat_completion.

    +

    To configure the endpoint, first visit the Hugging Face Inference Endpoints page and create a new endpoint. + Select a model that supports the task you intend to use.

    +

For Elastic's text_embedding task: + The selected model must support the Sentence Embeddings task. On the new endpoint creation page, select the Sentence Embeddings task under the Advanced Configuration section. + After the endpoint has initialized, copy the generated endpoint URL. + Recommended models for the text_embedding task (a usage sketch follows the list below):

    • all-MiniLM-L6-v2
    • all-MiniLM-L12-v2
    • @@ -1434,14 +1706,27 @@ def put_hugging_face(
    • multilingual-e5-base
    • multilingual-e5-small
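A usage sketch for the text_embedding flow just described, assuming service_settings carries the copied endpoint URL and an API key under the names url and api_key (these key names are not confirmed by this diff); the task_type, service value, and timeout parameter follow the updated signature.

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    client.inference.put_hugging_face(
        task_type="text_embedding",
        huggingface_inference_id="my-hugging-face-endpoint",
        service="hugging_face",
        service_settings={
            "url": "<copied-endpoint-url>",  # e.g. an all-MiniLM-L6-v2 endpoint
            "api_key": "<api-key>",
        },
        timeout="30s",
    )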
    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    +

For Elastic's chat_completion and completion tasks: + The selected model must support the Text Generation task and expose the OpenAI API. HuggingFace supports both serverless and dedicated endpoints for Text Generation. When creating a dedicated endpoint, select the Text Generation task. + After the endpoint is initialized (for dedicated) or ready (for serverless), ensure that it supports the OpenAI API and that its URL includes the /v1/chat/completions path. Then copy the full endpoint URL for use. + Recommended models for chat_completion and completion tasks:

    +
      +
    • Mistral-7B-Instruct-v0.2
    • +
    • QwQ-32B
    • +
    • Phi-3-mini-128k-instruct
    • +
    +

For Elastic's rerank task: + The selected model must support the sentence-ranking task and expose the OpenAI API. + HuggingFace currently supports only dedicated (not serverless) endpoints for the rerank task. + After the endpoint is initialized, copy the full endpoint URL for use. + Tested models for the rerank task:

    +
      +
    • bge-reranker-base
    • +
    • jina-reranker-v1-turbo-en-GGUF
    • +
    - ``_ + ``_ :param task_type: The type of the inference task that the model will perform. :param huggingface_inference_id: The unique identifier of the inference endpoint. @@ -1450,6 +1735,10 @@ def put_hugging_face( :param service_settings: Settings used to install the inference model. These settings are specific to the `hugging_face` service. :param chunking_settings: The chunking configuration object. + :param task_settings: Settings to configure the inference task. These settings + are specific to the task type you specified. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -1476,6 +1765,8 @@ def put_hugging_face( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -1483,6 +1774,8 @@ def put_hugging_face( __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings + if task_settings is not None: + __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} @@ -1519,6 +1812,7 @@ def put_jinaai( human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -1528,14 +1822,9 @@ def put_jinaai(

    Create an inference endpoint to perform an inference task with the jinaai service.

To review the available rerank models, refer to https://jina.ai/reranker. To review the available text_embedding models, refer to https://jina.ai/embeddings/.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    - ``_ + ``_ :param task_type: The type of the inference task that the model will perform. :param jinaai_inference_id: The unique identifier of the inference endpoint. @@ -1546,6 +1835,8 @@ def put_jinaai( :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -1570,6 +1861,8 @@ def put_jinaai( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -1600,7 +1893,9 @@ def put_jinaai( def put_mistral( self, *, - task_type: t.Union[str, t.Literal["text_embedding"]], + task_type: t.Union[ + str, t.Literal["chat_completion", "completion", "text_embedding"] + ], mistral_inference_id: str, service: t.Optional[t.Union[str, t.Literal["mistral"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, @@ -1609,30 +1904,27 @@ def put_mistral( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

    Create a Mistral inference endpoint.

    -

    Creates an inference endpoint to perform an inference task with the mistral service.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    +

    Create an inference endpoint to perform an inference task with the mistral service.
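A sketch of a text_embedding endpoint for the mistral service; the service_settings keys (api_key, model) are assumptions, while the rest follows the updated signature, including the new timeout parameter.

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    client.inference.put_mistral(
        task_type="text_embedding",
        mistral_inference_id="my-mistral-endpoint",
        service="mistral",
        service_settings={
            "api_key": "<api-key>",    # assumed key name
            "model": "mistral-embed",  # assumed key name and model
        },
        timeout="30s",
    )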

    - ``_ + ``_ - :param task_type: The task type. The only valid task type for the model to perform - is `text_embedding`. + :param task_type: The type of the inference task that the model will perform. :param mistral_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `mistral`. :param service_settings: Settings used to install the inference model. These settings are specific to the `mistral` service. :param chunking_settings: The chunking configuration object. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -1657,6 +1949,8 @@ def put_mistral( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -1702,6 +1996,7 @@ def put_openai( human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -1709,14 +2004,9 @@ def put_openai(

    Create an OpenAI inference endpoint.

    Create an inference endpoint to perform an inference task with the openai service or openai compatible APIs.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
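For illustration, a text_embedding endpoint for the openai service with the new timeout parameter (chat_completion is avoided here because, per the note in this hunk, it only supports streaming); api_key and model_id inside service_settings are assumptions.

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    client.inference.put_openai(
        task_type="text_embedding",
        openai_inference_id="my-openai-endpoint",
        service="openai",
        service_settings={
            "api_key": "<api-key>",                # assumed key name
            "model_id": "text-embedding-3-small",  # assumed key name and model
        },
        timeout="30s",
    )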

    - ``_ + ``_ :param task_type: The type of the inference task that the model will perform. NOTE: The `chat_completion` task type only supports streaming and only through @@ -1729,6 +2019,8 @@ def put_openai( :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -1753,6 +2045,8 @@ def put_openai( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -1798,6 +2092,7 @@ def put_voyageai( human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -1808,7 +2103,7 @@ def put_voyageai(

    Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

    - ``_ + ``_ :param task_type: The type of the inference task that the model will perform. :param voyageai_inference_id: The unique identifier of the inference endpoint. @@ -1819,6 +2114,8 @@ def put_voyageai( :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -1843,6 +2140,8 @@ def put_voyageai( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -1881,6 +2180,7 @@ def put_watsonx( filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, + timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -1890,14 +2190,9 @@ def put_watsonx(

    Create an inference endpoint to perform an inference task with the watsonxai service. You need an IBM Cloud Databases for Elasticsearch deployment to use the watsonxai inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.

    -

    When you create an inference endpoint, the associated machine learning model is automatically deployed if it is not already running. - After creating the endpoint, wait for the model deployment to complete before using it. - To verify the deployment status, use the get trained model statistics API. - Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". - Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
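A sketch for the watsonxai service; the docstring fixes text_embedding as the only valid task type, while the inference-id parameter name is assumed to follow the pattern of the other put_* methods and the service_settings keys (api_key, url, model_id, project_id, api_version) are assumptions.

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    client.inference.put_watsonx(
        task_type="text_embedding",
        watsonx_inference_id="my-watsonx-endpoint",  # assumed parameter name
        service="watsonxai",
        service_settings={
            # assumed key names; consult the watsonxai service docs
            "api_key": "<api-key>",
            "url": "<watsonx-url>",
            "model_id": "<model-id>",
            "project_id": "<project-id>",
            "api_version": "<api-version>",
        },
        timeout="30s",
    )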

    - ``_ + ``_ :param task_type: The task type. The only valid task type for the model to perform is `text_embedding`. @@ -1906,6 +2201,8 @@ def put_watsonx( this case, `watsonxai`. :param service_settings: Settings used to install the inference model. These settings are specific to the `watsonxai` service. + :param timeout: Specifies the amount of time to wait for the inference endpoint + to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") @@ -1930,6 +2227,8 @@ def put_watsonx( __query["human"] = human if pretty is not None: __query["pretty"] = pretty + if timeout is not None: + __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service @@ -1970,10 +2269,10 @@ def rerank( """ .. raw:: html -

    Perform rereanking inference on the service

    +

    Perform reranking inference on the service
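Once a rerank endpoint exists, invoking it might look like the sketch below; inference_id and input appear in the parameter list in this hunk, while the query argument is an assumption based on how rerank requests are typically shaped.

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    resp = client.inference.rerank(
        inference_id="my-rerank-endpoint",
        query="best pizza in town",  # assumed parameter
        input=["doc one", "doc two", "doc three"],
    )
    print(resp)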

    - ``_ + ``_ :param inference_id: The unique identifier for the inference endpoint. :param input: The text on which you want to perform the inference task. It can @@ -2049,7 +2348,7 @@ def sparse_embedding(

    Perform sparse embedding inference on the service

    - ``_ + ``_ :param inference_id: The inference Id :param input: Inference input. Either a string or an array of strings. @@ -2117,7 +2416,7 @@ def text_embedding(

    Perform text embedding inference on the service
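A minimal call sketch using the two parameters documented in this hunk (inference_id and input); the endpoint name is a placeholder.

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    resp = client.inference.text_embedding(
        inference_id="my-text-embedding-endpoint",
        input=["The quick brown fox"],  # a string or an array of strings
    )
    print(resp)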

    - ``_ + ``_ :param inference_id: The inference Id :param input: Inference input. Either a string or an array of strings. @@ -2199,7 +2498,7 @@ def update( However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.

    - ``_ + ``_ :param inference_id: The unique identifier of the inference endpoint. :param inference_config: diff --git a/elasticsearch/_sync/client/ingest.py b/elasticsearch/_sync/client/ingest.py index f4d8c9241..99d75adac 100644 --- a/elasticsearch/_sync/client/ingest.py +++ b/elasticsearch/_sync/client/ingest.py @@ -98,7 +98,7 @@ def delete_ip_location_database(

    Delete IP geolocation database configurations.

    - ``_ + ``_ :param id: A comma-separated list of IP location database configurations. :param master_timeout: The period to wait for a connection to the master node. @@ -155,7 +155,7 @@ def delete_pipeline( Delete one or more ingest pipelines.

    - ``_ + ``_ :param id: Pipeline ID or wildcard expression of pipeline IDs used to limit the request. To delete all ingest pipelines in a cluster, use a value of `*`. @@ -208,7 +208,7 @@ def geo_ip_stats( Get download statistics for GeoIP2 databases that are used with the GeoIP processor.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ingest/geoip/stats" @@ -288,7 +288,6 @@ def get_ip_location_database( error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, - master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ @@ -297,15 +296,11 @@ def get_ip_location_database(

    Get IP geolocation database configurations.

    - ``_ + ``_ :param id: Comma-separated list of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`. - :param master_timeout: The period to wait for a connection to the master node. - If no response is received before the timeout expires, the request fails - and returns an error. A value of `-1` indicates that the request should never - time out. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: @@ -321,8 +316,6 @@ def get_ip_location_database( __query["filter_path"] = filter_path if human is not None: __query["human"] = human - if master_timeout is not None: - __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} @@ -355,7 +348,7 @@ def get_pipeline( This API returns a local reference of the pipeline.

    - ``_ + ``_ :param id: Comma-separated list of pipeline IDs to retrieve. Wildcard (`*`) expressions are supported. To get all ingest pipelines, omit this parameter or use `*`. @@ -412,7 +405,7 @@ def processor_grok( A grok pattern is like a regular expression that supports aliased expressions that can be reused.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ingest/processor/grok" @@ -534,7 +527,7 @@ def put_ip_location_database(

    Create or update an IP geolocation database configuration.

    - ``_ + ``_ :param id: The database configuration identifier. :param configuration: @@ -620,7 +613,7 @@ def put_pipeline( Changes made using this API take effect immediately.

    - ``_ + ``_ :param id: ID of the ingest pipeline to create or update. :param deprecated: Marks this ingest pipeline as deprecated. When a deprecated @@ -717,7 +710,7 @@ def simulate( You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.

    - ``_ + ``_ :param docs: Sample documents to test in the pipeline. :param id: The pipeline to test. If you don't specify a `pipeline` in the request diff --git a/elasticsearch/_sync/client/license.py b/elasticsearch/_sync/client/license.py index 2b1174121..cd22692df 100644 --- a/elasticsearch/_sync/client/license.py +++ b/elasticsearch/_sync/client/license.py @@ -44,7 +44,7 @@ def delete(

    If the operator privileges feature is enabled, only operator users can use this API.

    - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. :param timeout: The period to wait for a response. If no response is received @@ -98,7 +98,7 @@ def get( - ``_ + ``_ :param accept_enterprise: If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum @@ -147,7 +147,7 @@ def get_basic_status(

    Get the basic license status.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_license/basic_status" @@ -185,7 +185,7 @@ def get_trial_status(

    Get the trial status.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_license/trial_status" @@ -308,7 +308,7 @@ def post_start_basic(

    To check the status of your basic license, use the get basic license API.

    - ``_ + ``_ :param acknowledge: whether the user has acknowledged acknowledge messages (default: false) @@ -353,7 +353,7 @@ def post_start_trial( human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, - type_query_string: t.Optional[str] = None, + type: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html @@ -365,12 +365,12 @@ def post_start_trial(

    To check the status of your trial, use the get trial status API.
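With the rename from type_query_string to type shown in the hunk below, starting a trial might look like this sketch; both parameters are documented there.

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster

    resp = client.license.post_start_trial(
        acknowledge=True,  # acknowledge the license messages
        type="trial",      # renamed from type_query_string in this change
    )
    print(resp)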

    - ``_ + ``_ :param acknowledge: whether the user has acknowledged acknowledge messages (default: false) :param master_timeout: Period to wait for a connection to the master node. - :param type_query_string: + :param type: The type of trial license to generate (default: "trial") """ __path_parts: t.Dict[str, str] = {} __path = "/_license/start_trial" @@ -387,8 +387,8 @@ def post_start_trial( __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty - if type_query_string is not None: - __query["type_query_string"] = type_query_string + if type is not None: + __query["type"] = type __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", diff --git a/elasticsearch/_sync/client/logstash.py b/elasticsearch/_sync/client/logstash.py index e329058a3..8f8e6ee4f 100644 --- a/elasticsearch/_sync/client/logstash.py +++ b/elasticsearch/_sync/client/logstash.py @@ -43,7 +43,7 @@ def delete_pipeline( If the request succeeds, you receive an empty response with an appropriate status code.

    - ``_ + ``_ :param id: An identifier for the pipeline. """ @@ -87,7 +87,7 @@ def get_pipeline( Get pipelines that are used for Logstash Central Management.

    - ``_ + ``_ :param id: A comma-separated list of pipeline identifiers. """ @@ -139,7 +139,7 @@ def put_pipeline( If the specified pipeline exists, it is replaced.

    - ``_ + ``_ :param id: An identifier for the pipeline. :param pipeline: diff --git a/elasticsearch/_sync/client/migration.py b/elasticsearch/_sync/client/migration.py index a9476162c..ae803e2ec 100644 --- a/elasticsearch/_sync/client/migration.py +++ b/elasticsearch/_sync/client/migration.py @@ -44,7 +44,7 @@ def deprecations( You are strongly recommended to use the Upgrade Assistant.

    - ``_ + ``_ :param index: Comma-separate list of data streams or indices to check. Wildcard (*) expressions are supported. @@ -94,7 +94,7 @@ def get_feature_upgrade_status( You are strongly recommended to use the Upgrade Assistant.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_migration/system_features" @@ -136,7 +136,7 @@ def post_feature_upgrade(

    TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_migration/system_features" diff --git a/elasticsearch/_sync/client/ml.py b/elasticsearch/_sync/client/ml.py index 15ebddb41..f4b4612fd 100644 --- a/elasticsearch/_sync/client/ml.py +++ b/elasticsearch/_sync/client/ml.py @@ -45,7 +45,7 @@ def clear_trained_model_deployment_cache( Calling this API clears the caches without restarting the deployment.

    - ``_ + ``_ :param model_id: The unique identifier of the trained model. """ @@ -100,7 +100,7 @@ def close_job( When a datafeed that has a specified end date stops, it automatically closes its associated job.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection @@ -165,7 +165,7 @@ def delete_calendar(

    Remove all scheduled events from a calendar, then delete it.

    - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. """ @@ -209,7 +209,7 @@ def delete_calendar_event(

    Delete events from a calendar.

    - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param event_id: Identifier for the scheduled event. You can obtain this identifier @@ -260,7 +260,7 @@ def delete_calendar_job(

    Delete anomaly jobs from a calendar.

    - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param job_id: An identifier for the anomaly detection jobs. It can be a job @@ -312,7 +312,7 @@ def delete_data_frame_analytics(

    Delete a data frame analytics job.

    - ``_ + ``_ :param id: Identifier for the data frame analytics job. :param force: If `true`, it deletes a job that is not stopped; this method is @@ -363,7 +363,7 @@ def delete_datafeed(

    Delete a datafeed.

    - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -426,7 +426,7 @@ def delete_expired_data( <job_id>.

    - ``_ + ``_ :param job_id: Identifier for an anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. @@ -490,7 +490,7 @@ def delete_filter( filter. You must update or delete the job before you can delete the filter.

    - ``_ + ``_ :param filter_id: A string that uniquely identifies a filter. """ @@ -540,7 +540,7 @@ def delete_forecast( forecasts before they expire.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param forecast_id: A comma-separated list of forecast identifiers. If you do @@ -616,7 +616,7 @@ def delete_job( delete job request.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param delete_user_annotations: Specifies whether annotations that have been @@ -676,7 +676,7 @@ def delete_model_snapshot( the model_snapshot_id in the results from the get jobs API.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: Identifier for the model snapshot. @@ -728,7 +728,7 @@ def delete_trained_model(

    The request deletes a trained inference model that is not referenced by an ingest pipeline.

    - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param force: Forcefully deletes a trained model that is referenced by ingest @@ -783,7 +783,7 @@ def delete_trained_model_alias( by the model_id, this API returns an error.

    - ``_ + ``_ :param model_id: The trained model ID to which the model alias refers. :param model_alias: The model alias to delete. @@ -844,7 +844,7 @@ def estimate_model_memory( estimates for the fields it references.

    - ``_ + ``_ :param analysis_config: For a list of the properties that you can specify in the `analysis_config` component of the body of this API. @@ -916,7 +916,7 @@ def evaluate_data_frame( field and an analytics result field to be present.

    - ``_ + ``_ :param evaluation: Defines the type of evaluation you want to perform. :param index: Defines the `index` in which the evaluation will be performed. @@ -1001,7 +1001,7 @@ def explain_data_frame_analytics( - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -1112,7 +1112,7 @@ def flush_job( analyzing further data.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param advance_time: Refer to the description for the `advance_time` query parameter. @@ -1187,7 +1187,7 @@ def forecast( based on historical data.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. The job must be open when you create a forecast; otherwise, an error occurs. @@ -1273,7 +1273,7 @@ def get_buckets( The API presents a chronological view of the records, grouped by bucket.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param timestamp: The timestamp of a single bucket result. If you do not specify @@ -1371,7 +1371,7 @@ def get_calendar_events(

    Get info about events in calendars.

    - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids @@ -1440,7 +1440,7 @@ def get_calendars(

    Get calendar configuration info.

    - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids @@ -1516,7 +1516,7 @@ def get_categories(

    Get anomaly detection job results for categories.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param category_id: Identifier for the category, which is unique in the job. @@ -1604,7 +1604,7 @@ def get_data_frame_analytics( wildcard expression.

    - ``_ + ``_ :param id: Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame @@ -1679,7 +1679,7 @@ def get_data_frame_analytics_stats(

    Get data frame analytics job stats.

    - ``_ + ``_ :param id: Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame @@ -1753,7 +1753,7 @@ def get_datafeed_stats( This API returns a maximum of 10,000 datafeeds.

    - ``_ + ``_ :param datafeed_id: Identifier for the datafeed. It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the @@ -1817,7 +1817,7 @@ def get_datafeeds( This API returns a maximum of 10,000 datafeeds.

    - ``_ + ``_ :param datafeed_id: Identifier for the datafeed. It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the @@ -1884,7 +1884,7 @@ def get_filters( You can get a single filter or all filters.

    - ``_ + ``_ :param filter_id: A string that uniquely identifies a filter. :param from_: Skips the specified number of filters. @@ -1952,7 +1952,7 @@ def get_influencers( influencer_field_name is specified in the job configuration.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param desc: If true, the results are sorted in descending order. @@ -2036,7 +2036,7 @@ def get_job_stats(

    Get anomaly detection job stats.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, a comma-separated list of jobs, or a wildcard expression. If @@ -2100,7 +2100,7 @@ def get_jobs( _all, by specifying * as the <job_id>, or by omitting the <job_id>.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. If you do not specify one of these @@ -2166,7 +2166,7 @@ def get_memory_stats( on each node, both within the JVM heap, and natively, outside of the JVM.

    - ``_ + ``_ :param node_id: The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or `ml:true` @@ -2224,7 +2224,7 @@ def get_model_snapshot_upgrade_stats(

    Get anomaly detection job model snapshot upgrade usage info.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: A numerical character string that uniquely identifies the @@ -2298,7 +2298,7 @@ def get_model_snapshots(

    Get model snapshots info.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: A numerical character string that uniquely identifies the @@ -2418,7 +2418,7 @@ def get_overall_buckets( jobs' largest bucket span.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, a comma-separated list of jobs or groups, or a wildcard expression. @@ -2528,7 +2528,7 @@ def get_records( number of detectors.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param desc: Refer to the description for the `desc` query parameter. @@ -2627,7 +2627,7 @@ def get_trained_models(

    Get trained model configuration info.

    - ``_ + ``_ :param model_id: The unique identifier of the trained model or a model alias. You can get information for multiple trained models in a single API request @@ -2718,7 +2718,7 @@ def get_trained_models_stats( models in a single API request by using a comma-separated list of model IDs or a wildcard expression.

    - ``_ + ``_ :param model_id: The unique identifier of the trained model or a model alias. It can be a comma-separated list or a wildcard expression. @@ -2784,7 +2784,7 @@ def infer_trained_model(

    Evaluate a trained model.

    - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param docs: An array of objects to pass to the model for inference. The objects @@ -2851,7 +2851,7 @@ def info( cluster configuration.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ml/info" @@ -2900,7 +2900,7 @@ def open_job( new data is received.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param timeout: Refer to the description for the `timeout` query parameter. @@ -2957,7 +2957,7 @@ def post_calendar_events(

    Add scheduled events to the calendar.

    - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param events: A list of one of more scheduled events. The event’s start and @@ -3018,7 +3018,7 @@ def post_data( It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. The job must have a state of open to receive and process the data. @@ -3085,7 +3085,7 @@ def preview_data_frame_analytics( Preview the extracted features used by a data frame analytics config.

    - ``_ + ``_ :param id: Identifier for the data frame analytics job. :param config: A data frame analytics config as described in create data frame @@ -3158,7 +3158,7 @@ def preview_datafeed( You can also use secondary authorization headers to supply the credentials.

    - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -3237,7 +3237,7 @@ def put_calendar(

    Create a calendar.

    - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param description: A description of the calendar. @@ -3294,7 +3294,7 @@ def put_calendar_job(

    Add anomaly detection job to calendar.

    - ``_ + ``_ :param calendar_id: A string that uniquely identifies a calendar. :param job_id: An identifier for the anomaly detection jobs. It can be a job @@ -3377,7 +3377,7 @@ def put_data_frame_analytics(

    If you supply only a subset of the regression or classification parameters, hyperparameter optimization occurs. It determines a value for each of the undefined parameters.

    - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -3562,7 +3562,7 @@ def put_datafeed( directly to the .ml-config index. Do not give users write privileges on the .ml-config index.

    - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -3724,7 +3724,7 @@ def put_filter( Specifically, filters are referenced in the custom_rules property of detector configuration objects.

    - ``_ + ``_ :param filter_id: A string that uniquely identifies a filter. :param description: A description of the filter. @@ -3826,7 +3826,7 @@ def put_job( If you include a datafeed_config but do not provide a query, the datafeed uses {"match_all": {"boost": 1}}.

    - ``_ + ``_ :param job_id: The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and @@ -3876,13 +3876,7 @@ def put_job( :param description: A description of the job. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard - expressions match hidden data streams. Supports comma-separated values. Valid - values are: * `all`: Match any data stream or index, including hidden ones. - * `closed`: Match closed, non-hidden indices. Also matches any non-hidden - data stream. Data streams cannot be closed. * `hidden`: Match hidden data - streams and hidden indices. Must be combined with `open`, `closed`, or both. - * `none`: Wildcard patterns are not accepted. * `open`: Match open, non-hidden - indices. Also matches any non-hidden data stream. + expressions match hidden data streams. Supports comma-separated values. :param groups: A list of job groups. A job can belong to no groups or many. :param ignore_throttled: If `true`, concrete, expanded or aliased indices are ignored when frozen. @@ -4034,7 +4028,7 @@ def put_trained_model( Enable you to supply a trained model that is not created by data frame analytics.

    - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param compressed_definition: The compressed (GZipped and Base64 encoded) inference @@ -4155,7 +4149,7 @@ def put_trained_model_alias( returns a warning.

    - ``_ + ``_ :param model_id: The identifier for the trained model that the alias refers to. :param model_alias: The alias to create or update. This value cannot end in numbers. @@ -4216,7 +4210,7 @@ def put_trained_model_definition_part(

    Create part of a trained model definition.

    - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param part: The definition part number. When the definition is loaded for inference @@ -4298,7 +4292,7 @@ def put_trained_model_vocabulary( The vocabulary is stored in the index as described in inference_config.*.vocabulary of the trained model definition.

    - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param vocabulary: The model vocabulary, which must not be empty. @@ -4361,7 +4355,7 @@ def reset_job( comma separated list.

    - ``_ + ``_ :param job_id: The ID of the job to reset. :param delete_user_annotations: Specifies whether annotations that have been @@ -4425,7 +4419,7 @@ def revert_model_snapshot( snapshot after Black Friday or a critical system failure.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: You can specify `empty` as the . Reverting to @@ -4500,7 +4494,7 @@ def set_upgrade_mode( machine learning info API.

    - ``_ + ``_ :param enabled: When `true`, it enables `upgrade_mode` which temporarily halts all job and datafeed tasks and prohibits new job and datafeed tasks from @@ -4560,7 +4554,7 @@ def start_data_frame_analytics( the destination index in advance with custom settings and mappings.
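A hedged sketch of toggling upgrade mode, assuming a configured `Elasticsearch` client named `client` as in the earlier example; the timeout value is an illustrative assumption:

    # Temporarily halt ML job and datafeed tasks before cluster maintenance ...
    client.ml.set_upgrade_mode(enabled=True, timeout="10m")

    # ... and restore normal operation afterwards.
    client.ml.set_upgrade_mode(enabled=False)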

    - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -4623,7 +4617,7 @@ def start_datafeed( authorization headers when you created or updated the datafeed, those credentials are used instead.

    - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -4700,7 +4694,7 @@ def start_trained_model_deployment( It allocates the model to every machine learning node.

    - ``_ + ``_ :param model_id: The unique identifier of the trained model. Currently, only PyTorch models are supported. @@ -4801,7 +4795,7 @@ def stop_data_frame_analytics( throughout its lifecycle.

    - ``_ + ``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -4871,7 +4865,7 @@ def stop_datafeed( multiple times throughout its lifecycle.

    - ``_ + ``_ :param datafeed_id: Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a @@ -4936,7 +4930,7 @@ def stop_trained_model_deployment(

    Stop a trained model deployment.

    - ``_ + ``_ :param model_id: The unique identifier of the trained model. :param allow_no_match: Specifies what to do when the request: contains wildcard @@ -5119,7 +5113,7 @@ def update_datafeed( those credentials are used instead.

    - ``_ + ``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z @@ -5145,13 +5139,7 @@ def update_datafeed( check runs only on real-time datafeeds. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard - expressions match hidden data streams. Supports comma-separated values. Valid - values are: * `all`: Match any data stream or index, including hidden ones. - * `closed`: Match closed, non-hidden indices. Also matches any non-hidden - data stream. Data streams cannot be closed. * `hidden`: Match hidden data - streams and hidden indices. Must be combined with `open`, `closed`, or both. - * `none`: Wildcard patterns are not accepted. * `open`: Match open, non-hidden - indices. Also matches any non-hidden data stream. + expressions match hidden data streams. Supports comma-separated values. :param frequency: The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the @@ -5286,7 +5274,7 @@ def update_filter( Updates the description of a filter, adds items, or removes items from the list.

    - ``_ + ``_ :param filter_id: A string that uniquely identifies a filter. :param add_items: The items to add to the filter. @@ -5380,7 +5368,7 @@ def update_job( Updates certain properties of an anomaly detection job.

    - ``_ + ``_ :param job_id: Identifier for the job. :param allow_lazy_open: Advanced configuration option. Specifies whether this @@ -5512,7 +5500,7 @@ def update_model_snapshot( Updates certain properties of a snapshot.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: Identifier for the model snapshot. @@ -5654,7 +5642,7 @@ def upgrade_job_snapshot( job.

    - ``_ + ``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: A numerical character string that uniquely identifies the @@ -5733,7 +5721,7 @@ def validate(

    Validate an anomaly detection job.

    - ``_ + ``_ :param analysis_config: :param analysis_limits: diff --git a/elasticsearch/_sync/client/nodes.py b/elasticsearch/_sync/client/nodes.py index b64f3abf0..97a897f52 100644 --- a/elasticsearch/_sync/client/nodes.py +++ b/elasticsearch/_sync/client/nodes.py @@ -50,7 +50,7 @@ def clear_repositories_metering_archive( Clear the archived repositories metering information in the cluster.

    - ``_ + ``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. @@ -105,10 +105,11 @@ def get_repositories_metering_info( Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts.

    - ``_ + ``_ :param node_id: Comma-separated list of node IDs or names used to limit returned - information. All the nodes selective options are explained [here](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html#cluster-nodes). + information. For more information about the nodes selective options, refer + to the node specification documentation. """ if node_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'node_id'") @@ -162,7 +163,7 @@ def hot_threads( The output is plain text with a breakdown of the top hot threads for each node.

    - ``_ + ``_ :param node_id: List of node IDs or names used to limit returned information. :param ignore_idle_threads: If true, known idle threads (e.g. waiting in a socket @@ -235,7 +236,7 @@ def info(

    By default, the API returns all attributes and core settings for cluster nodes.

    - ``_ + ``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. @@ -308,7 +309,7 @@ def reload_secure_settings( Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password.

    - ``_ + ``_ :param node_id: The names of particular nodes in the cluster to target. :param secure_settings_password: The password for the Elasticsearch keystore. @@ -383,7 +384,7 @@ def stats( By default, all stats are returned. You can limit the returned information by using metrics.

    - ``_ + ``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. @@ -498,7 +499,7 @@ def usage(

    Get feature usage information.

    - ``_ + ``_ :param node_id: A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting diff --git a/elasticsearch/_sync/client/query_rules.py b/elasticsearch/_sync/client/query_rules.py index 2b322949c..f1134650b 100644 --- a/elasticsearch/_sync/client/query_rules.py +++ b/elasticsearch/_sync/client/query_rules.py @@ -44,7 +44,7 @@ def delete_rule( This is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API.

    - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset containing the rule to delete @@ -97,7 +97,7 @@ def delete_ruleset( This is a destructive action that is not recoverable.

    - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset to delete """ @@ -142,7 +142,7 @@ def get_rule( Get details about a query rule within a query ruleset.

    - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset containing the rule to retrieve @@ -194,7 +194,7 @@ def get_ruleset( Get details about a query ruleset.

    - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset """ @@ -241,7 +241,7 @@ def list_rulesets( Get summarized information about the query rulesets.

    - ``_ + ``_ :param from_: The offset from the first result to fetch. :param size: The maximum number of results to retrieve. @@ -302,7 +302,7 @@ def put_rule( If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.

    - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset containing the rule to be created or updated. @@ -389,7 +389,7 @@ def put_ruleset( If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.

    - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset to be created or updated. @@ -446,7 +446,7 @@ def test( Evaluate match criteria against a query ruleset to identify the rules that would match that criteria.

    - ``_ + ``_ :param ruleset_id: The unique identifier of the query ruleset to be created or updated diff --git a/elasticsearch/_sync/client/rollup.py b/elasticsearch/_sync/client/rollup.py index 84ca3d410..ae8b0c946 100644 --- a/elasticsearch/_sync/client/rollup.py +++ b/elasticsearch/_sync/client/rollup.py @@ -67,7 +67,7 @@ def delete_job( - ``_ + ``_ :param id: Identifier for the job. """ @@ -115,7 +115,7 @@ def get_jobs( For details about a historical rollup job, the rollup capabilities API may be more useful.

    - ``_ + ``_ :param id: Identifier for the rollup job. If it is `_all` or omitted, the API returns all rollup jobs. @@ -171,7 +171,7 @@ def get_rollup_caps( - ``_ + ``_ :param id: Index, indices or index-pattern to return rollup capabilities for. `_all` may be used to fetch rollup capabilities from all jobs. @@ -225,7 +225,7 @@ def get_rollup_index_caps( - ``_ + ``_ :param index: Data stream or index to check for rollup capabilities. Wildcard (`*`) expressions are supported. @@ -295,7 +295,7 @@ def put_job(

    Jobs are created in a STOPPED state. You can start them with the start rollup jobs API.

    - ``_ + ``_ :param id: Identifier for the rollup job. This can be any alphanumeric string and uniquely identifies the data that is associated with the rollup job. @@ -443,7 +443,7 @@ def rollup_search( During the merging process, if there is any overlap in buckets between the two responses, the buckets from the non-rollup index are used.

    - ``_ + ``_ :param index: A comma-separated list of data streams and indices used to limit the request. This parameter has the following rules: * At least one data @@ -521,7 +521,7 @@ def start_job( If you try to start a job that is already started, nothing happens.

    - ``_ + ``_ :param id: Identifier for the rollup job. """ @@ -575,7 +575,7 @@ def stop_job( If the specified time elapses without the job moving to STOPPED, a timeout exception occurs.

    - ``_ + ``_ :param id: Identifier for the rollup job. :param timeout: If `wait_for_completion` is `true`, the API blocks for (at maximum) diff --git a/elasticsearch/_sync/client/search_application.py b/elasticsearch/_sync/client/search_application.py index 7309f9ec4..d50dc454b 100644 --- a/elasticsearch/_sync/client/search_application.py +++ b/elasticsearch/_sync/client/search_application.py @@ -49,7 +49,7 @@ def delete(

    Remove a search application and its associated alias. Indices attached to the search application are not removed.

    - ``_ + ``_ :param name: The name of the search application to delete. """ @@ -94,7 +94,7 @@ def delete_behavioral_analytics( The associated data stream is also deleted.

    - ``_ + ``_ :param name: The name of the analytics collection to be deleted """ @@ -138,7 +138,7 @@ def get(

    Get search application details.

    - ``_ + ``_ :param name: The name of the search application """ @@ -182,7 +182,7 @@ def get_behavioral_analytics(

    Get behavioral analytics collections.

    - ``_ + ``_ :param name: A list of analytics collections to limit the returned information """ @@ -234,7 +234,7 @@ def list( Get information about search applications.

    - ``_ + ``_ :param from_: Starting offset. :param q: Query in the Lucene query string syntax. @@ -290,7 +290,7 @@ def post_behavioral_analytics_event(

    Create a behavioral analytics collection event.

    - ``_ + ``_ :param collection_name: The name of the behavioral analytics collection. :param event_type: The analytics event type. @@ -357,7 +357,7 @@ def put(

    Create or update a search application.

    - ``_ + ``_ :param name: The name of the search application to be created or updated. :param search_application: @@ -414,7 +414,7 @@ def put_behavioral_analytics(

    Create a behavioral analytics collection.

    - ``_ + ``_ :param name: The name of the analytics collection to be created or updated. """ @@ -467,7 +467,7 @@ def render_query(

    You must have read privileges on the backing alias of the search application.

-        ``_
+        ``_

        :param name: The name of the search application to render the query for.
        :param params:
@@ -531,7 +531,7 @@ def search(
            Unspecified template parameters are assigned their default values if applicable.

    - ``_ + ``_ :param name: The name of the search application to be searched. :param params: Query parameters specific to this request, which will override diff --git a/elasticsearch/_sync/client/searchable_snapshots.py b/elasticsearch/_sync/client/searchable_snapshots.py index 0d5575c0c..b5eee7a42 100644 --- a/elasticsearch/_sync/client/searchable_snapshots.py +++ b/elasticsearch/_sync/client/searchable_snapshots.py @@ -50,7 +50,7 @@ def cache_stats( Get statistics about the shared cache for partially mounted indices.

    - ``_ + ``_ :param node_id: The names of the nodes in the cluster to target. :param master_timeout: @@ -111,7 +111,7 @@ def clear_cache( Clear indices and data streams from the shared cache for partially mounted indices.

    - ``_ + ``_ :param index: A comma-separated list of data streams, indices, and aliases to clear from the cache. It supports wildcards (`*`). @@ -190,7 +190,7 @@ def mount( Manually mounting ILM-managed snapshots can interfere with ILM processes.

    - ``_ + ``_ :param repository: The name of the repository containing the snapshot of the index to mount. @@ -278,7 +278,7 @@ def stats(

    Get searchable snapshot statistics.

    - ``_ + ``_ :param index: A comma-separated list of data streams and indices to retrieve statistics for. diff --git a/elasticsearch/_sync/client/security.py b/elasticsearch/_sync/client/security.py index c0c7840ec..c6099fb8f 100644 --- a/elasticsearch/_sync/client/security.py +++ b/elasticsearch/_sync/client/security.py @@ -58,7 +58,7 @@ def activate_user_profile( Any updates do not change existing content for either the labels or data fields.

    - ``_ + ``_ :param grant_type: The type of grant. :param access_token: The user's Elasticsearch access token or JWT. Both `access` @@ -124,7 +124,7 @@ def authenticate( If the user cannot be authenticated, this API returns a 401 status code.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/_authenticate" @@ -171,7 +171,7 @@ def bulk_delete_role( The bulk delete roles API cannot delete roles that are defined in roles files.

    - ``_ + ``_ :param names: An array of role names to delete :param refresh: If `true` (the default) then refresh the affected shards to make @@ -232,7 +232,7 @@ def bulk_put_role( The bulk create or update roles API cannot update roles that are defined in roles files.

    - ``_ + ``_ :param roles: A dictionary of role name to RoleDescriptor objects to add or update :param refresh: If `true` (the default) then refresh the affected shards to make @@ -300,7 +300,7 @@ def bulk_update_api_keys(

    A successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and error details for any failed update.

    - ``_ + ``_ :param ids: The API key identifiers. :param expiration: Expiration time for the API keys. By default, API keys never @@ -378,7 +378,7 @@ def change_password(

    Change the passwords of users in the native realm and built-in users.

    - ``_ + ``_ :param username: The user whose password you want to change. If you do not specify this parameter, the password is changed for the current user. @@ -445,7 +445,7 @@ def clear_api_key_cache( The cache is also automatically cleared on state changes of the security index.

    - ``_ + ``_ :param ids: Comma-separated list of API key IDs to evict from the API key cache. To evict all API keys, use `*`. Does not support other wildcard patterns. @@ -491,7 +491,7 @@ def clear_cached_privileges( The cache is also automatically cleared for applications that have their privileges updated.

-        ``_
+        ``_

        :param application: A comma-separated list of applications. To clear all applications,
            use an asterisk (`*`). It does not support other wildcard patterns.
@@ -541,7 +541,7 @@ def clear_cached_realms(
            For more information, refer to the documentation about controlling the user cache.

    - ``_ + ``_ :param realms: A comma-separated list of realms. To clear all realms, use an asterisk (`*`). It does not support other wildcard patterns. @@ -591,7 +591,7 @@ def clear_cached_roles(

    Evict roles from the native role cache.

    - ``_ + ``_ :param name: A comma-separated list of roles to evict from the role cache. To evict all roles, use an asterisk (`*`). It does not support other wildcard @@ -643,7 +643,7 @@ def clear_cached_service_tokens( The cache for tokens backed by the service_tokens file is cleared automatically on file changes.
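For illustration only, evicting roles from the native role cache with the Python client might be done as below, assuming a configured `Elasticsearch` client named `client`; the role name is an assumption:

    # Evict a single role from the native role cache; pass "*" to evict all roles.
    client.security.clear_cached_roles(name="my_admin_role")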

    - ``_ + ``_ :param namespace: The namespace, which is a top-level grouping of service accounts. :param service: The name of the service, which must be unique within its namespace. @@ -715,7 +715,7 @@ def create_api_key( To configure or turn off the API key service, refer to API key service setting documentation.

    - ``_ + ``_ :param expiration: The expiration time for the API key. By default, API keys never expire. @@ -805,7 +805,7 @@ def create_cross_cluster_api_key( Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error.

    - ``_ + ``_ :param access: The access to be granted to this API key. The access is composed of permissions for cross-cluster search and cross-cluster replication. At @@ -880,7 +880,7 @@ def create_service_token( You must actively delete them if they are no longer needed.

    - ``_ + ``_ :param namespace: The name of the namespace, which is a top-level grouping of service accounts. @@ -966,7 +966,7 @@ def delegate_pki( The proxy is trusted to have performed the TLS authentication and this API translates that authentication into an Elasticsearch access token.

    - ``_ + ``_ :param x509_certificate_chain: The X509Certificate chain, which is represented as an ordered string array. Each string in the array is a base64-encoded @@ -1030,7 +1030,7 @@ def delete_privileges( - ``_ + ``_ :param application: The name of the application. Application privileges are always associated with exactly one application. @@ -1093,7 +1093,7 @@ def delete_role( The delete roles API cannot remove roles that are defined in roles files.

    - ``_ + ``_ :param name: The name of the role. :param refresh: If `true` (the default) then refresh the affected shards to make @@ -1147,7 +1147,7 @@ def delete_role_mapping( The delete role mappings API cannot remove role mappings that are defined in role mapping files.

    - ``_ + ``_ :param name: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does @@ -1203,7 +1203,7 @@ def delete_service_token(

    Delete service account tokens for a service in a specified namespace.

    - ``_ + ``_ :param namespace: The namespace, which is a top-level grouping of service accounts. :param service: The service name. @@ -1265,7 +1265,7 @@ def delete_user(

    Delete users from the native realm.

    - ``_ + ``_ :param username: An identifier for the user. :param refresh: If `true` (the default) then refresh the affected shards to make @@ -1319,7 +1319,7 @@ def disable_user( You can use this API to revoke a user's access to Elasticsearch.

    - ``_ + ``_ :param username: An identifier for the user. :param refresh: If `true` (the default) then refresh the affected shards to make @@ -1376,7 +1376,7 @@ def disable_user_profile( To re-enable a disabled user profile, use the enable user profile API .

    - ``_ + ``_ :param uid: Unique identifier for the user profile. :param refresh: If 'true', Elasticsearch refreshes the affected shards to make @@ -1429,7 +1429,7 @@ def enable_user( By default, when you create users, they are enabled.

    - ``_ + ``_ :param username: An identifier for the user. :param refresh: If `true` (the default) then refresh the affected shards to make @@ -1486,7 +1486,7 @@ def enable_user_profile( If you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again.

    - ``_ + ``_ :param uid: A unique identifier for the user profile. :param refresh: If 'true', Elasticsearch refreshes the affected shards to make @@ -1536,7 +1536,7 @@ def enroll_kibana( Kibana uses this API internally to configure itself for communications with an Elasticsearch cluster that already has security features enabled.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/enroll/kibana" @@ -1577,7 +1577,7 @@ def enroll_node( The response contains key and certificate material that allows the caller to generate valid signed certificates for the HTTP layer of all nodes in the cluster.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/enroll/node" @@ -1626,7 +1626,7 @@ def get_api_key( If you have read_security, manage_api_key or greater privileges (including manage_security), this API returns all API keys regardless of ownership.

    - ``_ + ``_ :param active_only: A boolean flag that can be used to query API keys that are currently active. An API key is considered active if it is neither invalidated, @@ -1704,7 +1704,7 @@ def get_builtin_privileges(

    Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/privilege/_builtin" @@ -1749,7 +1749,7 @@ def get_privileges( - ``_ + ``_ :param application: The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, @@ -1805,7 +1805,7 @@ def get_role( The get roles API cannot retrieve roles that are defined in roles files.

    - ``_ + ``_ :param name: The name of the role. You can specify multiple roles as a comma-separated list. If you do not specify this parameter, the API returns information about @@ -1856,7 +1856,7 @@ def get_role_mapping( The get role mappings API cannot retrieve role mappings that are defined in role mapping files.

    - ``_ + ``_ :param name: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does @@ -1909,7 +1909,7 @@ def get_service_accounts(

    NOTE: Currently, only the elastic/fleet-server service account is available.

    - ``_ + ``_ :param namespace: The name of the namespace. Omit this parameter to retrieve information about all service accounts. If you omit this parameter, you must @@ -1967,7 +1967,7 @@ def get_service_credentials( Tokens with the same name from different nodes are assumed to be the same token and are only counted once towards the total number of service tokens.

    - ``_ + ``_ :param namespace: The name of the namespace. :param service: The service name. @@ -2023,7 +2023,7 @@ def get_settings( - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and @@ -2099,7 +2099,7 @@ def get_token( If you want to invalidate a token immediately, you can do so by using the invalidate token API.

    - ``_ + ``_ :param grant_type: The type of grant. Supported grant types are: `password`, `_kerberos`, `client_credentials`, and `refresh_token`. @@ -2173,7 +2173,7 @@ def get_user(

    Get information about users in the native realm and built-in users.

    - ``_ + ``_ :param username: An identifier for the user. You can specify multiple usernames as a comma-separated list. If you omit this parameter, the API retrieves @@ -2213,13 +2213,10 @@ def get_user( def get_user_privileges( self, *, - application: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, - priviledge: t.Optional[str] = None, - username: t.Optional[t.Union[None, str]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html @@ -2231,20 +2228,11 @@ def get_user_privileges( To check whether a user has a specific list of privileges, use the has privileges API.

-        ``_
-
-        :param application: The name of the application. Application privileges are always
-            associated with exactly one application. If you do not specify this parameter,
-            the API returns information about all privileges for all applications.
-        :param priviledge: The name of the privilege. If you do not specify this parameter,
-            the API returns information about all privileges for the requested application.
-        :param username:
+        ``_
        """
        __path_parts: t.Dict[str, str] = {}
        __path = "/_security/user/_privileges"
        __query: t.Dict[str, t.Any] = {}
-        if application is not None:
-            __query["application"] = application
        if error_trace is not None:
            __query["error_trace"] = error_trace
        if filter_path is not None:
@@ -2253,10 +2241,6 @@ def get_user_privileges(
            __query["human"] = human
        if pretty is not None:
            __query["pretty"] = pretty
-        if priviledge is not None:
-            __query["priviledge"] = priviledge
-        if username is not None:
-            __query["username"] = username
        __headers = {"accept": "application/json"}
        return self.perform_request(  # type: ignore[return-value]
            "GET",
@@ -2288,7 +2272,7 @@ def get_user_profile(
            Elastic reserves the right to change or remove this feature in future releases
            without prior notice.
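Since the hunk above removes the application, priviledge, and username arguments, a call to this endpoint now takes no filtering parameters. A minimal sketch, assuming a configured `Elasticsearch` client named `client`:

    # Reports the privileges of the authenticated user only.
    privileges = client.security.get_user_privileges()
    print(privileges["cluster"])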

-        ``_
+        ``_

        :param uid: A unique identifier for the user profile.
        :param data: A comma-separated list of filters for the `data` field of the profile
@@ -2345,6 +2329,9 @@ def grant_api_key(
        human: t.Optional[bool] = None,
        password: t.Optional[str] = None,
        pretty: t.Optional[bool] = None,
+        refresh: t.Optional[
+            t.Union[bool, str, t.Literal["false", "true", "wait_for"]]
+        ] = None,
        run_as: t.Optional[str] = None,
        username: t.Optional[str] = None,
        body: t.Optional[t.Dict[str, t.Any]] = None,
    ) -> ObjectApiResponse[t.Any]:
        """
        .. raw:: html

@@ -2372,7 +2359,7 @@ def grant_api_key(

    By default, API keys never expire. You can specify expiration information when you create the API keys.

-        ``_
+        ``_

        :param api_key: The API key.
        :param grant_type: The type of grant. Supported grant types are: `access_token`,
@@ -2382,6 +2369,9 @@ def grant_api_key(
            types.
        :param password: The user's password. If you specify the `password` grant type,
            this parameter is required. It is not valid with other grant types.
+        :param refresh: If 'true', Elasticsearch refreshes the affected shards to make
+            this operation visible to search. If 'wait_for', it waits for a refresh to
+            make this operation visible to search. If 'false', nothing is done with refreshes.
        :param run_as: The name of the user to be impersonated.
        :param username: The user name that identifies the user. If you specify the `password`
            grant type, this parameter is required. It is not valid with other grant
@@ -2403,6 +2393,8 @@ def grant_api_key(
            __query["human"] = human
        if pretty is not None:
            __query["pretty"] = pretty
+        if refresh is not None:
+            __query["refresh"] = refresh
        if not __body:
            if api_key is not None:
                __body["api_key"] = api_key
@@ -2519,7 +2511,7 @@ def has_privileges(
            To check the privileges of other users, you must use the run as feature.
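To show how the new refresh parameter fits in, here is a hedged sketch of granting an API key on behalf of another user, assuming a configured `Elasticsearch` client named `client`; the usernames, password, and key name are illustrative assumptions:

    resp = client.security.grant_api_key(
        grant_type="password",
        username="end_user",
        password="end_user_password",
        api_key={"name": "granted-key"},
        refresh="wait_for",  # new parameter: wait until the key is visible to search
    )
    print(resp["id"])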

    - ``_ + ``_ :param user: Username :param application: @@ -2584,7 +2576,7 @@ def has_privileges_user_profile( Elastic reserves the right to change or remove this feature in future releases without prior notice.

    - ``_ + ``_ :param privileges: An object containing all the privileges to be checked. :param uids: A list of profile IDs. The privileges are checked for associated @@ -2658,7 +2650,7 @@ def invalidate_api_key( - ``_ + ``_ :param id: :param ids: A list of API key ids. This parameter cannot be used with any of @@ -2742,7 +2734,7 @@ def invalidate_token( If none of these two are specified, then realm_name and/or username need to be specified.

    - ``_ + ``_ :param realm_name: The name of an authentication realm. This parameter cannot be used with either `refresh_token` or `token`. @@ -2810,7 +2802,7 @@ def oidc_authenticate( These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.

    - ``_ + ``_ :param nonce: Associate a client session with an ID token and mitigate replay attacks. This value needs to be the same as the one that was provided to @@ -2890,7 +2882,7 @@ def oidc_logout( These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.

    - ``_ + ``_ :param token: The access token to be invalidated. :param refresh_token: The refresh token to be invalidated. @@ -2952,7 +2944,7 @@ def oidc_prepare_authentication( These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.

    - ``_ + ``_ :param iss: In the case of a third party initiated single sign on, this is the issuer identifier for the OP that the RP is to send the authentication request @@ -3048,7 +3040,7 @@ def put_privileges(

    Action names can contain any number of printable ASCII characters and must contain at least one of the following characters: /, *, :.

    - ``_ + ``_ :param privileges: :param refresh: If `true` (the default) then refresh the affected shards to make @@ -3200,7 +3192,7 @@ def put_role( File-based role management is not available in Elastic Serverless.

    - ``_ + ``_ :param name: The name of the role. :param applications: A list of application privilege entries. @@ -3332,7 +3324,7 @@ def put_role_mapping( If the format of the template is set to "json" then the template is expected to produce a JSON string or an array of JSON strings for the role names.

    - ``_ + ``_ :param name: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does @@ -3434,7 +3426,7 @@ def put_user( To change a user's password without updating any other fields, use the change password API.

    - ``_ + ``_ :param username: An identifier for the user. NOTE: Usernames must be at least 1 and no more than 507 characters. They can contain alphanumeric characters @@ -3553,7 +3545,7 @@ def query_api_keys( If you have the read_security, manage_api_key, or greater privileges (including manage_security), this API returns all API keys regardless of ownership.

    - ``_ + ``_ :param aggregations: Any aggregations to run over the corpus of returned API keys. Aggregations and queries work together. Aggregations are computed only @@ -3696,7 +3688,7 @@ def query_role( Also, the results can be paginated and sorted.

    - ``_ + ``_ :param from_: The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` @@ -3789,7 +3781,7 @@ def query_user( This API is only for native users.

    - ``_ + ``_ :param from_: The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` @@ -3882,7 +3874,7 @@ def saml_authenticate( This API endpoint essentially exchanges SAML responses that indicate successful authentication in the IdP for Elasticsearch access and refresh tokens, which can be used for authentication against Elasticsearch.

    - ``_ + ``_ :param content: The SAML response as it was sent by the user's browser, usually a Base64 encoded XML document. @@ -3955,7 +3947,7 @@ def saml_complete_logout( The caller of this API must prepare the request accordingly so that this API can handle either of them.

    - ``_ + ``_ :param ids: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. @@ -4031,7 +4023,7 @@ def saml_invalidate( Thus the user can be redirected back to their IdP.

    - ``_ + ``_ :param query_string: The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout. This query should include @@ -4106,7 +4098,7 @@ def saml_logout( If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP supports this, the Elasticsearch response contains a URL to redirect the user to the IdP that contains a SAML logout request (starting an SP-initiated SAML Single Logout).

    - ``_ + ``_ :param token: The access token that was returned as a response to calling the SAML authenticate API. Alternatively, the most recent token that was received @@ -4176,7 +4168,7 @@ def saml_prepare_authentication( The caller of this API needs to store this identifier as it needs to be used in a following step of the authentication process.

    - ``_ + ``_ :param acs: The Assertion Consumer Service URL that matches the one of the SAML realms in Elasticsearch. The realm is used to generate the authentication @@ -4237,7 +4229,7 @@ def saml_service_provider_metadata( This API generates Service Provider metadata based on the configuration of a SAML realm in Elasticsearch.

    - ``_ + ``_ :param realm_name: The name of the SAML realm in Elasticsearch. """ @@ -4290,7 +4282,7 @@ def suggest_user_profiles( Elastic reserves the right to change or remove this feature in future releases without prior notice.

    - ``_ + ``_ :param data: A comma-separated list of filters for the `data` field of the profile document. To return all content use `data=*`. To return a subset of content, @@ -4377,7 +4369,7 @@ def update_api_key( This change can occur if the owner user's permissions have changed since the API key was created or last modified.

    - ``_ + ``_ :param id: The ID of the API key to update. :param expiration: The expiration time for the API key. By default, API keys @@ -4465,7 +4457,7 @@ def update_cross_cluster_api_key(

    NOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API.

    - ``_ + ``_ :param id: The ID of the cross-cluster API key to update. :param access: The access to be granted to this API key. The access is composed @@ -4544,7 +4536,7 @@ def update_settings( This API does not yet support configuring the settings for indices before they are in use.

    - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails @@ -4629,7 +4621,7 @@ def update_user_profile_data( The update_profile_data global privilege grants privileges for updating only the allowed namespaces.

    - ``_ + ``_ :param uid: A unique identifier for the user profile. :param data: Non-searchable data that you want to associate with the user profile. diff --git a/elasticsearch/_sync/client/shutdown.py b/elasticsearch/_sync/client/shutdown.py index 9b30f3f51..6d0f2da87 100644 --- a/elasticsearch/_sync/client/shutdown.py +++ b/elasticsearch/_sync/client/shutdown.py @@ -53,7 +53,7 @@ def delete_node(

    If the operator privileges feature is enabled, you must be an operator to use this API.

    - ``_ + ``_ :param node_id: The node id of node to be removed from the shutdown state :param master_timeout: Period to wait for a connection to the master node. If @@ -112,7 +112,7 @@ def get_node(

    If the operator privileges feature is enabled, you must be an operator to use this API.

    - ``_ + ``_ :param node_id: Which node for which to retrieve the shutdown status :param master_timeout: Period to wait for a connection to the master node. If @@ -187,7 +187,7 @@ def put_node( Monitor the node shutdown status to determine when it is safe to stop Elasticsearch.

    - ``_ + ``_ :param node_id: The node identifier. This parameter is not validated against the cluster's active nodes. This enables you to register a node for shut diff --git a/elasticsearch/_sync/client/simulate.py b/elasticsearch/_sync/client/simulate.py index 939754394..3a32869ee 100644 --- a/elasticsearch/_sync/client/simulate.py +++ b/elasticsearch/_sync/client/simulate.py @@ -81,7 +81,7 @@ def ingest( These will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request.

    - ``_ + ``_ :param docs: Sample documents to test in the pipeline. :param index: The index to simulate ingesting into. This value can be overridden diff --git a/elasticsearch/_sync/client/slm.py b/elasticsearch/_sync/client/slm.py index 9196bc57c..8647cd972 100644 --- a/elasticsearch/_sync/client/slm.py +++ b/elasticsearch/_sync/client/slm.py @@ -45,7 +45,7 @@ def delete_lifecycle( This operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots.

    - ``_ + ``_ :param policy_id: The id of the snapshot lifecycle policy to remove :param master_timeout: The period to wait for a connection to the master node. @@ -101,7 +101,7 @@ def execute_lifecycle( The snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance.

    - ``_ + ``_ :param policy_id: The id of the snapshot lifecycle policy to be executed :param master_timeout: The period to wait for a connection to the master node. @@ -156,7 +156,7 @@ def execute_retention( The retention policy is normally applied according to its schedule.

    - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails @@ -208,7 +208,7 @@ def get_lifecycle( Get snapshot lifecycle policy definitions and information about the latest snapshot attempts.

    - ``_ + ``_ :param policy_id: Comma-separated list of snapshot lifecycle policies to retrieve :param master_timeout: The period to wait for a connection to the master node. @@ -265,7 +265,7 @@ def get_stats( Get global and policy-level statistics about actions taken by snapshot lifecycle management.

    - ``_ + ``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and @@ -315,7 +315,7 @@ def get_status(

    Get the snapshot lifecycle management status.

    - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails @@ -379,7 +379,7 @@ def put_lifecycle( Only the latest version of a policy is stored.

    - ``_ + ``_ :param policy_id: The identifier for the snapshot lifecycle policy you want to create or update. @@ -465,7 +465,7 @@ def start( Manually starting SLM is necessary only if it has been stopped using the stop SLM API.

    - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails @@ -523,7 +523,7 @@ def stop( Use the get snapshot lifecycle management status API to see if SLM is running.

    - ``_ + ``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails diff --git a/elasticsearch/_sync/client/snapshot.py b/elasticsearch/_sync/client/snapshot.py index 99d89a35b..8c98d6cd4 100644 --- a/elasticsearch/_sync/client/snapshot.py +++ b/elasticsearch/_sync/client/snapshot.py @@ -50,7 +50,7 @@ def cleanup_repository( Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots.

    - ``_ + ``_ :param name: Snapshot repository to clean up. :param master_timeout: Period to wait for a connection to the master node. @@ -107,7 +107,7 @@ def clone( Clone part of all of a snapshot into another snapshot in the same repository.

    - ``_ + ``_ :param repository: A repository name :param snapshot: The name of the snapshot to clone from @@ -191,7 +191,7 @@ def create( Take a snapshot of a cluster or of data streams and indices.

-        ``_
+        ``_

        :param repository: Repository for the snapshot.
        :param snapshot: Name of the snapshot. Must be unique in the repository.
@@ -301,7 +301,7 @@ def create_repository(
            Ensure there are no cluster blocks (for example, cluster.blocks.read_only and
            cluster.blocks.read_only_allow_delete settings) that prevent write access.

-        ``_
+        ``_

        :param name: A repository name
        :param repository:
@@ -357,6 +357,7 @@ def delete(
        human: t.Optional[bool] = None,
        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
        pretty: t.Optional[bool] = None,
+        wait_for_completion: t.Optional[bool] = None,
    ) -> ObjectApiResponse[t.Any]:
        """
        .. raw:: html

@@ -364,11 +365,14 @@ def delete(

    Delete snapshots.

-        ``_
+        ``_

        :param repository: A repository name
        :param snapshot: A comma-separated list of snapshot names
        :param master_timeout: Explicit operation timeout for connection to master node
+        :param wait_for_completion: If `true`, the request returns a response when the
+            matching snapshots are all deleted. If `false`, the request returns a response
+            as soon as the deletes are scheduled.
        """
        if repository in SKIP_IN_PATH:
            raise ValueError("Empty value passed for parameter 'repository'")
@@ -390,6 +394,8 @@ def delete(
            __query["master_timeout"] = master_timeout
        if pretty is not None:
            __query["pretty"] = pretty
+        if wait_for_completion is not None:
+            __query["wait_for_completion"] = wait_for_completion
        __headers = {"accept": "application/json"}
        return self.perform_request(  # type: ignore[return-value]
            "DELETE",
@@ -420,7 +426,7 @@ def delete_repository(
            The snapshots themselves are left untouched and in place.
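A short sketch of the new wait_for_completion flag on snapshot deletion, assuming a configured `Elasticsearch` client named `client` and snapshots with the illustrative names below:

    # Return as soon as the deletes are scheduled rather than waiting for them to finish.
    client.snapshot.delete(
        repository="my_repository",
        snapshot="snapshot_1,snapshot_2",
        wait_for_completion=False,
    )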

    - ``_ + ``_ :param name: Name of the snapshot repository to unregister. Wildcard (`*`) patterns are supported. @@ -497,7 +503,7 @@ def get(

    Get snapshot information.

    - ``_ + ``_ :param repository: Comma-separated list of snapshot repository names used to limit the request. Wildcard (*) expressions are supported. @@ -612,7 +618,7 @@ def get_repository(

    Get snapshot repository information.

    - ``_ + ``_ :param name: A comma-separated list of repository names :param local: Return local information, do not retrieve the state from master @@ -750,7 +756,7 @@ def repository_analyze( Some operations also verify the behavior on small blobs with sizes other than 8 bytes.

    - ``_ + ``_ :param name: The name of the repository. :param blob_count: The total number of blobs to write to the repository during @@ -877,7 +883,7 @@ def repository_verify_integrity(

    NOTE: This API may not work correctly in a mixed-version cluster.

    - ``_ + ``_ :param name: A repository name :param blob_thread_pool_concurrency: Number of threads to use for reading blob @@ -987,7 +993,7 @@ def restore(

    If your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot.

    - ``_ + ``_ :param repository: A repository name :param snapshot: A snapshot name @@ -1091,7 +1097,7 @@ def status( These requests can also tax machine resources and, when using cloud storage, incur high processing costs.

    - ``_ + ``_ :param repository: A repository name :param snapshot: A comma-separated list of snapshot names @@ -1154,7 +1160,7 @@ def verify_repository( Check for common misconfigurations in a snapshot repository.

    - ``_ + ``_ :param name: A repository name :param master_timeout: Explicit operation timeout for connection to master node diff --git a/elasticsearch/_sync/client/sql.py b/elasticsearch/_sync/client/sql.py index ecdb49a22..cde458be5 100644 --- a/elasticsearch/_sync/client/sql.py +++ b/elasticsearch/_sync/client/sql.py @@ -44,7 +44,7 @@ def clear_cursor(

    Clear an SQL search cursor.

    - ``_ + ``_ :param cursor: Cursor to clear. """ @@ -99,7 +99,7 @@ def delete_async( - ``_ + ``_ :param id: The identifier for the search. """ @@ -150,7 +150,7 @@ def get_async(
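As a hedged example of pairing this endpoint with an SQL search, the sketch below runs a paginated query and then releases its cursor; the index name, query, and fetch size are assumptions, and `client` is a configured `Elasticsearch` instance as in the earlier examples:

    # Run a paginated SQL search, then release the server-side cursor early.
    resp = client.sql.query(
        query="SELECT * FROM library ORDER BY page_count DESC",
        fetch_size=5,
    )
    cursor = resp.body.get("cursor")
    if cursor:  # a cursor is returned only when more pages remain
        client.sql.clear_cursor(cursor=cursor)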

    If the Elasticsearch security features are enabled, only the user who first submitted the SQL search can retrieve the search using this API.

    - ``_ + ``_ :param id: The identifier for the search. :param delimiter: The separator for CSV results. The API supports this parameter @@ -212,7 +212,7 @@ def get_async_status( Get the current status of an async SQL search or a stored synchronous SQL search.

    - ``_ + ``_ :param id: The identifier for the search. """ @@ -301,7 +301,7 @@ def query( Run an SQL request.

    - ``_ + ``_ :param allow_partial_search_results: If `true`, the response has partial results when there are shard request timeouts or shard failures. If `false`, the @@ -427,7 +427,7 @@ def translate( It accepts the same request body parameters as the SQL search API, excluding cursor.

    - ``_ + ``_ :param query: The SQL query to run. :param fetch_size: The maximum number of rows (or entries) to return in one response. diff --git a/elasticsearch/_sync/client/ssl.py b/elasticsearch/_sync/client/ssl.py index d65003825..69e531aab 100644 --- a/elasticsearch/_sync/client/ssl.py +++ b/elasticsearch/_sync/client/ssl.py @@ -52,7 +52,7 @@ def certificates(

    If Elasticsearch is configured to use a keystore or truststore, the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster.

    - ``_ + ``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ssl/certificates" diff --git a/elasticsearch/_sync/client/synonyms.py b/elasticsearch/_sync/client/synonyms.py index e58ab5ee4..837b39eca 100644 --- a/elasticsearch/_sync/client/synonyms.py +++ b/elasticsearch/_sync/client/synonyms.py @@ -53,7 +53,7 @@ def delete_synonym( When the synonyms set is not used in analyzers, you will be able to delete it.

    - ``_ + ``_ :param id: The synonyms set identifier to delete. """ @@ -98,7 +98,7 @@ def delete_synonym_rule( Delete a synonym rule from a synonym set.

    - ``_ + ``_ :param set_id: The ID of the synonym set to update. :param rule_id: The ID of the synonym rule to delete. @@ -151,7 +151,7 @@ def get_synonym(

    Get a synonym set.

    - ``_ + ``_ :param id: The synonyms set identifier to retrieve. :param from_: The starting offset for query rules to retrieve. @@ -202,7 +202,7 @@ def get_synonym_rule( Get a synonym rule from a synonym set.

    - ``_ + ``_ :param set_id: The ID of the synonym set to retrieve the synonym rule from. :param rule_id: The ID of the synonym rule to retrieve. @@ -255,7 +255,7 @@ def get_synonyms_sets( Get a summary of all defined synonym sets.

    - ``_ + ``_ :param from_: The starting offset for synonyms sets to retrieve. :param size: The maximum number of synonyms sets to retrieve. @@ -311,7 +311,7 @@ def put_synonym( This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set.

    - ``_ + ``_ :param id: The ID of the synonyms set to be created or updated. :param synonyms_set: The synonym rules definitions for the synonyms set. @@ -370,7 +370,7 @@ def put_synonym_rule(

    When you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule.

    - ``_ + ``_ :param set_id: The ID of the synonym set. :param rule_id: The ID of the synonym rule to be updated or created. diff --git a/elasticsearch/_sync/client/tasks.py b/elasticsearch/_sync/client/tasks.py index 758925370..946b911ba 100644 --- a/elasticsearch/_sync/client/tasks.py +++ b/elasticsearch/_sync/client/tasks.py @@ -60,7 +60,7 @@ def cancel( You can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task.

    - ``_ + ``_ :param task_id: The task identifier. :param actions: A comma-separated list or wildcard expression of actions that @@ -128,7 +128,7 @@ def get(

    If the task identifier is not found, a 404 response code indicates that there are no resources that match the request.

    - ``_ + ``_ :param task_id: The task identifier. :param timeout: The period to wait for a response. If no response is received @@ -238,7 +238,7 @@ def list( The X-Opaque-Id in the children headers is the child task of the task that was initiated by the REST request.

-        ``_
+        ``_

        :param actions: A comma-separated list or wildcard expression of actions used
            to limit the request. For example, you can use `cluster:*` to retrieve all
diff --git a/elasticsearch/_sync/client/text_structure.py b/elasticsearch/_sync/client/text_structure.py
index b5c7b67d2..4050bcc16 100644
--- a/elasticsearch/_sync/client/text_structure.py
+++ b/elasticsearch/_sync/client/text_structure.py
@@ -72,7 +72,7 @@ def find_field_structure(
            It helps determine why the returned structure was chosen.

    - ``_ + ``_ :param field: The field that should be analyzed. :param index: The name of the index that contains the analyzed field. @@ -259,7 +259,7 @@ def find_message_structure( It helps determine why the returned structure was chosen.

    - ``_ + ``_ :param messages: The list of messages you want to analyze. :param column_names: If the format is `delimited`, you can specify the column @@ -433,7 +433,7 @@ def find_structure( However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.

    - ``_ + ``_ :param text_files: :param charset: The text's character set. It must be a character set that is @@ -620,7 +620,7 @@ def test_grok_pattern( The API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings.

    - ``_ + ``_ :param grok_pattern: The Grok pattern to run on the text. :param text: The lines of text to run the Grok pattern on. diff --git a/elasticsearch/_sync/client/transform.py b/elasticsearch/_sync/client/transform.py index 13ad2c232..7afb9d45d 100644 --- a/elasticsearch/_sync/client/transform.py +++ b/elasticsearch/_sync/client/transform.py @@ -44,7 +44,7 @@ def delete_transform(

    Delete a transform.

    - ``_ + ``_ :param transform_id: Identifier for the transform. :param delete_dest_index: If this value is true, the destination index is deleted @@ -108,7 +108,7 @@ def get_transform( Get configuration information for transforms.

    - ``_ + ``_ :param transform_id: Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using @@ -181,7 +181,7 @@ def get_transform_stats(

    Get usage information for transforms.

    - ``_ + ``_ :param transform_id: Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using @@ -269,7 +269,7 @@ def preview_transform( types of the source index and the transform aggregations.

    - ``_ + ``_ :param transform_id: Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform configuration details in @@ -406,7 +406,7 @@ def put_transform( give users any privileges on .data-frame-internal* indices.

    - ``_ + ``_ :param transform_id: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -512,7 +512,7 @@ def reset_transform( If the destination index was created by the transform, it is deleted.

    - ``_ + ``_ :param transform_id: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. @@ -572,7 +572,7 @@ def schedule_now_transform( is called again in the meantime.

    - ``_ + ``_ :param transform_id: Identifier for the transform. :param timeout: Controls the time to wait for the scheduling to take place @@ -635,7 +635,7 @@ def start_transform( destination indices, the transform fails when it attempts unauthorized operations.
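For completeness, a minimal sketch of scheduling an immediate run of a transform, assuming a configured `Elasticsearch` client named `client`; the transform id and timeout are assumed values:

    # Trigger the transform now instead of waiting for its next scheduled check.
    client.transform.schedule_now_transform(transform_id="my-transform", timeout="30s")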

    - ``_ + ``_ :param transform_id: Identifier for the transform. :param from_: Restricts the set of transformed entities to those changed after @@ -693,7 +693,7 @@ def stop_transform( Stops one or more transforms.

    - ``_ + ``_ :param transform_id: Identifier for the transform. To stop multiple transforms, use a comma-separated list or a wildcard expression. To stop all transforms, diff --git a/elasticsearch/_sync/client/xpack.py b/elasticsearch/_sync/client/xpack.py index 73121d5cb..36b5fdecf 100644 --- a/elasticsearch/_sync/client/xpack.py +++ b/elasticsearch/_sync/client/xpack.py @@ -54,7 +54,7 @@ def info( - ``_ + ``_ :param accept_enterprise: If this param is used it must be set to true :param categories: A comma-separated list of the information categories to include diff --git a/elasticsearch/dsl/types.py b/elasticsearch/dsl/types.py index 5d7b88b32..2e616f644 100644 --- a/elasticsearch/dsl/types.py +++ b/elasticsearch/dsl/types.py @@ -5099,9 +5099,11 @@ def buckets_as_dict(self) -> Mapping[str, "FiltersBucket"]: class FiltersBucket(AttrDict[Any]): """ :arg doc_count: (required) + :arg key: """ doc_count: int + key: str class FrequentItemSetsAggregate(AttrDict[Any]): From 3808515ab5faf074bd73e9926fda5b2cb04ce091 Mon Sep 17 00:00:00 2001 From: Miguel Grinberg Date: Wed, 30 Jul 2025 11:15:51 +0100 Subject: [PATCH 62/65] Release 8.19.0 (#3014) * Release 8.19.0 * updated examples --- .../00fea15cbca83be9d5f1a024ff2ec708.asciidoc | 2 +- .../010d5e901a2690fa7b2396edbe6cd463.asciidoc | 2 +- .../015e6e6132b6d6d44bddb06bc3b316ed.asciidoc | 2 +- .../0165d22da5f2fc7678392b31d8eb5566.asciidoc | 2 +- .../01ae196538fac197eedbbf458a4ef31b.asciidoc | 2 +- .../01b23f09d2b7f140faf649eadbbf3ac3.asciidoc | 2 +- .../01cd0ea360282a2c591a366679d7187d.asciidoc | 2 +- .../0246f73cc2ed3dfec577119e8cd15404.asciidoc | 2 +- .../02b6aa3e5652839f03de3a655854b897.asciidoc | 2 +- .../0350410d11579f4e876c798ce1eaef5b.asciidoc | 2 +- .../03891265df2111a38e0b6b24c1b967e1.asciidoc | 2 +- .../0737ebaea33631f001fb3f4226948492.asciidoc | 2 +- .../074e4602d1ca54412380a40867d078bc.asciidoc | 2 +- .../07dadb9b0a774bd8e7f3527cf8a44afc.asciidoc | 2 +- .../083b92e8ea264e49bf9fd40fc6a3094b.asciidoc | 2 +- .../0a650401134f07e40216f0d0d1a66a32.asciidoc | 2 +- .../0bc6155e0c88062a4d8490da49db3aa8.asciidoc | 2 +- .../0bee07a581c5776e068f6f4efad5a399.asciidoc | 2 +- .../0c2d9ac7e3f28d4d802e21cbbbcfeb34.asciidoc | 2 +- .../0d689ac6e78be5d438f9b5d441be2b44.asciidoc | 2 +- .../0dfa9733c94bc43c6f14c7b6984c98fb.asciidoc | 2 +- .../0e31b8ad176b31028becf9500989bcbd.asciidoc | 2 +- .../0e71a18d1aac61720cdc6b3f91fe643f.asciidoc | 2 +- .../0f028f71f04c1d569fab402869565a84.asciidoc | 15 ---- .../0f7aa40ad26d59a9268630b980a3d594.asciidoc | 2 +- .../0fc4b589df5388da784c6d981e769e31.asciidoc | 2 +- .../103296e16b4233926ad1f07360385606.asciidoc | 2 +- .../10f0c8fed98455c460c374b50ffbb204.asciidoc | 2 +- .../12433d2b637d002e8d5c9a1adce69d3b.asciidoc | 2 +- .../12adea5d76f73d94d80d42f53f67563f.asciidoc | 11 --- .../13d90ba227131aefbf4fcfd5992e662a.asciidoc | 2 +- .../148edc235fcfbc263561f87f5533e688.asciidoc | 2 +- .../151d2b11807ec684b0c01aa89189a801.asciidoc | 2 +- .../162b5b693b713f0bfab1209d59443c46.asciidoc | 2 +- .../17b1647c8509543f2388c886f2584a20.asciidoc | 2 +- .../187733e50c60350f3f75921bea3b72c2.asciidoc | 2 +- .../18de6782bd18f4a9baec2feec8c02a8b.asciidoc | 18 ++++ .../1a7483796087053ba55029d0dc2ab356.asciidoc | 2 +- .../1a9efb56adb2cd84faa9825a129381b9.asciidoc | 2 +- .../1aa91d3d48140d6367b6cabca8737b8f.asciidoc | 2 +- ...1af9742c71ce0587cd49a73ec7fc1f6c.asciidoc} | 2 +- .../1ba7afe23a26fe9ac7856d8c5bc1059d.asciidoc | 2 +- ...1cbaa43d1c741ddd8a13e21791709527.asciidoc} | 4 +- .../1d9b695a17cffd910c496c9b03c75d6f.asciidoc | 2 +- 
.../1e0b85750d4e63ebbc927d4627c44bf8.asciidoc | 2 +- .../1e26353d546d733634187b8c3a7837a7.asciidoc | 2 +- .../20005d8a6555b259b299d862cd218701.asciidoc | 2 +- .../216e24f05cbb82c1718713fbab8623d2.asciidoc | 2 +- .../2171361cd8e70dcde91bdb9bbd8702f1.asciidoc | 30 +++++++ .../21d41e8cbd107fbdf0901f885834dafc.asciidoc | 2 +- .../222e49c924ca8bac7b41bc952a39261c.asciidoc | 2 +- .../246763219ec06172f7aa57bba28d344a.asciidoc | 2 +- .../24f4dfdf9922d5aa79151675b7767742.asciidoc | 2 +- .../25576b6773322f0929d4c635a940dba0.asciidoc | 2 +- .../270549e6b062228312c4e7a54a2c2209.asciidoc | 2 +- .../272e27bf1fcc4fe5dbd4092679dd0342.asciidoc | 11 --- .../28543836b62b5622a402e6f7731d68f0.asciidoc | 2 +- .../2864a24608b3ac59d21f604f8a31d131.asciidoc | 2 +- .../2a21674c40f9b182a8944769d20b2357.asciidoc | 2 +- .../2a71e2d7f7179dd76183d30789046808.asciidoc | 2 +- .../2afd49985950cbcccf727fa858d00067.asciidoc | 2 +- .../2afdf0d83724953aa2875b5fb37d60cc.asciidoc | 2 +- .../2b7687e3d7c06824950e00618c297864.asciidoc | 2 +- .../2bb41b0b4876ce98cd0cd8fb6d337f18.asciidoc | 16 ++++ .../2c23285eb9f52010ecb1d5cab640ff78.asciidoc | 29 ++++++ .../2c3207c0c985d253b2ecccc14e69e25a.asciidoc | 2 +- ...2cd8439db5054c93c49f1bf50433e1bb.asciidoc} | 1 + .../2d0244c020075595acb625aa5ba8f455.asciidoc | 2 +- .../2dad2b0c8ba503228f4b11cecca0b348.asciidoc | 2 +- .../2ee239df3243c98418f7d9a5c7be4cfd.asciidoc | 2 +- .../2f2fd35905feef0b561c05d70c7064c1.asciidoc | 2 +- .../2f67db5e4d6c958258c3d70fb2d0b1c8.asciidoc | 13 +++ .../2f72a63c73dd672ac2dc3997ad15dd41.asciidoc | 2 +- .../2f9574fee2ebecd6f7d917ee99b26bcc.asciidoc | 2 +- .../2f98924c3d593ea2b60edb9cef5bee22.asciidoc | 2 +- .../2fea3e324939cc7e9c396964aeee7111.asciidoc | 2 +- .../30d051f534aeb884176eedb2c11dac85.asciidoc | 2 +- .../30f3e3b9df46afd12e68bc71f18483b4.asciidoc | 2 +- .../3166455372f2d96622caff076e91ebe7.asciidoc | 2 +- .../31bc93e429ad0de11dd2dd231e8f2c5e.asciidoc | 10 +++ .../320645d771e952af2a67bb7445c3688d.asciidoc | 2 +- .../327466380bcd55361973b4a96c6dccb2.asciidoc | 2 +- .../3312c82f81816bf76629db9582991812.asciidoc | 2 +- .../334811cfceb6858aeec5b3461717dd63.asciidoc | 2 +- .../346f28d82acb5427c304aa574fea0008.asciidoc | 2 +- ...35b686d9d9e915d0dea7a4251781767d.asciidoc} | 1 + .../36792c81c053e0555407d1e83e7e054f.asciidoc | 2 +- .../36d229f734adcdab00be266a7ce038b1.asciidoc | 2 +- .../3758b8f2ab9f6f28a764ee6c42c85766.asciidoc | 2 +- .../37c73410bf13429279cbc61a413957d8.asciidoc | 2 +- .../39ce44333d28ed2b833722d3e3cb06f3.asciidoc | 2 +- .../3a204b57072a104d9b50f3a9e064a8f6.asciidoc | 19 ---- .../3afc6dacf90b42900ab571aad8a61d75.asciidoc | 2 +- .../3bc4a3681e3ea9cb3de49f72085807d8.asciidoc | 2 +- .../3c7621a81fa982b79f040a6d2611530e.asciidoc | 2 +- .../3cd93a48906069709b76420c66930c01.asciidoc | 2 +- .../3d924850fb2372fff4739fd145660f88.asciidoc | 20 +++++ .../3faf5e2873de340acfe0a617017db784.asciidoc | 2 +- .../40b73b5c7ca144dc3f63f5b741f33d80.asciidoc | 2 +- .../40bd86e400d27e68b8f0ae580c29d32d.asciidoc | 2 +- .../40c3e7bb1fdc125a1ab21bd7d7326694.asciidoc | 2 +- .../40f97f70e8e743c6a6296c81b920aeb0.asciidoc | 2 +- .../425eaaf9c7e3b1e77a3474fbab4183b4.asciidoc | 2 +- .../4342ccf6cc24fd80bd3cd1f9a4c2ef8e.asciidoc | 2 +- .../43d9e314431336a6f084cea76dfd6489.asciidoc | 2 +- ...45b0b2420c807bcb31fcb051daa7099a.asciidoc} | 3 +- .../46276fbcba7b5e9541dd56ec3f20cf2a.asciidoc | 18 ++++ .../46658f00edc4865dfe472a392374cd0f.asciidoc | 2 +- .../472ec8c57fec8457e31fe6dd7f6e3713.asciidoc | 2 +- .../47909e194d10743093f4a22c27a85925.asciidoc | 2 +- 
.../488f6df1df71972392b670ce557f7ff3.asciidoc | 2 +- .../49b31e23f8b9667b6a7b2734d55fb6ed.asciidoc | 24 +++++ .../49e8773a34fcbf825de38426cff5509c.asciidoc | 2 +- ...4ab7c13d27ec8f9e6705a0e77fe53c20.asciidoc} | 4 +- .../4b3a49710fafa35d6d41a8ec12434515.asciidoc | 2 +- .../4bef98a2dac575a50ee0783c2269f1db.asciidoc | 2 +- .../4c174e228b6b74497b73ef2be80de7ad.asciidoc | 2 +- .../4c3db8987d7b2d3d3df78ff1e71e7ede.asciidoc | 2 +- .../4c9350ed09b28f00e297ebe73c3b95a2.asciidoc | 2 +- .../4c95d54b32df4dc49e9762b6c1ae2c05.asciidoc | 2 +- .../4ca15672fc5ab1d80a127d086b6d2837.asciidoc | 2 +- .../4dab4c5168047ba596af1beb0e55b845.asciidoc | 10 +++ .../4e50d9d25bfb07ac73e3a2be5d2fbbf7.asciidoc | 2 +- .../4ed946065faa92f9950f04e402676a97.asciidoc | 2 +- .../4f6694ef147a73b1163bde3c13779d26.asciidoc | 2 +- .../50b5c0332949d2154c72b629b5fa6222.asciidoc | 2 +- .../50d9c0508ddb0fc5ba5a893eec219dd8.asciidoc | 2 +- .../51390ca10aa22d7104e8970f09ea4512.asciidoc | 33 +++++++ .../519e46350316a33162740e5d7968aa2c.asciidoc | 2 +- .../51b44224feee6e2e5974824334474c77.asciidoc | 2 +- .../51f6a62c0f0a02aa97a42c6a80542aa4.asciidoc | 28 ++++++ .../52f1c1689ab35353858cdeaab7597546.asciidoc | 2 +- .../52f4c5eb08d39f98e2e2f5527ece9731.asciidoc | 2 +- .../551799fef2f86e393db83a967e4a30d1.asciidoc | 2 +- .../56a1aa4f7fa62f2289e20607e3039bf3.asciidoc | 2 +- .../56da252798b8e7b006738428aa1a7f4c.asciidoc | 2 +- .../57a62f66b67a08c38942f0edb0fbaa26.asciidoc | 26 ++++++ .../584f502cf840134f2db5f39e2483ced1.asciidoc | 2 +- ...58fb1f324597070bee2cf854c162d388.asciidoc} | 10 ++- .../59aa5216630f80c5dc298fc5bba4a819.asciidoc | 10 --- .../5a4a6cf06b3a10f55012639166630290.asciidoc | 31 +++++++ .../5b2a13366bd4e1ab4b25d04d360570dc.asciidoc | 2 +- .../5bba213a7f543190139d1a69ab2ed076.asciidoc | 2 +- .../5daf8ede198be9b118da5bee9896cb00.asciidoc | 2 +- .../5e21dbac92f34d236a8f0cc0d3a39cdd.asciidoc | 2 +- .../5f1ed9cfdc149763b444acfbe10b0e16.asciidoc | 2 +- .../60d3f9a99cc91b43aaa7524a9a74dba0.asciidoc | 2 +- ...611c1e05f4ebb48a1a8c8488238ce34d.asciidoc} | 4 +- .../6220087321e6d288024a70c6b09bd720.asciidoc | 2 +- .../62d3c8fccb11471bdc12555c1a7777f2.asciidoc | 2 +- .../631a8d49bfbe622fd07c6868489b9fb0.asciidoc | 19 ++++ .../63a53fcb0717ae9033a679cbfc932851.asciidoc | 2 +- .../63ecdab34940af053acc409164914c32.asciidoc | 2 +- .../642161d70dacf7d153767d37d3726838.asciidoc | 2 +- .../67154a4837cf996a9a9c3e61d6e9d1b3.asciidoc | 15 ++++ .../67b71a95b6fe6c83faae51ea038a1bf1.asciidoc | 2 +- .../69541f0bb81ab3797926bb2a00607cda.asciidoc | 2 +- .../6b0288acb739c4667d41339e5100c327.asciidoc | 2 +- .../6b67c6121efb86ee100d40c2646f77b5.asciidoc | 2 +- .../6ba332596f5eb29660c90ab2d480e7dc.asciidoc | 2 +- .../6baf72c04d48cb04c2f8be609ff3b3b5.asciidoc | 2 +- .../6dcd3916679f6aa64f79524c75991ebd.asciidoc | 2 +- .../6e498b9dc753b94abf2618c407fa5cd8.asciidoc | 16 ---- .../6e6b78e6b689a5d6aa637271b6d084e2.asciidoc | 2 +- .../7039aa15511e9b876c2e00d067751e2e.asciidoc | 22 +++++ ...708fe682185b46704fce563465933ded.asciidoc} | 5 +- .../71998bb300ac2a58419b0772cdc1c586.asciidoc | 2 +- ...725c957303ead2d509f148e0428d7250.asciidoc} | 3 +- .../72abd8a82b4a8fc2fc06f54150d23b76.asciidoc | 54 ++++++++++++ .../73d1a6c5ef90b7e35d43a0bfdc1e158d.asciidoc | 2 +- .../744aeb2af40f519e430e21e004e3c3b7.asciidoc | 2 +- .../746e0a1cb5984f2672963b363505c7b3.asciidoc | 2 +- .../75e6d66e94e61bd8a555beaaee255c36.asciidoc | 2 +- .../76e02434835630cb830724beb92df354.asciidoc | 2 +- .../7709a48020a6cefbbe547fb944541cdb.asciidoc | 2 +- .../7752b677825523bfb0c38ad9325a6d47.asciidoc | 2 +- 
.../78043831fd32004a82930c8ac8a1d809.asciidoc | 2 +- .../7846974b47a3eab1832a475663d23ad9.asciidoc | 2 +- .../7885ca9d7c61050095288eef6bc6cca9.asciidoc | 2 +- .../79d206a528be704050a437adce2496dd.asciidoc | 2 +- .../7a32f44a1511ecb0d3f0b0ff2aca5c44.asciidoc | 23 +++++ .../7bdc283b96c7a965fae23013647b8578.asciidoc | 2 +- .../7cd23457e220c8b64c5b0041d2acc27a.asciidoc | 2 +- .../7d3a74fe0ba3fe95d1c3275365ff9315.asciidoc | 2 +- .../7daff6b7e668ab8a762b8ab5dff7a167.asciidoc | 2 +- .../7ea7ab20df76c47b391a582ae4bce803.asciidoc | 16 ++++ .../7f1fade93225f8cf6000b93334d76ce4.asciidoc | 2 +- .../7f2d511cb64743c006225e5933a14bb4.asciidoc | 2 +- .../7f56755fb6c42f7e6203339a6d0cb6e6.asciidoc | 2 +- .../7fde3ff91c4a2e7080444af37d5cd287.asciidoc | 2 +- .../80d2ccb7e2056d66bcf95096ae61af4b.asciidoc | 15 ++++ .../820f689eaaef15fc07abd1073fa880f8.asciidoc | 2 +- .../828f0045747fde4888a947bb99e190e3.asciidoc | 2 +- .../853fc710cea79fb4e1a85fb6d149f9c5.asciidoc | 2 +- .../85479e02af00681210e17e3d0ff51e21.asciidoc | 2 +- .../858fde15fb0a0340873b123043f8c3b4.asciidoc | 2 +- .../85f6667f148d16d075493fddf07e2932.asciidoc | 2 +- .../85f9fc6f98e8573efed9b034e853d5ae.asciidoc | 2 +- .../8619bd17bbfe33490b1f277007f654db.asciidoc | 2 +- .../8621c05cc7cf3880bde751f6670a0c3a.asciidoc | 15 ---- .../863253bf0ab7d227ff72a0a384f4de8c.asciidoc | 2 +- .../8634c9993485d622fb12d24f4f242264.asciidoc | 2 +- .../86f426ffa67416a50f2702f7131d35de.asciidoc | 20 +++++ .../87457bb3467484bec3e9df4e25942ba6.asciidoc | 2 +- .../8a0b5f759de3f27f0801c1176e616117.asciidoc | 2 +- .../8a1b6eae4893c5dd27b3d81fd8d70f5b.asciidoc | 2 +- .../8b8b6aac2111b2d8b93758ac737e6543.asciidoc | 2 +- .../8c47c80139f40f25db44f5781ca2dfbe.asciidoc | 10 --- .../8cd00a3aba7c3c158277bc032aac2830.asciidoc | 2 +- .../8d05862be1f9e7edaba162b1888b5677.asciidoc | 2 +- .../8d9b04f2a97f4229dec9e620126de049.asciidoc | 2 +- .../8de6fed6ba2b94ce6a12ce076be2b4d7.asciidoc | 2 +- .../8ecefdcf8f153cf91588e9fdde8f3e6b.asciidoc | 2 +- .../8ed31628081db2b6e9106d61d1e142be.asciidoc | 2 +- ...8f56ae0bf05093986e46a29fb0890a4f.asciidoc} | 3 +- .../8f6f7ea5abf56152b4a5639ddf40848f.asciidoc | 2 +- .../8fec06a98d0151c1d717a01491d0b8f0.asciidoc | 2 +- .../9169d19a80175ec94f80865d0f9bef4c.asciidoc | 2 +- .../91750571c195718f0ff246e058e4bc63.asciidoc | 2 +- .../927b20a221f975b75d1227b67d0eb7e2.asciidoc | 2 +- .../9382f022086c692ba05efb0acae65946.asciidoc | 2 +- .../944806221eb89f5af2298ccdf2902277.asciidoc | 2 +- .../948418e0ef1b7e7cfee2f11be715d7d2.asciidoc | 2 +- .../9501e6c8e95c21838653ea15b9b7ed5f.asciidoc | 2 +- .../95414139c7b1203e3c2d99a354415801.asciidoc | 2 +- .../957d2e6ddbb9a9b16549c5e67b93b41b.asciidoc | 2 +- .../968fb5b92aa65af09544f7c002b0953e.asciidoc | 2 +- .../96ea0e80323d6d2d99964625c004a44d.asciidoc | 2 +- .../971fd23adb81bb5842c7750e0379336a.asciidoc | 2 +- .../97c6c07f46f4177f0565a04bc50924a3.asciidoc | 2 +- .../986f892bfa4dfdf1da8455fdf84a4b0c.asciidoc | 2 +- .../9a203aae3e1412d919546276fb52a5ca.asciidoc | 2 +- .../9ab351893dae65ec97fd8cb6832950fb.asciidoc | 2 +- ...9be8f52aab94b6bc8b8603234551475d.asciidoc} | 2 +- .../9c2ce0132e4527077443f007d27b1158.asciidoc | 2 +- .../9d66cb59711f24e6b4ff85608c9b5a1b.asciidoc | 2 +- .../9de4edafd22a8b9cb557632b2c8779cd.asciidoc | 2 +- .../9f03a8340761e729e8bdc8c7b6f66d98.asciidoc | 20 +++++ .../9feff356f302ea4915347ab71cc4887a.asciidoc | 2 +- .../a00311843b5f8f3e9f7d511334a828b1.asciidoc | 2 +- .../a1070cf2f5969d42d71cda057223f152.asciidoc | 2 +- .../a1b668795243398f5bc40bcc9bead884.asciidoc | 2 +- 
.../a1ccd51eef37e43c935a047b0ee15daa.asciidoc | 2 +- .../a1dda7e7c01be96a4acf7b725d70385f.asciidoc | 2 +- .../a1f70bc71b763b58206814c40a7440e7.asciidoc | 2 +- .../a2566a2ed14ecc504ca27f85771c2638.asciidoc | 29 ++++++ .../a3646b59da66b9ab68bdbc8dc2e6a9be.asciidoc | 2 +- .../a3779f21f132787c48681bfb50453592.asciidoc | 2 +- .../a46f566ca031375658c22f89b87dc6d2.asciidoc | 12 --- .../a5aeb2c8bdf91f6146026ec8edc476b6.asciidoc | 2 +- .../a5e6ad9e65615f6f92ae6a19674dd742.asciidoc | 2 +- .../a5f9eb40087921e67d820775acf71522.asciidoc | 2 +- .../a692b4c0ca7825c467880b346841f5a5.asciidoc | 2 +- .../a6ccac9f80c5e5efdaab992f3a32d919.asciidoc | 2 +- .../a72613de3774571ba24def4b495161b5.asciidoc | 2 +- .../a7471238a42c7cd0f8a7ac9ea14dfbcb.asciidoc | 62 +++++++++++++ .../a7d814caf2a995d2aeadecc3495011be.asciidoc | 2 +- .../a8dff54362184b2732b9bd248cf6df8a.asciidoc | 2 +- .../a960b43e720b4934edb74ab4b085ca77.asciidoc | 2 +- .../a999b5661bebb802bbbfe04faacf1971.asciidoc | 2 +- .../a9f14efc26fdd3c37a71f06c310163d9.asciidoc | 2 +- .../aa676d54a59dee87ecd28bcc1edce59b.asciidoc | 2 +- .../aa814309ad5f1630886ba75255b444f5.asciidoc | 2 +- .../aad7d80990a6a3c391ff555ce09ae9dc.asciidoc | 2 +- .../ac22cc2b0f4ad659055feed2852a2d59.asciidoc | 2 +- .../ac497917ef707538198a8458ae3d5c6b.asciidoc | 2 +- .../ac5b91aa75696f9880451c9439fd9eec.asciidoc | 2 +- .../acc6cd860032167e34fa5e0c043ab3b0.asciidoc | 2 +- ...ad0204602a9ebdf748b06ce9ade218e4.asciidoc} | 10 +-- .../ad2416ca0581316cee6c63129685bca5.asciidoc | 2 +- .../add240aa149d8b11139947502b279ee0.asciidoc | 2 +- .../ae4b57e167b81ffed537a8e6eaf7f855.asciidoc | 22 +++++ .../ae9ccfaa146731ab9176df90670db1c2.asciidoc | 2 +- .../af607715d0693587dd12962266359a96.asciidoc | 2 +- .../afa11ebb493ebbfd77acbbe50d2ce6db.asciidoc | 2 +- .../afef5cac988592b97ae289ab39c2f437.asciidoc | 2 +- .../b0ee6f19875fe5bad8aab02d60e3532c.asciidoc | 2 +- .../b11a0675e49df0709be693297ca73a2c.asciidoc | 2 +- .../b3cd07f02059165fd62a2f148be3dc58.asciidoc | 2 +- .../b557f114e21dbc6f531d4e7621a08e8f.asciidoc | 2 +- .../b583bf8d3a2f49d633aa2cfed5606418.asciidoc | 2 +- .../b5bc1bb7278f2f95bc54790c78c928e0.asciidoc | 2 +- .../b68ed7037042719945a2452d23e64c78.asciidoc | 2 +- .../b7c99eb38d4b37e22de1ffcb0e88ae4c.asciidoc | 2 +- .../b87438263ccd68624b1d69d8750f9432.asciidoc | 2 +- .../b9370fa1aa18fe4bc00cf81ef0c0d45b.asciidoc | 2 +- .../b94cee0f74f57742b3948f9b784dfdd4.asciidoc | 2 +- .../ba0e7e0b18fc9ec6c623d40186d1f61b.asciidoc | 2 +- .../ba650046f9063f6c43d76f47e0f94403.asciidoc | 2 +- .../bb2ba5d1885f87506f90dbb002e518f4.asciidoc | 2 +- .../bb5a67e3d2d9cd3016e487e627769fe8.asciidoc | 2 +- .../bdaf00d791706d7fde25fd65d3735b94.asciidoc | 2 +- .../be5c5a9c25901737585e4fff9195da3c.asciidoc | 2 +- .../beb0b9ff4f68672273fcff1b7bae706b.asciidoc | 2 +- .../befa73a8a419fcf3b7798548b54a20bf.asciidoc | 2 +- .../bf1de9fa1b825fa875d27fa08821a6d1.asciidoc | 2 +- .../bfdad8a928ea30d7cf60d0a0a6bc6e2e.asciidoc | 2 +- .../c067182d385f59ce5952fb9a716fbf05.asciidoc | 2 +- .../c14bd2a793721615d2f42bce5eea9f1f.asciidoc | 26 ++++++ .../c18100d62ed31bc9e05f62900156e6a8.asciidoc | 2 +- ...c2a9233c00ffa0aeb921edc072dd0c6f.asciidoc} | 8 +- .../c2c21e2824fbf6b7198ede30419da82b.asciidoc | 2 +- ...c38abc4b33115bce453748288837db8f.asciidoc} | 4 +- .../c4607ca79b2bcde39305d6f4f21cad37.asciidoc | 2 +- .../c49ce88ff64cdeadb7029959e80c8f84.asciidoc | 24 +++++ .../c526fca1609b4c3c1d12dfd218d69a50.asciidoc | 2 +- .../c5ed7d83ade97a417aef28b9e2871e5d.asciidoc | 2 +- .../c6339d09f85000a6432304b0ec63b8f6.asciidoc | 2 +- 
.../c6bdd5c7de79d6d9ac8e33a397b511e8.asciidoc | 2 +- .../c793efe7280e9b6e09981c4d4f832348.asciidoc | 2 +- .../c9c396b94bb88098477e2b08b55a12ee.asciidoc | 2 +- .../ca5dda98e977125d40a7fe1e178e213f.asciidoc | 2 +- .../cc5eefcc2102aae7e87b0c87b4af10b8.asciidoc | 2 +- .../cdb68b3f565df7c85e52a55864b37d40.asciidoc | 2 +- .../ce2c2e8f5a2e4daf051b6e10122e5aae.asciidoc | 2 +- .../cf23f18761df33f08bc6f6d1875496fd.asciidoc | 2 +- .../d01a590fa9ea8a0cb34ed8dda502296c.asciidoc | 11 +++ ...d0595401ef09dc23579c9df111049c20.asciidoc} | 13 +-- .../d0fde00ef381e61b8a9e99f18cb5970a.asciidoc | 2 +- ...d1a285aa244ec461d68f13e7078a33c0.asciidoc} | 3 +- .../d1d8b6e642db1a7c70dbbf0fe6d8e92d.asciidoc | 2 +- .../d260225cf97e068ead2a8a6bb5aefd90.asciidoc | 2 +- .../d3a0f648d0fd50b54a4e9ebe363c5047.asciidoc | 2 +- .../d3dccdb15822e971ededb9f6f7d8ada1.asciidoc | 2 +- .../d5dcddc6398b473b6ad9bce5c6adf986.asciidoc | 2 +- .../d603e76ab70131f7ec6b08758f95a0e3.asciidoc | 2 +- .../d6a21afa4a94b9baa734eac430940bcf.asciidoc | 2 +- .../d6a4548b29e939fb197189c20c7c016f.asciidoc | 17 ---- .../d7919fb6f4d02dde1390775eb8365b79.asciidoc | 2 +- .../d7a55a7c491e97079e429483085f1d58.asciidoc | 2 +- .../d7a5b0159ffdcdd1ab9078b38829a08b.asciidoc | 2 +- .../d7fe687201ac87b307cd06ed015dd317.asciidoc | 2 +- .../d803ed00d8f45f81c33e415e1c1ecb8c.asciidoc | 2 +- .../d8496fa0e5a394fd758617ed6a6c956f.asciidoc | 2 +- .../d851282dba548251d10db5954a339307.asciidoc | 2 +- .../da8db0769dff7305f178c12b1111bc99.asciidoc | 2 +- .../dcf82f3aacae49c0bb4ccbc673f13e9f.asciidoc | 2 +- .../dd16c9c981551c9da47ebb5ef5105fa0.asciidoc | 57 ------------ .../dddb6a6ebd145f8411c5b4910d332f87.asciidoc | 2 +- .../df04e2e9af66d5e30b1bfdbd458cab13.asciidoc | 2 +- .../e017c2de6f93a8dd97f5c6e002dd5c4f.asciidoc | 28 ------ .../e0db84e8f7dce49b9301ce997ae831dd.asciidoc | 22 +++++ ...e197ab718ad6c3cfbf2dd908f0ccd60f.asciidoc} | 0 .../e1c08f5774e81da31cd75aa1bdc2c548.asciidoc | 2 +- .../e308899a306e61d1a590868308689955.asciidoc | 2 +- .../e318759f4cc932154f540b748e85d6e1.asciidoc | 22 +++++ .../e3fe842951dc873d7d00c8f6a010c53f.asciidoc | 2 +- .../e4b6a6a921c97b4c0bbe97bd89f4cf33.asciidoc | 2 +- .../e4be53736bcc02b03068fd72fdbfe271.asciidoc | 2 +- .../e563da4a2054efcdf0e53ead11caac1d.asciidoc | 11 +++ .../e6f6d3aeea7ecea47cfd5c3d727f7004.asciidoc | 2 +- .../e715fb8c792bf09ac98f0ceca99beb84.asciidoc | 10 --- .../e821d27a8b810821707ba860e31f8b78.asciidoc | 2 +- .../e8ea65153d7775f25b08dfdfe6954498.asciidoc | 2 +- .../e95e61988dc3073a007f7b7445dd233b.asciidoc | 2 +- .../ea68e3428cc2ca3455bf312d09451489.asciidoc | 2 +- .../eb54506fbc71a7d250e86b22d0600114.asciidoc | 2 +- .../ec135f0cc0d3f526df68000b2a95c65b.asciidoc | 12 --- .../ec4b43c3ebd8816799fa004596b2f0cb.asciidoc | 2 +- .../ec5a2ce156c36aaa267fa31dd9367307.asciidoc | 2 +- .../edb25dc0162b039d477cb06aed2d6275.asciidoc | 2 +- .../ef866d06ffd96099957b077a53127c6c.asciidoc | 17 ++++ .../f097c02541056f3c0fc855e7bbeef8a8.asciidoc | 2 +- .../f298c4eb50ea97b34c57f8756eb350d3.asciidoc | 2 +- .../f29b2674299ddf51a25ed87619025ede.asciidoc | 2 +- .../f2a5f77f929cc7b893b80f4bd5b1a192.asciidoc | 2 +- .../f32f0c19b42de3b87dd764fe4ca17e7c.asciidoc | 2 +- .../f39512478cae2db8f4566a1e4af9e8f5.asciidoc | 2 +- .../f3fb52680482925c202c2e2f8af6f044.asciidoc | 2 +- ...f4c0e6a9b58b640200d02047f5aa36bf.asciidoc} | 15 ++-- .../f60d6a7a9e7fd63f9ce1384f88c044cb.asciidoc | 11 +++ .../f6566395f85d3afe917228643d7318d6.asciidoc | 2 +- .../f6d6d1f5c84cf4f6ccd71a84d193e483.asciidoc | 24 +++++ .../f70a54cd9a9f4811bf962e469f2ca2ea.asciidoc | 2 +- 
.../f7ed127048694a1f5735e07a208b93ad.asciidoc | 20 +++++ .../f8a0010753b1ff563dc42d703902d2fa.asciidoc | 2 +- .../f8cb1a04c2e487ff006b5ae0e1a7afbd.asciidoc | 2 +- .../f8f960550104c33e00dc78bc8723ccef.asciidoc | 2 +- .../f96d8131e8a592fbf6dfd686173940a9.asciidoc | 2 +- .../f994498dd6576be657dedce2822d2b9e.asciidoc | 2 +- .../fa88f6f5a7d728ec4f1d05244228cb09.asciidoc | 2 +- .../fad26f4fb5a1bc9c38db33394e877d94.asciidoc | 2 +- .../fb0152f6c70f647a8b6709969113486d.asciidoc | 2 +- .../fe208d94ec93eabf3bd06139fa70701e.asciidoc | 2 +- .../fe54f3e53dbe7dee40ec3108a461d19a.asciidoc | 2 +- .../fe6429d0d82174aa5acf95e96e237380.asciidoc | 2 +- .../fe7169bab8e626f582c9ea87585d0f35.asciidoc | 2 +- docs/guide/release-notes.asciidoc | 32 +++++++ elasticsearch/_version.py | 2 +- .../generate-docs-examples/package-lock.json | 88 ++----------------- 390 files changed, 1222 insertions(+), 685 deletions(-) delete mode 100644 docs/examples/0f028f71f04c1d569fab402869565a84.asciidoc delete mode 100644 docs/examples/12adea5d76f73d94d80d42f53f67563f.asciidoc create mode 100644 docs/examples/18de6782bd18f4a9baec2feec8c02a8b.asciidoc rename docs/examples/{f95a4d7ab02bf400246c8822f0245f02.asciidoc => 1af9742c71ce0587cd49a73ec7fc1f6c.asciidoc} (92%) rename docs/examples/{ef10e8d07d9fae945e035d5dee1e9754.asciidoc => 1cbaa43d1c741ddd8a13e21791709527.asciidoc} (72%) create mode 100644 docs/examples/2171361cd8e70dcde91bdb9bbd8702f1.asciidoc delete mode 100644 docs/examples/272e27bf1fcc4fe5dbd4092679dd0342.asciidoc create mode 100644 docs/examples/2bb41b0b4876ce98cd0cd8fb6d337f18.asciidoc create mode 100644 docs/examples/2c23285eb9f52010ecb1d5cab640ff78.asciidoc rename docs/examples/{9aedc45f83e022732789e8d796f5a43c.asciidoc => 2cd8439db5054c93c49f1bf50433e1bb.asciidoc} (96%) create mode 100644 docs/examples/2f67db5e4d6c958258c3d70fb2d0b1c8.asciidoc create mode 100644 docs/examples/31bc93e429ad0de11dd2dd231e8f2c5e.asciidoc rename docs/examples/{750ac969f9a05567f5cdf4f93d6244b6.asciidoc => 35b686d9d9e915d0dea7a4251781767d.asciidoc} (95%) delete mode 100644 docs/examples/3a204b57072a104d9b50f3a9e064a8f6.asciidoc create mode 100644 docs/examples/3d924850fb2372fff4739fd145660f88.asciidoc rename docs/examples/{6cb1dae368c945ecf7c9ec332a5743a2.asciidoc => 45b0b2420c807bcb31fcb051daa7099a.asciidoc} (92%) create mode 100644 docs/examples/46276fbcba7b5e9541dd56ec3f20cf2a.asciidoc create mode 100644 docs/examples/49b31e23f8b9667b6a7b2734d55fb6ed.asciidoc rename docs/examples/{643e19c3b6ac1134554dd890e2249c2b.asciidoc => 4ab7c13d27ec8f9e6705a0e77fe53c20.asciidoc} (84%) create mode 100644 docs/examples/4dab4c5168047ba596af1beb0e55b845.asciidoc create mode 100644 docs/examples/51390ca10aa22d7104e8970f09ea4512.asciidoc create mode 100644 docs/examples/51f6a62c0f0a02aa97a42c6a80542aa4.asciidoc create mode 100644 docs/examples/57a62f66b67a08c38942f0edb0fbaa26.asciidoc rename docs/examples/{f9bad6fd369764185e1cb09b89ee39cc.asciidoc => 58fb1f324597070bee2cf854c162d388.asciidoc} (73%) delete mode 100644 docs/examples/59aa5216630f80c5dc298fc5bba4a819.asciidoc create mode 100644 docs/examples/5a4a6cf06b3a10f55012639166630290.asciidoc rename docs/examples/{0709a38613d2de90d418ce12b36af30e.asciidoc => 611c1e05f4ebb48a1a8c8488238ce34d.asciidoc} (74%) create mode 100644 docs/examples/631a8d49bfbe622fd07c6868489b9fb0.asciidoc create mode 100644 docs/examples/67154a4837cf996a9a9c3e61d6e9d1b3.asciidoc delete mode 100644 docs/examples/6e498b9dc753b94abf2618c407fa5cd8.asciidoc create mode 100644 docs/examples/7039aa15511e9b876c2e00d067751e2e.asciidoc 
rename docs/examples/{2fa7ded8515b32f26c54394ea598f573.asciidoc => 708fe682185b46704fce563465933ded.asciidoc} (85%) rename docs/examples/{e784fc00894635470adfd78a0c46b427.asciidoc => 725c957303ead2d509f148e0428d7250.asciidoc} (85%) create mode 100644 docs/examples/72abd8a82b4a8fc2fc06f54150d23b76.asciidoc create mode 100644 docs/examples/7a32f44a1511ecb0d3f0b0ff2aca5c44.asciidoc create mode 100644 docs/examples/7ea7ab20df76c47b391a582ae4bce803.asciidoc create mode 100644 docs/examples/80d2ccb7e2056d66bcf95096ae61af4b.asciidoc delete mode 100644 docs/examples/8621c05cc7cf3880bde751f6670a0c3a.asciidoc create mode 100644 docs/examples/86f426ffa67416a50f2702f7131d35de.asciidoc delete mode 100644 docs/examples/8c47c80139f40f25db44f5781ca2dfbe.asciidoc rename docs/examples/{e5f50b31f165462d883ecbff45f74985.asciidoc => 8f56ae0bf05093986e46a29fb0890a4f.asciidoc} (85%) rename docs/examples/{2c27a8eb6528126f37a843d434cd88b6.asciidoc => 9be8f52aab94b6bc8b8603234551475d.asciidoc} (86%) create mode 100644 docs/examples/9f03a8340761e729e8bdc8c7b6f66d98.asciidoc create mode 100644 docs/examples/a2566a2ed14ecc504ca27f85771c2638.asciidoc delete mode 100644 docs/examples/a46f566ca031375658c22f89b87dc6d2.asciidoc create mode 100644 docs/examples/a7471238a42c7cd0f8a7ac9ea14dfbcb.asciidoc rename docs/examples/{4de4bb55bbc0a76c75d256f245a3ee3f.asciidoc => ad0204602a9ebdf748b06ce9ade218e4.asciidoc} (50%) create mode 100644 docs/examples/ae4b57e167b81ffed537a8e6eaf7f855.asciidoc create mode 100644 docs/examples/c14bd2a793721615d2f42bce5eea9f1f.asciidoc rename docs/examples/{ffda10edaa7ce087703193c3cb95a426.asciidoc => c2a9233c00ffa0aeb921edc072dd0c6f.asciidoc} (94%) rename docs/examples/{134384b8c63cfbd8d762fb01757bb3f9.asciidoc => c38abc4b33115bce453748288837db8f.asciidoc} (84%) create mode 100644 docs/examples/c49ce88ff64cdeadb7029959e80c8f84.asciidoc create mode 100644 docs/examples/d01a590fa9ea8a0cb34ed8dda502296c.asciidoc rename docs/examples/{b3479ee4586c15020549afae58d94d65.asciidoc => d0595401ef09dc23579c9df111049c20.asciidoc} (84%) rename docs/examples/{3ab8f65fcb55a0e3664c55749ec41efd.asciidoc => d1a285aa244ec461d68f13e7078a33c0.asciidoc} (93%) delete mode 100644 docs/examples/d6a4548b29e939fb197189c20c7c016f.asciidoc delete mode 100644 docs/examples/dd16c9c981551c9da47ebb5ef5105fa0.asciidoc delete mode 100644 docs/examples/e017c2de6f93a8dd97f5c6e002dd5c4f.asciidoc create mode 100644 docs/examples/e0db84e8f7dce49b9301ce997ae831dd.asciidoc rename docs/examples/{3649194a97d265a3bc758f8b38f7561e.asciidoc => e197ab718ad6c3cfbf2dd908f0ccd60f.asciidoc} (100%) create mode 100644 docs/examples/e318759f4cc932154f540b748e85d6e1.asciidoc create mode 100644 docs/examples/e563da4a2054efcdf0e53ead11caac1d.asciidoc delete mode 100644 docs/examples/e715fb8c792bf09ac98f0ceca99beb84.asciidoc delete mode 100644 docs/examples/ec135f0cc0d3f526df68000b2a95c65b.asciidoc create mode 100644 docs/examples/ef866d06ffd96099957b077a53127c6c.asciidoc rename docs/examples/{b3f442a7d9eb391121dcab991787f9d6.asciidoc => f4c0e6a9b58b640200d02047f5aa36bf.asciidoc} (70%) create mode 100644 docs/examples/f60d6a7a9e7fd63f9ce1384f88c044cb.asciidoc create mode 100644 docs/examples/f6d6d1f5c84cf4f6ccd71a84d193e483.asciidoc create mode 100644 docs/examples/f7ed127048694a1f5735e07a208b93ad.asciidoc diff --git a/docs/examples/00fea15cbca83be9d5f1a024ff2ec708.asciidoc b/docs/examples/00fea15cbca83be9d5f1a024ff2ec708.asciidoc index c9322a6a6..d6ce47875 100644 --- a/docs/examples/00fea15cbca83be9d5f1a024ff2ec708.asciidoc +++ 
b/docs/examples/00fea15cbca83be9d5f1a024ff2ec708.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-elasticsearch.asciidoc:204 +// inference/service-elasticsearch.asciidoc:199 [source, python] ---- diff --git a/docs/examples/010d5e901a2690fa7b2396edbe6cd463.asciidoc b/docs/examples/010d5e901a2690fa7b2396edbe6cd463.asciidoc index c76a26693..90bdb6112 100644 --- a/docs/examples/010d5e901a2690fa7b2396edbe6cd463.asciidoc +++ b/docs/examples/010d5e901a2690fa7b2396edbe6cd463.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/common-log-format-example.asciidoc:161 +// ingest/common-log-format-example.asciidoc:162 [source, python] ---- diff --git a/docs/examples/015e6e6132b6d6d44bddb06bc3b316ed.asciidoc b/docs/examples/015e6e6132b6d6d44bddb06bc3b316ed.asciidoc index 1930760fd..2a6a12a51 100644 --- a/docs/examples/015e6e6132b6d6d44bddb06bc3b316ed.asciidoc +++ b/docs/examples/015e6e6132b6d6d44bddb06bc3b316ed.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/retrievers-examples.asciidoc:1051 +// search/search-your-data/retrievers-examples.asciidoc:1322 [source, python] ---- diff --git a/docs/examples/0165d22da5f2fc7678392b31d8eb5566.asciidoc b/docs/examples/0165d22da5f2fc7678392b31d8eb5566.asciidoc index b716f8df7..d2e7ad7ff 100644 --- a/docs/examples/0165d22da5f2fc7678392b31d8eb5566.asciidoc +++ b/docs/examples/0165d22da5f2fc7678392b31d8eb5566.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/retrievers-examples.asciidoc:1363 +// search/search-your-data/retrievers-examples.asciidoc:1655 [source, python] ---- diff --git a/docs/examples/01ae196538fac197eedbbf458a4ef31b.asciidoc b/docs/examples/01ae196538fac197eedbbf458a4ef31b.asciidoc index e087a80ec..cf35df41e 100644 --- a/docs/examples/01ae196538fac197eedbbf458a4ef31b.asciidoc +++ b/docs/examples/01ae196538fac197eedbbf458a4ef31b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/keyword.asciidoc:260 +// mapping/types/keyword.asciidoc:254 [source, python] ---- diff --git a/docs/examples/01b23f09d2b7f140faf649eadbbf3ac3.asciidoc b/docs/examples/01b23f09d2b7f140faf649eadbbf3ac3.asciidoc index 7d37ed747..1f3839c3f 100644 --- a/docs/examples/01b23f09d2b7f140faf649eadbbf3ac3.asciidoc +++ b/docs/examples/01b23f09d2b7f140faf649eadbbf3ac3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/index-templates.asciidoc:86 +// indices/index-templates.asciidoc:85 [source, python] ---- diff --git a/docs/examples/01cd0ea360282a2c591a366679d7187d.asciidoc b/docs/examples/01cd0ea360282a2c591a366679d7187d.asciidoc index d52222ef9..79f5d0185 100644 --- a/docs/examples/01cd0ea360282a2c591a366679d7187d.asciidoc +++ b/docs/examples/01cd0ea360282a2c591a366679d7187d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/task-queue-backlog.asciidoc:83 +// troubleshooting/common-issues/task-queue-backlog.asciidoc:60 [source, python] ---- diff --git a/docs/examples/0246f73cc2ed3dfec577119e8cd15404.asciidoc b/docs/examples/0246f73cc2ed3dfec577119e8cd15404.asciidoc index 670c9daa3..22265deef 100644 --- a/docs/examples/0246f73cc2ed3dfec577119e8cd15404.asciidoc +++ b/docs/examples/0246f73cc2ed3dfec577119e8cd15404.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:183 +// indices/put-mapping.asciidoc:189 [source, python] ---- diff --git 
a/docs/examples/02b6aa3e5652839f03de3a655854b897.asciidoc b/docs/examples/02b6aa3e5652839f03de3a655854b897.asciidoc index bc1939367..99fe5374a 100644 --- a/docs/examples/02b6aa3e5652839f03de3a655854b897.asciidoc +++ b/docs/examples/02b6aa3e5652839f03de3a655854b897.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/downsampling-manual.asciidoc:466 +// data-streams/downsampling-manual.asciidoc:469 [source, python] ---- diff --git a/docs/examples/0350410d11579f4e876c798ce1eaef5b.asciidoc b/docs/examples/0350410d11579f4e876c798ce1eaef5b.asciidoc index 41f174407..3aad0b601 100644 --- a/docs/examples/0350410d11579f4e876c798ce1eaef5b.asciidoc +++ b/docs/examples/0350410d11579f4e876c798ce1eaef5b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/percolate-query.asciidoc:565 +// query-dsl/percolate-query.asciidoc:563 [source, python] ---- diff --git a/docs/examples/03891265df2111a38e0b6b24c1b967e1.asciidoc b/docs/examples/03891265df2111a38e0b6b24c1b967e1.asciidoc index cf65de095..b3429bbde 100644 --- a/docs/examples/03891265df2111a38e0b6b24c1b967e1.asciidoc +++ b/docs/examples/03891265df2111a38e0b6b24c1b967e1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/security/get-service-accounts.asciidoc:320 +// rest-api/security/get-service-accounts.asciidoc:288 [source, python] ---- diff --git a/docs/examples/0737ebaea33631f001fb3f4226948492.asciidoc b/docs/examples/0737ebaea33631f001fb3f4226948492.asciidoc index 8b47a2a5b..d4a082a27 100644 --- a/docs/examples/0737ebaea33631f001fb3f4226948492.asciidoc +++ b/docs/examples/0737ebaea33631f001fb3f4226948492.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/processors/geoip.asciidoc:237 +// ingest/processors/geoip.asciidoc:238 [source, python] ---- diff --git a/docs/examples/074e4602d1ca54412380a40867d078bc.asciidoc b/docs/examples/074e4602d1ca54412380a40867d078bc.asciidoc index 8eab49b0b..0714cd63d 100644 --- a/docs/examples/074e4602d1ca54412380a40867d078bc.asciidoc +++ b/docs/examples/074e4602d1ca54412380a40867d078bc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// index-modules/slowlog.asciidoc:180 +// index-modules/slowlog.asciidoc:181 [source, python] ---- diff --git a/docs/examples/07dadb9b0a774bd8e7f3527cf8a44afc.asciidoc b/docs/examples/07dadb9b0a774bd8e7f3527cf8a44afc.asciidoc index a2827083f..1994efccb 100644 --- a/docs/examples/07dadb9b0a774bd8e7f3527cf8a44afc.asciidoc +++ b/docs/examples/07dadb9b0a774bd8e7f3527cf8a44afc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/semantic-query.asciidoc:17 +// query-dsl/semantic-query.asciidoc:15 [source, python] ---- diff --git a/docs/examples/083b92e8ea264e49bf9fd40fc6a3094b.asciidoc b/docs/examples/083b92e8ea264e49bf9fd40fc6a3094b.asciidoc index 6c94b6da2..ee247349f 100644 --- a/docs/examples/083b92e8ea264e49bf9fd40fc6a3094b.asciidoc +++ b/docs/examples/083b92e8ea264e49bf9fd40fc6a3094b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-elasticsearch.asciidoc:264 +// inference/service-elasticsearch.asciidoc:259 [source, python] ---- diff --git a/docs/examples/0a650401134f07e40216f0d0d1a66a32.asciidoc b/docs/examples/0a650401134f07e40216f0d0d1a66a32.asciidoc index 7dfc1b717..79ea752ef 100644 --- a/docs/examples/0a650401134f07e40216f0d0d1a66a32.asciidoc +++ b/docs/examples/0a650401134f07e40216f0d0d1a66a32.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// 
cat/allocation.asciidoc:126 +// cat/allocation.asciidoc:125 [source, python] ---- diff --git a/docs/examples/0bc6155e0c88062a4d8490da49db3aa8.asciidoc b/docs/examples/0bc6155e0c88062a4d8490da49db3aa8.asciidoc index 77db1349e..ddef8a6b5 100644 --- a/docs/examples/0bc6155e0c88062a4d8490da49db3aa8.asciidoc +++ b/docs/examples/0bc6155e0c88062a4d8490da49db3aa8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/retrievers-examples.asciidoc:812 +// search/search-your-data/retrievers-examples.asciidoc:1083 [source, python] ---- diff --git a/docs/examples/0bee07a581c5776e068f6f4efad5a399.asciidoc b/docs/examples/0bee07a581c5776e068f6f4efad5a399.asciidoc index 5ef14fcc9..0b33c28da 100644 --- a/docs/examples/0bee07a581c5776e068f6f4efad5a399.asciidoc +++ b/docs/examples/0bee07a581c5776e068f6f4efad5a399.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-across-clusters.asciidoc:194 +// esql/esql-across-clusters.asciidoc:195 [source, python] ---- diff --git a/docs/examples/0c2d9ac7e3f28d4d802e21cbbbcfeb34.asciidoc b/docs/examples/0c2d9ac7e3f28d4d802e21cbbbcfeb34.asciidoc index 1ccb32524..13c62b64c 100644 --- a/docs/examples/0c2d9ac7e3f28d4d802e21cbbbcfeb34.asciidoc +++ b/docs/examples/0c2d9ac7e3f28d4d802e21cbbbcfeb34.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/recovery.asciidoc:118 +// cat/recovery.asciidoc:214 [source, python] ---- diff --git a/docs/examples/0d689ac6e78be5d438f9b5d441be2b44.asciidoc b/docs/examples/0d689ac6e78be5d438f9b5d441be2b44.asciidoc index 89f2a7eeb..00c710d69 100644 --- a/docs/examples/0d689ac6e78be5d438f9b5d441be2b44.asciidoc +++ b/docs/examples/0d689ac6e78be5d438f9b5d441be2b44.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/retrievers-examples.asciidoc:1191 +// search/search-your-data/retrievers-examples.asciidoc:1462 [source, python] ---- diff --git a/docs/examples/0dfa9733c94bc43c6f14c7b6984c98fb.asciidoc b/docs/examples/0dfa9733c94bc43c6f14c7b6984c98fb.asciidoc index 1501cad9e..eaf7b69ae 100644 --- a/docs/examples/0dfa9733c94bc43c6f14c7b6984c98fb.asciidoc +++ b/docs/examples/0dfa9733c94bc43c6f14c7b6984c98fb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/component-templates.asciidoc:113 +// cat/component-templates.asciidoc:112 [source, python] ---- diff --git a/docs/examples/0e31b8ad176b31028becf9500989bcbd.asciidoc b/docs/examples/0e31b8ad176b31028becf9500989bcbd.asciidoc index fed70ff60..3f84ecc3e 100644 --- a/docs/examples/0e31b8ad176b31028becf9500989bcbd.asciidoc +++ b/docs/examples/0e31b8ad176b31028becf9500989bcbd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-watsonx-ai.asciidoc:102 +// inference/service-watsonx-ai.asciidoc:123 [source, python] ---- diff --git a/docs/examples/0e71a18d1aac61720cdc6b3f91fe643f.asciidoc b/docs/examples/0e71a18d1aac61720cdc6b3f91fe643f.asciidoc index 51907cae9..d5adcaa23 100644 --- a/docs/examples/0e71a18d1aac61720cdc6b3f91fe643f.asciidoc +++ b/docs/examples/0e71a18d1aac61720cdc6b3f91fe643f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/simple-query-string-query.asciidoc:153 +// query-dsl/simple-query-string-query.asciidoc:156 [source, python] ---- diff --git a/docs/examples/0f028f71f04c1d569fab402869565a84.asciidoc b/docs/examples/0f028f71f04c1d569fab402869565a84.asciidoc deleted file mode 100644 index 295df000b..000000000 --- 
a/docs/examples/0f028f71f04c1d569fab402869565a84.asciidoc +++ /dev/null @@ -1,15 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// migration/migrate_9_0.asciidoc:476 - -[source, python] ----- -resp = client.indices.put_settings( - index=".reindexed-v9-ml-anomalies-custom-example", - settings={ - "index": { - "number_of_replicas": "" - } - }, -) -print(resp) ----- diff --git a/docs/examples/0f7aa40ad26d59a9268630b980a3d594.asciidoc b/docs/examples/0f7aa40ad26d59a9268630b980a3d594.asciidoc index d76d2022a..a96668bfe 100644 --- a/docs/examples/0f7aa40ad26d59a9268630b980a3d594.asciidoc +++ b/docs/examples/0f7aa40ad26d59a9268630b980a3d594.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/simulate-template.asciidoc:61 +// indices/simulate-template.asciidoc:60 [source, python] ---- diff --git a/docs/examples/0fc4b589df5388da784c6d981e769e31.asciidoc b/docs/examples/0fc4b589df5388da784c6d981e769e31.asciidoc index e499f3289..6c8b86ea6 100644 --- a/docs/examples/0fc4b589df5388da784c6d981e769e31.asciidoc +++ b/docs/examples/0fc4b589df5388da784c6d981e769e31.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-index-template-v1.asciidoc:155 +// indices/put-index-template-v1.asciidoc:154 [source, python] ---- diff --git a/docs/examples/103296e16b4233926ad1f07360385606.asciidoc b/docs/examples/103296e16b4233926ad1f07360385606.asciidoc index 311b42d6c..8becee40d 100644 --- a/docs/examples/103296e16b4233926ad1f07360385606.asciidoc +++ b/docs/examples/103296e16b4233926ad1f07360385606.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// analysis/analyzers/lang-analyzer.asciidoc:1794 +// analysis/analyzers/lang-analyzer.asciidoc:1793 [source, python] ---- diff --git a/docs/examples/10f0c8fed98455c460c374b50ffbb204.asciidoc b/docs/examples/10f0c8fed98455c460c374b50ffbb204.asciidoc index 5aedd6b3c..7496a2077 100644 --- a/docs/examples/10f0c8fed98455c460c374b50ffbb204.asciidoc +++ b/docs/examples/10f0c8fed98455c460c374b50ffbb204.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc:301 +// data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc:317 [source, python] ---- diff --git a/docs/examples/12433d2b637d002e8d5c9a1adce69d3b.asciidoc b/docs/examples/12433d2b637d002e8d5c9a1adce69d3b.asciidoc index 3d588dde2..f978c0bfa 100644 --- a/docs/examples/12433d2b637d002e8d5c9a1adce69d3b.asciidoc +++ b/docs/examples/12433d2b637d002e8d5c9a1adce69d3b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:106 +// indices/put-mapping.asciidoc:112 [source, python] ---- diff --git a/docs/examples/12adea5d76f73d94d80d42f53f67563f.asciidoc b/docs/examples/12adea5d76f73d94d80d42f53f67563f.asciidoc deleted file mode 100644 index 18270ad1c..000000000 --- a/docs/examples/12adea5d76f73d94d80d42f53f67563f.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// migration/migrate_9_0.asciidoc:393 - -[source, python] ----- -resp = client.indices.add_block( - index=".ml-anomalies-custom-example", - block="read_only", -) -print(resp) ----- diff --git a/docs/examples/13d90ba227131aefbf4fcfd5992e662a.asciidoc b/docs/examples/13d90ba227131aefbf4fcfd5992e662a.asciidoc index fc457fe4e..1d7ddd0d8 100644 --- a/docs/examples/13d90ba227131aefbf4fcfd5992e662a.asciidoc +++ b/docs/examples/13d90ba227131aefbf4fcfd5992e662a.asciidoc @@ -1,5 +1,5 @@ // This file is 
autogenerated, DO NOT EDIT -// query-dsl/bool-query.asciidoc:159 +// query-dsl/bool-query.asciidoc:202 [source, python] ---- diff --git a/docs/examples/148edc235fcfbc263561f87f5533e688.asciidoc b/docs/examples/148edc235fcfbc263561f87f5533e688.asciidoc index 8b97c4933..9ed85a21a 100644 --- a/docs/examples/148edc235fcfbc263561f87f5533e688.asciidoc +++ b/docs/examples/148edc235fcfbc263561f87f5533e688.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/percolate-query.asciidoc:196 +// query-dsl/percolate-query.asciidoc:194 [source, python] ---- diff --git a/docs/examples/151d2b11807ec684b0c01aa89189a801.asciidoc b/docs/examples/151d2b11807ec684b0c01aa89189a801.asciidoc index b0926a099..ce49b8d45 100644 --- a/docs/examples/151d2b11807ec684b0c01aa89189a801.asciidoc +++ b/docs/examples/151d2b11807ec684b0c01aa89189a801.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/query-string-query.asciidoc:474 +// query-dsl/query-string-query.asciidoc:477 [source, python] ---- diff --git a/docs/examples/162b5b693b713f0bfab1209d59443c46.asciidoc b/docs/examples/162b5b693b713f0bfab1209d59443c46.asciidoc index 044c538d9..83ec7dd0b 100644 --- a/docs/examples/162b5b693b713f0bfab1209d59443c46.asciidoc +++ b/docs/examples/162b5b693b713f0bfab1209d59443c46.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/bool-query.asciidoc:133 +// query-dsl/bool-query.asciidoc:176 [source, python] ---- diff --git a/docs/examples/17b1647c8509543f2388c886f2584a20.asciidoc b/docs/examples/17b1647c8509543f2388c886f2584a20.asciidoc index bd99a92db..2e2f73e5c 100644 --- a/docs/examples/17b1647c8509543f2388c886f2584a20.asciidoc +++ b/docs/examples/17b1647c8509543f2388c886f2584a20.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// reranking/semantic-reranking.asciidoc:107 +// reranking/semantic-reranking.asciidoc:105 [source, python] ---- diff --git a/docs/examples/187733e50c60350f3f75921bea3b72c2.asciidoc b/docs/examples/187733e50c60350f3f75921bea3b72c2.asciidoc index 718144564..4d54d2a58 100644 --- a/docs/examples/187733e50c60350f3f75921bea3b72c2.asciidoc +++ b/docs/examples/187733e50c60350f3f75921bea3b72c2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/paginate-search-results.asciidoc:615 +// search/search-your-data/paginate-search-results.asciidoc:630 [source, python] ---- diff --git a/docs/examples/18de6782bd18f4a9baec2feec8c02a8b.asciidoc b/docs/examples/18de6782bd18f4a9baec2feec8c02a8b.asciidoc new file mode 100644 index 000000000..999873e69 --- /dev/null +++ b/docs/examples/18de6782bd18f4a9baec2feec8c02a8b.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// migration/migrate_8_0/migrate_to_java_time.asciidoc:268 + +[source, python] +---- +resp = client.indices.create( + index="my-index-000002", + mappings={ + "properties": { + "datetime": { + "type": "date", + "format": "uuuu/MM/dd HH:mm:ss||uuuu/MM/dd||epoch_millis" + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/1a7483796087053ba55029d0dc2ab356.asciidoc b/docs/examples/1a7483796087053ba55029d0dc2ab356.asciidoc index c48bf98b9..6f2df178e 100644 --- a/docs/examples/1a7483796087053ba55029d0dc2ab356.asciidoc +++ b/docs/examples/1a7483796087053ba55029d0dc2ab356.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/multivalued-fields.asciidoc:191 +// esql/multivalued-fields.asciidoc:199 [source, python] ---- diff --git 
a/docs/examples/1a9efb56adb2cd84faa9825a129381b9.asciidoc b/docs/examples/1a9efb56adb2cd84faa9825a129381b9.asciidoc index e0d883c89..870811976 100644 --- a/docs/examples/1a9efb56adb2cd84faa9825a129381b9.asciidoc +++ b/docs/examples/1a9efb56adb2cd84faa9825a129381b9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/rollup-search.asciidoc:222 +// rollup/apis/rollup-search.asciidoc:219 [source, python] ---- diff --git a/docs/examples/1aa91d3d48140d6367b6cabca8737b8f.asciidoc b/docs/examples/1aa91d3d48140d6367b6cabca8737b8f.asciidoc index 17dc48f5e..05c6b61e9 100644 --- a/docs/examples/1aa91d3d48140d6367b6cabca8737b8f.asciidoc +++ b/docs/examples/1aa91d3d48140d6367b6cabca8737b8f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/bulk.asciidoc:642 +// docs/bulk.asciidoc:638 [source, python] ---- diff --git a/docs/examples/f95a4d7ab02bf400246c8822f0245f02.asciidoc b/docs/examples/1af9742c71ce0587cd49a73ec7fc1f6c.asciidoc similarity index 92% rename from docs/examples/f95a4d7ab02bf400246c8822f0245f02.asciidoc rename to docs/examples/1af9742c71ce0587cd49a73ec7fc1f6c.asciidoc index 0d7391c3e..413fb5602 100644 --- a/docs/examples/f95a4d7ab02bf400246c8822f0245f02.asciidoc +++ b/docs/examples/1af9742c71ce0587cd49a73ec7fc1f6c.asciidoc @@ -5,7 +5,7 @@ ---- resp = client.cat.ml_trained_models( h="c,o,l,ct,v", - v=True, + v="true", ) print(resp) ---- diff --git a/docs/examples/1ba7afe23a26fe9ac7856d8c5bc1059d.asciidoc b/docs/examples/1ba7afe23a26fe9ac7856d8c5bc1059d.asciidoc index 67df96154..dc02df84d 100644 --- a/docs/examples/1ba7afe23a26fe9ac7856d8c5bc1059d.asciidoc +++ b/docs/examples/1ba7afe23a26fe9ac7856d8c5bc1059d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// analysis/analyzers/lang-analyzer.asciidoc:1502 +// analysis/analyzers/lang-analyzer.asciidoc:1501 [source, python] ---- diff --git a/docs/examples/ef10e8d07d9fae945e035d5dee1e9754.asciidoc b/docs/examples/1cbaa43d1c741ddd8a13e21791709527.asciidoc similarity index 72% rename from docs/examples/ef10e8d07d9fae945e035d5dee1e9754.asciidoc rename to docs/examples/1cbaa43d1c741ddd8a13e21791709527.asciidoc index fd86870f5..f50b0b860 100644 --- a/docs/examples/ef10e8d07d9fae945e035d5dee1e9754.asciidoc +++ b/docs/examples/1cbaa43d1c741ddd8a13e21791709527.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// analysis/tokenfilters/flatten-graph-tokenfilter.asciidoc:118 +// analysis/tokenfilters/flatten-graph-tokenfilter.asciidoc:126 [source, python] ---- @@ -9,7 +9,7 @@ resp = client.indices.analyze( { "type": "synonym_graph", "synonyms": [ - "dns, domain name system" + "internet phonebook, domain name system" ] }, "flatten_graph" diff --git a/docs/examples/1d9b695a17cffd910c496c9b03c75d6f.asciidoc b/docs/examples/1d9b695a17cffd910c496c9b03c75d6f.asciidoc index c9c7bce87..86081dab0 100644 --- a/docs/examples/1d9b695a17cffd910c496c9b03c75d6f.asciidoc +++ b/docs/examples/1d9b695a17cffd910c496c9b03c75d6f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc:34 +// data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc:42 [source, python] ---- diff --git a/docs/examples/1e0b85750d4e63ebbc927d4627c44bf8.asciidoc b/docs/examples/1e0b85750d4e63ebbc927d4627c44bf8.asciidoc index b4d009c90..7083e786e 100644 --- a/docs/examples/1e0b85750d4e63ebbc927d4627c44bf8.asciidoc +++ b/docs/examples/1e0b85750d4e63ebbc927d4627c44bf8.asciidoc @@ -1,5 +1,5 @@ //
This file is autogenerated, DO NOT EDIT -// how-to/size-your-shards.asciidoc:604 +// how-to/size-your-shards.asciidoc:601 [source, python] ---- diff --git a/docs/examples/1e26353d546d733634187b8c3a7837a7.asciidoc b/docs/examples/1e26353d546d733634187b8c3a7837a7.asciidoc index 37cc447f6..0266f4383 100644 --- a/docs/examples/1e26353d546d733634187b8c3a7837a7.asciidoc +++ b/docs/examples/1e26353d546d733634187b8c3a7837a7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/list-connectors-api.asciidoc:110 +// connector/apis/list-connectors-api.asciidoc:107 [source, python] ---- diff --git a/docs/examples/20005d8a6555b259b299d862cd218701.asciidoc b/docs/examples/20005d8a6555b259b299d862cd218701.asciidoc index 0a4219e1e..cb4910e7d 100644 --- a/docs/examples/20005d8a6555b259b299d862cd218701.asciidoc +++ b/docs/examples/20005d8a6555b259b299d862cd218701.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/match-query.asciidoc:190 +// query-dsl/match-query.asciidoc:194 [source, python] ---- diff --git a/docs/examples/216e24f05cbb82c1718713fbab8623d2.asciidoc b/docs/examples/216e24f05cbb82c1718713fbab8623d2.asciidoc index 9ddf0fe44..e158982a7 100644 --- a/docs/examples/216e24f05cbb82c1718713fbab8623d2.asciidoc +++ b/docs/examples/216e24f05cbb82c1718713fbab8623d2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/processors/geoip.asciidoc:136 +// ingest/processors/geoip.asciidoc:137 [source, python] ---- diff --git a/docs/examples/2171361cd8e70dcde91bdb9bbd8702f1.asciidoc b/docs/examples/2171361cd8e70dcde91bdb9bbd8702f1.asciidoc new file mode 100644 index 000000000..f18bb87d5 --- /dev/null +++ b/docs/examples/2171361cd8e70dcde91bdb9bbd8702f1.asciidoc @@ -0,0 +1,30 @@ +// This file is autogenerated, DO NOT EDIT +// esql/esql-lookup-join.asciidoc:142 + +[source, python] +---- +resp = client.bulk( + index="threat_list", + operations=[ + { + "index": {} + }, + { + "source.ip": "203.0.113.5", + "threat_level": "high", + "threat_type": "C2_SERVER", + "last_updated": "2025-04-22" + }, + { + "index": {} + }, + { + "source.ip": "198.51.100.2", + "threat_level": "medium", + "threat_type": "SCANNER", + "last_updated": "2025-04-23" + } + ], +) +print(resp) +---- diff --git a/docs/examples/21d41e8cbd107fbdf0901f885834dafc.asciidoc b/docs/examples/21d41e8cbd107fbdf0901f885834dafc.asciidoc index 4ce3834f3..dfd21919b 100644 --- a/docs/examples/21d41e8cbd107fbdf0901f885834dafc.asciidoc +++ b/docs/examples/21d41e8cbd107fbdf0901f885834dafc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/wildcard.asciidoc:139 +// mapping/types/wildcard.asciidoc:140 [source, python] ---- diff --git a/docs/examples/222e49c924ca8bac7b41bc952a39261c.asciidoc b/docs/examples/222e49c924ca8bac7b41bc952a39261c.asciidoc index faebb1e6e..f7ee79fc2 100644 --- a/docs/examples/222e49c924ca8bac7b41bc952a39261c.asciidoc +++ b/docs/examples/222e49c924ca8bac7b41bc952a39261c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/semantic-query.asciidoc:55 +// query-dsl/semantic-query.asciidoc:53 [source, python] ---- diff --git a/docs/examples/246763219ec06172f7aa57bba28d344a.asciidoc b/docs/examples/246763219ec06172f7aa57bba28d344a.asciidoc index 5a340dc07..8b1b1a996 100644 --- a/docs/examples/246763219ec06172f7aa57bba28d344a.asciidoc +++ b/docs/examples/246763219ec06172f7aa57bba28d344a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/rank-vectors.asciidoc:159 +// 
mapping/types/rank-vectors.asciidoc:152 [source, python] ---- diff --git a/docs/examples/24f4dfdf9922d5aa79151675b7767742.asciidoc b/docs/examples/24f4dfdf9922d5aa79151675b7767742.asciidoc index 454f46a64..2a298c3d4 100644 --- a/docs/examples/24f4dfdf9922d5aa79151675b7767742.asciidoc +++ b/docs/examples/24f4dfdf9922d5aa79151675b7767742.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/paginate-search-results.asciidoc:385 +// search/search-your-data/paginate-search-results.asciidoc:400 [source, python] ---- diff --git a/docs/examples/25576b6773322f0929d4c635a940dba0.asciidoc b/docs/examples/25576b6773322f0929d4c635a940dba0.asciidoc index 56e0c8101..b1c18d64a 100644 --- a/docs/examples/25576b6773322f0929d4c635a940dba0.asciidoc +++ b/docs/examples/25576b6773322f0929d4c635a940dba0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/query-string-query.asciidoc:530 +// query-dsl/query-string-query.asciidoc:533 [source, python] ---- diff --git a/docs/examples/270549e6b062228312c4e7a54a2c2209.asciidoc b/docs/examples/270549e6b062228312c4e7a54a2c2209.asciidoc index f2fbab21c..9ae9ebbce 100644 --- a/docs/examples/270549e6b062228312c4e7a54a2c2209.asciidoc +++ b/docs/examples/270549e6b062228312c4e7a54a2c2209.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/task-queue-backlog.asciidoc:55 +// troubleshooting/common-issues/task-queue-backlog.asciidoc:39 [source, python] ---- diff --git a/docs/examples/272e27bf1fcc4fe5dbd4092679dd0342.asciidoc b/docs/examples/272e27bf1fcc4fe5dbd4092679dd0342.asciidoc deleted file mode 100644 index f7955285c..000000000 --- a/docs/examples/272e27bf1fcc4fe5dbd4092679dd0342.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// migration/migrate_9_0.asciidoc:604 - -[source, python] ----- -resp = client.indices.add_block( - index=".ml-anomalies-custom-example", - block="write", -) -print(resp) ----- diff --git a/docs/examples/28543836b62b5622a402e6f7731d68f0.asciidoc b/docs/examples/28543836b62b5622a402e6f7731d68f0.asciidoc index ef9cd5175..b72a520b3 100644 --- a/docs/examples/28543836b62b5622a402e6f7731d68f0.asciidoc +++ b/docs/examples/28543836b62b5622a402e6f7731d68f0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/downsampling-manual.asciidoc:421 +// data-streams/downsampling-manual.asciidoc:424 [source, python] ---- diff --git a/docs/examples/2864a24608b3ac59d21f604f8a31d131.asciidoc b/docs/examples/2864a24608b3ac59d21f604f8a31d131.asciidoc index 98fb98017..2f3d3401f 100644 --- a/docs/examples/2864a24608b3ac59d21f604f8a31d131.asciidoc +++ b/docs/examples/2864a24608b3ac59d21f604f8a31d131.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// security/authentication/jwt-realm.asciidoc:504 +// security/authentication/jwt-realm.asciidoc:514 [source, python] ---- diff --git a/docs/examples/2a21674c40f9b182a8944769d20b2357.asciidoc b/docs/examples/2a21674c40f9b182a8944769d20b2357.asciidoc index a9b622970..80622a16c 100644 --- a/docs/examples/2a21674c40f9b182a8944769d20b2357.asciidoc +++ b/docs/examples/2a21674c40f9b182a8944769d20b2357.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/rank-vectors.asciidoc:137 +// mapping/types/rank-vectors.asciidoc:130 [source, python] ---- diff --git a/docs/examples/2a71e2d7f7179dd76183d30789046808.asciidoc b/docs/examples/2a71e2d7f7179dd76183d30789046808.asciidoc index 3e5c29b2d..08f15d6cd 100644 
--- a/docs/examples/2a71e2d7f7179dd76183d30789046808.asciidoc +++ b/docs/examples/2a71e2d7f7179dd76183d30789046808.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/multivalued-fields.asciidoc:224 +// esql/multivalued-fields.asciidoc:234 [source, python] ---- diff --git a/docs/examples/2afd49985950cbcccf727fa858d00067.asciidoc b/docs/examples/2afd49985950cbcccf727fa858d00067.asciidoc index 19ceadeaa..5ec0e9037 100644 --- a/docs/examples/2afd49985950cbcccf727fa858d00067.asciidoc +++ b/docs/examples/2afd49985950cbcccf727fa858d00067.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/semantic-text.asciidoc:159 +// mapping/types/semantic-text.asciidoc:258 [source, python] ---- diff --git a/docs/examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc b/docs/examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc index 2a307f890..168651f02 100644 --- a/docs/examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc +++ b/docs/examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-rest.asciidoc:384 +// esql/esql-rest.asciidoc:387 [source, python] ---- diff --git a/docs/examples/2b7687e3d7c06824950e00618c297864.asciidoc b/docs/examples/2b7687e3d7c06824950e00618c297864.asciidoc index 6c9103348..dc642b896 100644 --- a/docs/examples/2b7687e3d7c06824950e00618c297864.asciidoc +++ b/docs/examples/2b7687e3d7c06824950e00618c297864.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/resolve-cluster.asciidoc:205 +// indices/resolve-cluster.asciidoc:209 [source, python] ---- diff --git a/docs/examples/2bb41b0b4876ce98cd0cd8fb6d337f18.asciidoc b/docs/examples/2bb41b0b4876ce98cd0cd8fb6d337f18.asciidoc new file mode 100644 index 000000000..64dfcb7b5 --- /dev/null +++ b/docs/examples/2bb41b0b4876ce98cd0cd8fb6d337f18.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// migration/transient-settings-migration-guide.asciidoc:64 + +[source, python] +---- +resp = client.cluster.put_settings( + persistent={ + "cluster.indices.close.enable": False, + "indices.recovery.max_bytes_per_sec": "50mb" + }, + transient={ + "*": None + }, +) +print(resp) +---- diff --git a/docs/examples/2c23285eb9f52010ecb1d5cab640ff78.asciidoc b/docs/examples/2c23285eb9f52010ecb1d5cab640ff78.asciidoc new file mode 100644 index 000000000..db2012b10 --- /dev/null +++ b/docs/examples/2c23285eb9f52010ecb1d5cab640ff78.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// esql/esql-lookup-join.asciidoc:103 + +[source, python] +---- +resp = client.indices.create( + index="threat_list", + settings={ + "index.mode": "lookup" + }, + mappings={ + "properties": { + "source.ip": { + "type": "ip" + }, + "threat_level": { + "type": "keyword" + }, + "threat_type": { + "type": "keyword" + }, + "last_updated": { + "type": "date" + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/2c3207c0c985d253b2ecccc14e69e25a.asciidoc b/docs/examples/2c3207c0c985d253b2ecccc14e69e25a.asciidoc index 8822e4d52..4946350ea 100644 --- a/docs/examples/2c3207c0c985d253b2ecccc14e69e25a.asciidoc +++ b/docs/examples/2c3207c0c985d253b2ecccc14e69e25a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/downsampling-manual.asciidoc:412 +// data-streams/downsampling-manual.asciidoc:415 [source, python] ---- diff --git a/docs/examples/9aedc45f83e022732789e8d796f5a43c.asciidoc b/docs/examples/2cd8439db5054c93c49f1bf50433e1bb.asciidoc similarity index 96% rename from 
docs/examples/9aedc45f83e022732789e8d796f5a43c.asciidoc rename to docs/examples/2cd8439db5054c93c49f1bf50433e1bb.asciidoc index 0636b0a05..47c71c482 100644 --- a/docs/examples/9aedc45f83e022732789e8d796f5a43c.asciidoc +++ b/docs/examples/2cd8439db5054c93c49f1bf50433e1bb.asciidoc @@ -4,6 +4,7 @@ [source, python] ---- resp = client.cluster.reroute( + metric="none", commands=[ { "move": { diff --git a/docs/examples/2d0244c020075595acb625aa5ba8f455.asciidoc b/docs/examples/2d0244c020075595acb625aa5ba8f455.asciidoc index 483a812fc..7711e2bc4 100644 --- a/docs/examples/2d0244c020075595acb625aa5ba8f455.asciidoc +++ b/docs/examples/2d0244c020075595acb625aa5ba8f455.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/fields/synthetic-source.asciidoc:253 +// mapping/fields/synthetic-source.asciidoc:255 [source, python] ---- diff --git a/docs/examples/2dad2b0c8ba503228f4b11cecca0b348.asciidoc b/docs/examples/2dad2b0c8ba503228f4b11cecca0b348.asciidoc index b073c0fa0..c251c2bf6 100644 --- a/docs/examples/2dad2b0c8ba503228f4b11cecca0b348.asciidoc +++ b/docs/examples/2dad2b0c8ba503228f4b11cecca0b348.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc:222 +// data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc:234 [source, python] ---- diff --git a/docs/examples/2ee239df3243c98418f7d9a5c7be4cfd.asciidoc b/docs/examples/2ee239df3243c98418f7d9a5c7be4cfd.asciidoc index d916e38fa..27f78f221 100644 --- a/docs/examples/2ee239df3243c98418f7d9a5c7be4cfd.asciidoc +++ b/docs/examples/2ee239df3243c98418f7d9a5c7be4cfd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// analysis/tokenfilters/flatten-graph-tokenfilter.asciidoc:203 +// analysis/tokenfilters/flatten-graph-tokenfilter.asciidoc:218 [source, python] ---- diff --git a/docs/examples/2f2fd35905feef0b561c05d70c7064c1.asciidoc b/docs/examples/2f2fd35905feef0b561c05d70c7064c1.asciidoc index d7bf20ff3..98ad01d8c 100644 --- a/docs/examples/2f2fd35905feef0b561c05d70c7064c1.asciidoc +++ b/docs/examples/2f2fd35905feef0b561c05d70c7064c1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/dynamic/templates.asciidoc:570 +// migration/migrate_8_0/migrate_to_java_time.asciidoc:239 [source, python] ---- diff --git a/docs/examples/2f67db5e4d6c958258c3d70fb2d0b1c8.asciidoc b/docs/examples/2f67db5e4d6c958258c3d70fb2d0b1c8.asciidoc new file mode 100644 index 000000000..00612b5df --- /dev/null +++ b/docs/examples/2f67db5e4d6c958258c3d70fb2d0b1c8.asciidoc @@ -0,0 +1,13 @@ +// This file is autogenerated, DO NOT EDIT +// migration/migrate_8_0/index-setting-changes.asciidoc:48 + +[source, python] +---- +resp = client.indices.put_settings( + index="my-index-000001", + settings={ + "index.merge.policy.max_merge_at_once_explicit": None + }, +) +print(resp) +---- diff --git a/docs/examples/2f72a63c73dd672ac2dc3997ad15dd41.asciidoc b/docs/examples/2f72a63c73dd672ac2dc3997ad15dd41.asciidoc index a5cb08edb..d2eae342c 100644 --- a/docs/examples/2f72a63c73dd672ac2dc3997ad15dd41.asciidoc +++ b/docs/examples/2f72a63c73dd672ac2dc3997ad15dd41.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/semantic-text.asciidoc:242 +// mapping/types/semantic-text.asciidoc:363 [source, python] ---- diff --git a/docs/examples/2f9574fee2ebecd6f7d917ee99b26bcc.asciidoc b/docs/examples/2f9574fee2ebecd6f7d917ee99b26bcc.asciidoc index 1017daeec..2a3abce15 100644 --- 
a/docs/examples/2f9574fee2ebecd6f7d917ee99b26bcc.asciidoc +++ b/docs/examples/2f9574fee2ebecd6f7d917ee99b26bcc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/params/doc-values.asciidoc:65 +// mapping/params/doc-values.asciidoc:73 [source, python] ---- diff --git a/docs/examples/2f98924c3d593ea2b60edb9cef5bee22.asciidoc b/docs/examples/2f98924c3d593ea2b60edb9cef5bee22.asciidoc index e811461d4..6b434ce62 100644 --- a/docs/examples/2f98924c3d593ea2b60edb9cef5bee22.asciidoc +++ b/docs/examples/2f98924c3d593ea2b60edb9cef5bee22.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// how-to/size-your-shards.asciidoc:484 +// how-to/size-your-shards.asciidoc:483 [source, python] ---- diff --git a/docs/examples/2fea3e324939cc7e9c396964aeee7111.asciidoc b/docs/examples/2fea3e324939cc7e9c396964aeee7111.asciidoc index aee1dc023..aad8d9d23 100644 --- a/docs/examples/2fea3e324939cc7e9c396964aeee7111.asciidoc +++ b/docs/examples/2fea3e324939cc7e9c396964aeee7111.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/match-query.asciidoc:256 +// query-dsl/match-query.asciidoc:253 [source, python] ---- diff --git a/docs/examples/30d051f534aeb884176eedb2c11dac85.asciidoc b/docs/examples/30d051f534aeb884176eedb2c11dac85.asciidoc index 7d95ecb4e..d3c2b9193 100644 --- a/docs/examples/30d051f534aeb884176eedb2c11dac85.asciidoc +++ b/docs/examples/30d051f534aeb884176eedb2c11dac85.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-elasticsearch.asciidoc:176 +// inference/service-elasticsearch.asciidoc:171 [source, python] ---- diff --git a/docs/examples/30f3e3b9df46afd12e68bc71f18483b4.asciidoc b/docs/examples/30f3e3b9df46afd12e68bc71f18483b4.asciidoc index 594027bb3..332f5b268 100644 --- a/docs/examples/30f3e3b9df46afd12e68bc71f18483b4.asciidoc +++ b/docs/examples/30f3e3b9df46afd12e68bc71f18483b4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:131 +// indices/put-mapping.asciidoc:137 [source, python] ---- diff --git a/docs/examples/3166455372f2d96622caff076e91ebe7.asciidoc b/docs/examples/3166455372f2d96622caff076e91ebe7.asciidoc index bcc74217a..ca7037697 100644 --- a/docs/examples/3166455372f2d96622caff076e91ebe7.asciidoc +++ b/docs/examples/3166455372f2d96622caff076e91ebe7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/percolate-query.asciidoc:308 +// query-dsl/percolate-query.asciidoc:306 [source, python] ---- diff --git a/docs/examples/31bc93e429ad0de11dd2dd231e8f2c5e.asciidoc b/docs/examples/31bc93e429ad0de11dd2dd231e8f2c5e.asciidoc new file mode 100644 index 000000000..85b343892 --- /dev/null +++ b/docs/examples/31bc93e429ad0de11dd2dd231e8f2c5e.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// indices/apis/unfreeze.asciidoc:57 + +[source, python] +---- +resp = client.indices.unfreeze( + index="my-index-000001", +) +print(resp) +---- diff --git a/docs/examples/320645d771e952af2a67bb7445c3688d.asciidoc b/docs/examples/320645d771e952af2a67bb7445c3688d.asciidoc index 4afe0c1b7..f29deb5b7 100644 --- a/docs/examples/320645d771e952af2a67bb7445c3688d.asciidoc +++ b/docs/examples/320645d771e952af2a67bb7445c3688d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// analysis/analyzers/lang-analyzer.asciidoc:1648 +// analysis/analyzers/lang-analyzer.asciidoc:1647 [source, python] ---- diff --git a/docs/examples/327466380bcd55361973b4a96c6dccb2.asciidoc 
b/docs/examples/327466380bcd55361973b4a96c6dccb2.asciidoc index 4d6c43729..307952a06 100644 --- a/docs/examples/327466380bcd55361973b4a96c6dccb2.asciidoc +++ b/docs/examples/327466380bcd55361973b4a96c6dccb2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// analysis/analyzers/lang-analyzer.asciidoc:1698 +// analysis/analyzers/lang-analyzer.asciidoc:1697 [source, python] ---- diff --git a/docs/examples/3312c82f81816bf76629db9582991812.asciidoc b/docs/examples/3312c82f81816bf76629db9582991812.asciidoc index 15244e685..fc383a8e4 100644 --- a/docs/examples/3312c82f81816bf76629db9582991812.asciidoc +++ b/docs/examples/3312c82f81816bf76629db9582991812.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// index-modules/slowlog.asciidoc:135 +// index-modules/slowlog.asciidoc:136 [source, python] ---- diff --git a/docs/examples/334811cfceb6858aeec5b3461717dd63.asciidoc b/docs/examples/334811cfceb6858aeec5b3461717dd63.asciidoc index cf1347fab..67d05520d 100644 --- a/docs/examples/334811cfceb6858aeec5b3461717dd63.asciidoc +++ b/docs/examples/334811cfceb6858aeec5b3461717dd63.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/processors/geoip.asciidoc:188 +// ingest/processors/geoip.asciidoc:189 [source, python] ---- diff --git a/docs/examples/346f28d82acb5427c304aa574fea0008.asciidoc b/docs/examples/346f28d82acb5427c304aa574fea0008.asciidoc index efd1f5771..a0683551d 100644 --- a/docs/examples/346f28d82acb5427c304aa574fea0008.asciidoc +++ b/docs/examples/346f28d82acb5427c304aa574fea0008.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// analysis/analyzers/lang-analyzer.asciidoc:1847 +// analysis/analyzers/lang-analyzer.asciidoc:1846 [source, python] ---- diff --git a/docs/examples/750ac969f9a05567f5cdf4f93d6244b6.asciidoc b/docs/examples/35b686d9d9e915d0dea7a4251781767d.asciidoc similarity index 95% rename from docs/examples/750ac969f9a05567f5cdf4f93d6244b6.asciidoc rename to docs/examples/35b686d9d9e915d0dea7a4251781767d.asciidoc index 7e47cc7c7..ee73d8b69 100644 --- a/docs/examples/750ac969f9a05567f5cdf4f93d6244b6.asciidoc +++ b/docs/examples/35b686d9d9e915d0dea7a4251781767d.asciidoc @@ -4,6 +4,7 @@ [source, python] ---- resp = client.cluster.reroute( + metric="none", commands=[ { "allocate_empty_primary": { diff --git a/docs/examples/36792c81c053e0555407d1e83e7e054f.asciidoc b/docs/examples/36792c81c053e0555407d1e83e7e054f.asciidoc index 4941727c4..040e9433d 100644 --- a/docs/examples/36792c81c053e0555407d1e83e7e054f.asciidoc +++ b/docs/examples/36792c81c053e0555407d1e83e7e054f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:452 +// search/retriever.asciidoc:486 [source, python] ---- diff --git a/docs/examples/36d229f734adcdab00be266a7ce038b1.asciidoc b/docs/examples/36d229f734adcdab00be266a7ce038b1.asciidoc index b52adb699..a6752687c 100644 --- a/docs/examples/36d229f734adcdab00be266a7ce038b1.asciidoc +++ b/docs/examples/36d229f734adcdab00be266a7ce038b1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/dense-vector.asciidoc:404 +// mapping/types/dense-vector.asciidoc:397 [source, python] ---- diff --git a/docs/examples/3758b8f2ab9f6f28a764ee6c42c85766.asciidoc b/docs/examples/3758b8f2ab9f6f28a764ee6c42c85766.asciidoc index 0e548f378..86088d303 100644 --- a/docs/examples/3758b8f2ab9f6f28a764ee6c42c85766.asciidoc +++ b/docs/examples/3758b8f2ab9f6f28a764ee6c42c85766.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT 
EDIT -// search/search-your-data/paginate-search-results.asciidoc:550 +// search/search-your-data/paginate-search-results.asciidoc:565 [source, python] ---- diff --git a/docs/examples/37c73410bf13429279cbc61a413957d8.asciidoc b/docs/examples/37c73410bf13429279cbc61a413957d8.asciidoc index b75a76507..56fc5156b 100644 --- a/docs/examples/37c73410bf13429279cbc61a413957d8.asciidoc +++ b/docs/examples/37c73410bf13429279cbc61a413957d8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// how-to/size-your-shards.asciidoc:558 +// how-to/size-your-shards.asciidoc:557 [source, python] ---- diff --git a/docs/examples/39ce44333d28ed2b833722d3e3cb06f3.asciidoc b/docs/examples/39ce44333d28ed2b833722d3e3cb06f3.asciidoc index 4aa468359..c6dda3af5 100644 --- a/docs/examples/39ce44333d28ed2b833722d3e3cb06f3.asciidoc +++ b/docs/examples/39ce44333d28ed2b833722d3e3cb06f3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/bool-query.asciidoc:187 +// query-dsl/bool-query.asciidoc:230 [source, python] ---- diff --git a/docs/examples/3a204b57072a104d9b50f3a9e064a8f6.asciidoc b/docs/examples/3a204b57072a104d9b50f3a9e064a8f6.asciidoc deleted file mode 100644 index 6ebfa63af..000000000 --- a/docs/examples/3a204b57072a104d9b50f3a9e064a8f6.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// migration/migrate_9_0.asciidoc:620 - -[source, python] ----- -resp = client.search( - index=".ml-anomalies-custom-example", - size=0, - aggs={ - "job_ids": { - "terms": { - "field": "job_id", - "size": 100 - } - } - }, -) -print(resp) ----- diff --git a/docs/examples/3afc6dacf90b42900ab571aad8a61d75.asciidoc b/docs/examples/3afc6dacf90b42900ab571aad8a61d75.asciidoc index db5b89d75..9168398c6 100644 --- a/docs/examples/3afc6dacf90b42900ab571aad8a61d75.asciidoc +++ b/docs/examples/3afc6dacf90b42900ab571aad8a61d75.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// analysis/analyzers/lang-analyzer.asciidoc:1599 +// analysis/analyzers/lang-analyzer.asciidoc:1598 [source, python] ---- diff --git a/docs/examples/3bc4a3681e3ea9cb3de49f72085807d8.asciidoc b/docs/examples/3bc4a3681e3ea9cb3de49f72085807d8.asciidoc index b58b0609d..9849eb5f6 100644 --- a/docs/examples/3bc4a3681e3ea9cb3de49f72085807d8.asciidoc +++ b/docs/examples/3bc4a3681e3ea9cb3de49f72085807d8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/retrievers-examples.asciidoc:321 +// search/search-your-data/retrievers-examples.asciidoc:326 [source, python] ---- diff --git a/docs/examples/3c7621a81fa982b79f040a6d2611530e.asciidoc b/docs/examples/3c7621a81fa982b79f040a6d2611530e.asciidoc index 71e813344..de860006f 100644 --- a/docs/examples/3c7621a81fa982b79f040a6d2611530e.asciidoc +++ b/docs/examples/3c7621a81fa982b79f040a6d2611530e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/simulate-template.asciidoc:157 +// indices/simulate-template.asciidoc:156 [source, python] ---- diff --git a/docs/examples/3cd93a48906069709b76420c66930c01.asciidoc b/docs/examples/3cd93a48906069709b76420c66930c01.asciidoc index 6c3859dd8..fffdc26dd 100644 --- a/docs/examples/3cd93a48906069709b76420c66930c01.asciidoc +++ b/docs/examples/3cd93a48906069709b76420c66930c01.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// analysis/tokenfilters/stemmer-tokenfilter.asciidoc:264 +// analysis/tokenfilters/stemmer-tokenfilter.asciidoc:265 [source, python] ---- diff --git 
a/docs/examples/3d924850fb2372fff4739fd145660f88.asciidoc b/docs/examples/3d924850fb2372fff4739fd145660f88.asciidoc new file mode 100644 index 000000000..34cd7292a --- /dev/null +++ b/docs/examples/3d924850fb2372fff4739fd145660f88.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/retrievers-examples.asciidoc:540 + +[source, python] +---- +resp = client.search( + index="retrievers_example", + retriever={ + "rrf": { + "query": "artificial intelligence", + "fields": [ + "text", + "text_semantic" + ] + } + }, + source=False, +) +print(resp) +---- diff --git a/docs/examples/3faf5e2873de340acfe0a617017db784.asciidoc b/docs/examples/3faf5e2873de340acfe0a617017db784.asciidoc index b8b84cc75..5babde107 100644 --- a/docs/examples/3faf5e2873de340acfe0a617017db784.asciidoc +++ b/docs/examples/3faf5e2873de340acfe0a617017db784.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/query-string-query.asciidoc:283 +// query-dsl/query-string-query.asciidoc:286 [source, python] ---- diff --git a/docs/examples/40b73b5c7ca144dc3f63f5b741f33d80.asciidoc b/docs/examples/40b73b5c7ca144dc3f63f5b741f33d80.asciidoc index 0b7604dc6..609755ffd 100644 --- a/docs/examples/40b73b5c7ca144dc3f63f5b741f33d80.asciidoc +++ b/docs/examples/40b73b5c7ca144dc3f63f5b741f33d80.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/percolate-query.asciidoc:157 +// query-dsl/percolate-query.asciidoc:155 [source, python] ---- diff --git a/docs/examples/40bd86e400d27e68b8f0ae580c29d32d.asciidoc b/docs/examples/40bd86e400d27e68b8f0ae580c29d32d.asciidoc index f150be621..2cb413860 100644 --- a/docs/examples/40bd86e400d27e68b8f0ae580c29d32d.asciidoc +++ b/docs/examples/40bd86e400d27e68b8f0ae580c29d32d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// how-to/size-your-shards.asciidoc:279 +// how-to/size-your-shards.asciidoc:278 [source, python] ---- diff --git a/docs/examples/40c3e7bb1fdc125a1ab21bd7d7326694.asciidoc b/docs/examples/40c3e7bb1fdc125a1ab21bd7d7326694.asciidoc index 9a580ba46..af859cd55 100644 --- a/docs/examples/40c3e7bb1fdc125a1ab21bd7d7326694.asciidoc +++ b/docs/examples/40c3e7bb1fdc125a1ab21bd7d7326694.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/multivalued-fields.asciidoc:145 +// esql/multivalued-fields.asciidoc:151 [source, python] ---- diff --git a/docs/examples/40f97f70e8e743c6a6296c81b920aeb0.asciidoc b/docs/examples/40f97f70e8e743c6a6296c81b920aeb0.asciidoc index 06969d9fd..8f4e0432d 100644 --- a/docs/examples/40f97f70e8e743c6a6296c81b920aeb0.asciidoc +++ b/docs/examples/40f97f70e8e743c6a6296c81b920aeb0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// how-to/size-your-shards.asciidoc:314 +// how-to/size-your-shards.asciidoc:313 [source, python] ---- diff --git a/docs/examples/425eaaf9c7e3b1e77a3474fbab4183b4.asciidoc b/docs/examples/425eaaf9c7e3b1e77a3474fbab4183b4.asciidoc index 7b8038083..b6bb4fb2c 100644 --- a/docs/examples/425eaaf9c7e3b1e77a3474fbab4183b4.asciidoc +++ b/docs/examples/425eaaf9c7e3b1e77a3474fbab4183b4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/task-queue-backlog.asciidoc:36 +// troubleshooting/common-issues/task-queue-backlog.asciidoc:25 [source, python] ---- diff --git a/docs/examples/4342ccf6cc24fd80bd3cd1f9a4c2ef8e.asciidoc b/docs/examples/4342ccf6cc24fd80bd3cd1f9a4c2ef8e.asciidoc index ea2dda1ee..e86d02b47 100644 --- 
a/docs/examples/4342ccf6cc24fd80bd3cd1f9a4c2ef8e.asciidoc +++ b/docs/examples/4342ccf6cc24fd80bd3cd1f9a4c2ef8e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/paginate-search-results.asciidoc:515 +// search/search-your-data/paginate-search-results.asciidoc:530 [source, python] ---- diff --git a/docs/examples/43d9e314431336a6f084cea76dfd6489.asciidoc b/docs/examples/43d9e314431336a6f084cea76dfd6489.asciidoc index c1ec6eb7d..786d31f07 100644 --- a/docs/examples/43d9e314431336a6f084cea76dfd6489.asciidoc +++ b/docs/examples/43d9e314431336a6f084cea76dfd6489.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:254 +// search/retriever.asciidoc:278 [source, python] ---- diff --git a/docs/examples/6cb1dae368c945ecf7c9ec332a5743a2.asciidoc b/docs/examples/45b0b2420c807bcb31fcb051daa7099a.asciidoc similarity index 92% rename from docs/examples/6cb1dae368c945ecf7c9ec332a5743a2.asciidoc rename to docs/examples/45b0b2420c807bcb31fcb051daa7099a.asciidoc index 553747119..4100136c8 100644 --- a/docs/examples/6cb1dae368c945ecf7c9ec332a5743a2.asciidoc +++ b/docs/examples/45b0b2420c807bcb31fcb051daa7099a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/text.asciidoc:180 +// mapping/types/text.asciidoc:235 [source, python] ---- @@ -18,6 +18,7 @@ resp = client.indices.create( "properties": { "text": { "type": "text", + "store": True, "fields": { "raw": { "type": "keyword" diff --git a/docs/examples/46276fbcba7b5e9541dd56ec3f20cf2a.asciidoc b/docs/examples/46276fbcba7b5e9541dd56ec3f20cf2a.asciidoc new file mode 100644 index 000000000..44fe001b8 --- /dev/null +++ b/docs/examples/46276fbcba7b5e9541dd56ec3f20cf2a.asciidoc @@ -0,0 +1,18 @@ +// This file is autogenerated, DO NOT EDIT +// inference/service-voyageai.asciidoc:154 + +[source, python] +---- +resp = client.inference.put( + task_type="text_embedding", + inference_id="voyageai-embeddings", + inference_config={ + "service": "voyageai", + "service_settings": { + "model_id": "voyage-3-large", + "dimensions": 512 + } + }, +) +print(resp) +---- diff --git a/docs/examples/46658f00edc4865dfe472a392374cd0f.asciidoc b/docs/examples/46658f00edc4865dfe472a392374cd0f.asciidoc index 185af87c5..568634b45 100644 --- a/docs/examples/46658f00edc4865dfe472a392374cd0f.asciidoc +++ b/docs/examples/46658f00edc4865dfe472a392374cd0f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-index-template-v1.asciidoc:258 +// indices/put-index-template-v1.asciidoc:257 [source, python] ---- diff --git a/docs/examples/472ec8c57fec8457e31fe6dd7f6e3713.asciidoc b/docs/examples/472ec8c57fec8457e31fe6dd7f6e3713.asciidoc index 09064de43..e73d68276 100644 --- a/docs/examples/472ec8c57fec8457e31fe6dd7f6e3713.asciidoc +++ b/docs/examples/472ec8c57fec8457e31fe6dd7f6e3713.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/query-string-query.asciidoc:448 +// query-dsl/query-string-query.asciidoc:451 [source, python] ---- diff --git a/docs/examples/47909e194d10743093f4a22c27a85925.asciidoc b/docs/examples/47909e194d10743093f4a22c27a85925.asciidoc index 845b3e147..fba225225 100644 --- a/docs/examples/47909e194d10743093f4a22c27a85925.asciidoc +++ b/docs/examples/47909e194d10743093f4a22c27a85925.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/paginate-search-results.asciidoc:198 +// search/search-your-data/paginate-search-results.asciidoc:213 [source, python] ---- 
diff --git a/docs/examples/488f6df1df71972392b670ce557f7ff3.asciidoc b/docs/examples/488f6df1df71972392b670ce557f7ff3.asciidoc index 6d60ffbe5..f987f3c7d 100644 --- a/docs/examples/488f6df1df71972392b670ce557f7ff3.asciidoc +++ b/docs/examples/488f6df1df71972392b670ce557f7ff3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-index-template-v1.asciidoc:240 +// indices/put-index-template-v1.asciidoc:239 [source, python] ---- diff --git a/docs/examples/49b31e23f8b9667b6a7b2734d55fb6ed.asciidoc b/docs/examples/49b31e23f8b9667b6a7b2734d55fb6ed.asciidoc new file mode 100644 index 000000000..a3fcb5fab --- /dev/null +++ b/docs/examples/49b31e23f8b9667b6a7b2734d55fb6ed.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// search/knn-search.asciidoc:36 + +[source, python] +---- +resp = client.knn_search( + index="my-index", + knn={ + "field": "image_vector", + "query_vector": [ + 0.3, + 0.1, + 1.2 + ], + "k": 10, + "num_candidates": 100 + }, + source=[ + "name", + "file_type" + ], +) +print(resp) +---- diff --git a/docs/examples/49e8773a34fcbf825de38426cff5509c.asciidoc b/docs/examples/49e8773a34fcbf825de38426cff5509c.asciidoc index 4e7fd6c8f..4e6dd4ec4 100644 --- a/docs/examples/49e8773a34fcbf825de38426cff5509c.asciidoc +++ b/docs/examples/49e8773a34fcbf825de38426cff5509c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/profile.asciidoc:1275 +// search/profile.asciidoc:1276 [source, python] ---- diff --git a/docs/examples/643e19c3b6ac1134554dd890e2249c2b.asciidoc b/docs/examples/4ab7c13d27ec8f9e6705a0e77fe53c20.asciidoc similarity index 84% rename from docs/examples/643e19c3b6ac1134554dd890e2249c2b.asciidoc rename to docs/examples/4ab7c13d27ec8f9e6705a0e77fe53c20.asciidoc index c77ffb136..e80b9f805 100644 --- a/docs/examples/643e19c3b6ac1134554dd890e2249c2b.asciidoc +++ b/docs/examples/4ab7c13d27ec8f9e6705a0e77fe53c20.asciidoc @@ -1,12 +1,12 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/logs.asciidoc:20 +// data-streams/logs.asciidoc:22 [source, python] ---- resp = client.indices.put_index_template( name="my-index-template", index_patterns=[ - "logs-*" + "my-datastream-*" ], data_stream={}, template={ diff --git a/docs/examples/4b3a49710fafa35d6d41a8ec12434515.asciidoc b/docs/examples/4b3a49710fafa35d6d41a8ec12434515.asciidoc index 3348a97e0..5ea292e1c 100644 --- a/docs/examples/4b3a49710fafa35d6d41a8ec12434515.asciidoc +++ b/docs/examples/4b3a49710fafa35d6d41a8ec12434515.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/percolate-query.asciidoc:467 +// query-dsl/percolate-query.asciidoc:465 [source, python] ---- diff --git a/docs/examples/4bef98a2dac575a50ee0783c2269f1db.asciidoc b/docs/examples/4bef98a2dac575a50ee0783c2269f1db.asciidoc index 80814dcf0..d2788d077 100644 --- a/docs/examples/4bef98a2dac575a50ee0783c2269f1db.asciidoc +++ b/docs/examples/4bef98a2dac575a50ee0783c2269f1db.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/dense-vector.asciidoc:498 +// mapping/types/dense-vector.asciidoc:491 [source, python] ---- diff --git a/docs/examples/4c174e228b6b74497b73ef2be80de7ad.asciidoc b/docs/examples/4c174e228b6b74497b73ef2be80de7ad.asciidoc index 3dd959ac0..899054e29 100644 --- a/docs/examples/4c174e228b6b74497b73ef2be80de7ad.asciidoc +++ b/docs/examples/4c174e228b6b74497b73ef2be80de7ad.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/trained-models/apis/get-trained-models.asciidoc:1467 +// 
ml/trained-models/apis/get-trained-models.asciidoc:1466 [source, python] ---- diff --git a/docs/examples/4c3db8987d7b2d3d3df78ff1e71e7ede.asciidoc b/docs/examples/4c3db8987d7b2d3d3df78ff1e71e7ede.asciidoc index 4fb637fd2..b8f216834 100644 --- a/docs/examples/4c3db8987d7b2d3d3df78ff1e71e7ede.asciidoc +++ b/docs/examples/4c3db8987d7b2d3d3df78ff1e71e7ede.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/match-query.asciidoc:22 +// query-dsl/match-query.asciidoc:21 [source, python] ---- diff --git a/docs/examples/4c9350ed09b28f00e297ebe73c3b95a2.asciidoc b/docs/examples/4c9350ed09b28f00e297ebe73c3b95a2.asciidoc index 85dd4b15b..6d0c9c9ed 100644 --- a/docs/examples/4c9350ed09b28f00e297ebe73c3b95a2.asciidoc +++ b/docs/examples/4c9350ed09b28f00e297ebe73c3b95a2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-elasticsearch.asciidoc:236 +// inference/service-elasticsearch.asciidoc:231 [source, python] ---- diff --git a/docs/examples/4c95d54b32df4dc49e9762b6c1ae2c05.asciidoc b/docs/examples/4c95d54b32df4dc49e9762b6c1ae2c05.asciidoc index da1513b91..37e7f3fb3 100644 --- a/docs/examples/4c95d54b32df4dc49e9762b6c1ae2c05.asciidoc +++ b/docs/examples/4c95d54b32df4dc49e9762b6c1ae2c05.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/text.asciidoc:368 +// mapping/types/text.asciidoc:374 [source, python] ---- diff --git a/docs/examples/4ca15672fc5ab1d80a127d086b6d2837.asciidoc b/docs/examples/4ca15672fc5ab1d80a127d086b6d2837.asciidoc index b0a3784fb..0492c7100 100644 --- a/docs/examples/4ca15672fc5ab1d80a127d086b6d2837.asciidoc +++ b/docs/examples/4ca15672fc5ab1d80a127d086b6d2837.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cluster/allocation-explain.asciidoc:457 +// cluster/allocation-explain.asciidoc:456 [source, python] ---- diff --git a/docs/examples/4dab4c5168047ba596af1beb0e55b845.asciidoc b/docs/examples/4dab4c5168047ba596af1beb0e55b845.asciidoc new file mode 100644 index 000000000..275db14c9 --- /dev/null +++ b/docs/examples/4dab4c5168047ba596af1beb0e55b845.asciidoc @@ -0,0 +1,10 @@ +// This file is autogenerated, DO NOT EDIT +// migration/transient-settings-migration-guide.asciidoc:82 + +[source, python] +---- +resp = client.cluster.get_settings( + flat_settings=True, +) +print(resp) +---- diff --git a/docs/examples/4e50d9d25bfb07ac73e3a2be5d2fbbf7.asciidoc b/docs/examples/4e50d9d25bfb07ac73e3a2be5d2fbbf7.asciidoc index 59b09c26e..bf60eda88 100644 --- a/docs/examples/4e50d9d25bfb07ac73e3a2be5d2fbbf7.asciidoc +++ b/docs/examples/4e50d9d25bfb07ac73e3a2be5d2fbbf7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/paginate-search-results.asciidoc:229 +// search/search-your-data/paginate-search-results.asciidoc:244 [source, python] ---- diff --git a/docs/examples/4ed946065faa92f9950f04e402676a97.asciidoc b/docs/examples/4ed946065faa92f9950f04e402676a97.asciidoc index 72f00c320..ee956cd68 100644 --- a/docs/examples/4ed946065faa92f9950f04e402676a97.asciidoc +++ b/docs/examples/4ed946065faa92f9950f04e402676a97.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/info.asciidoc:206 +// rest-api/info.asciidoc:210 [source, python] ---- diff --git a/docs/examples/4f6694ef147a73b1163bde3c13779d26.asciidoc b/docs/examples/4f6694ef147a73b1163bde3c13779d26.asciidoc index 249c1bddd..419fb698c 100644 --- a/docs/examples/4f6694ef147a73b1163bde3c13779d26.asciidoc +++ 
b/docs/examples/4f6694ef147a73b1163bde3c13779d26.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/rejected-requests.asciidoc:68 +// troubleshooting/common-issues/rejected-requests.asciidoc:62 [source, python] ---- diff --git a/docs/examples/50b5c0332949d2154c72b629b5fa6222.asciidoc b/docs/examples/50b5c0332949d2154c72b629b5fa6222.asciidoc index b096a417a..abb1aabc9 100644 --- a/docs/examples/50b5c0332949d2154c72b629b5fa6222.asciidoc +++ b/docs/examples/50b5c0332949d2154c72b629b5fa6222.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:345 +// indices/put-mapping.asciidoc:351 [source, python] ---- diff --git a/docs/examples/50d9c0508ddb0fc5ba5a893eec219dd8.asciidoc b/docs/examples/50d9c0508ddb0fc5ba5a893eec219dd8.asciidoc index a078c4d75..edfa07a0d 100644 --- a/docs/examples/50d9c0508ddb0fc5ba5a893eec219dd8.asciidoc +++ b/docs/examples/50d9c0508ddb0fc5ba5a893eec219dd8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/fields/synthetic-source.asciidoc:129 +// mapping/fields/synthetic-source.asciidoc:131 [source, python] ---- diff --git a/docs/examples/51390ca10aa22d7104e8970f09ea4512.asciidoc b/docs/examples/51390ca10aa22d7104e8970f09ea4512.asciidoc new file mode 100644 index 000000000..89b0ed3ee --- /dev/null +++ b/docs/examples/51390ca10aa22d7104e8970f09ea4512.asciidoc @@ -0,0 +1,33 @@ +// This file is autogenerated, DO NOT EDIT +// mapping/types/binary.asciidoc:61 + +[source, python] +---- +resp = client.indices.create( + index="idx", + mappings={ + "_source": { + "mode": "synthetic" + }, + "properties": { + "binary": { + "type": "binary", + "doc_values": True + } + } + }, +) +print(resp) + +resp1 = client.index( + index="idx", + id="1", + document={ + "binary": [ + "IAA=", + "EAA=" + ] + }, +) +print(resp1) +---- diff --git a/docs/examples/519e46350316a33162740e5d7968aa2c.asciidoc b/docs/examples/519e46350316a33162740e5d7968aa2c.asciidoc index a0daa1e65..0759524d5 100644 --- a/docs/examples/519e46350316a33162740e5d7968aa2c.asciidoc +++ b/docs/examples/519e46350316a33162740e5d7968aa2c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/knn-search.asciidoc:1103 +// search/search-your-data/knn-search.asciidoc:1101 [source, python] ---- diff --git a/docs/examples/51b44224feee6e2e5974824334474c77.asciidoc b/docs/examples/51b44224feee6e2e5974824334474c77.asciidoc index 93e483a64..39fd07694 100644 --- a/docs/examples/51b44224feee6e2e5974824334474c77.asciidoc +++ b/docs/examples/51b44224feee6e2e5974824334474c77.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/repository-s3.asciidoc:371 +// snapshot-restore/repository-s3.asciidoc:337 [source, python] ---- diff --git a/docs/examples/51f6a62c0f0a02aa97a42c6a80542aa4.asciidoc b/docs/examples/51f6a62c0f0a02aa97a42c6a80542aa4.asciidoc new file mode 100644 index 000000000..f42cdc908 --- /dev/null +++ b/docs/examples/51f6a62c0f0a02aa97a42c6a80542aa4.asciidoc @@ -0,0 +1,28 @@ +// This file is autogenerated, DO NOT EDIT +// search/retriever.asciidoc:1012 + +[source, python] +---- +resp = client.indices.create( + index="books", + mappings={ + "properties": { + "title": { + "type": "text", + "copy_to": "title_semantic" + }, + "description": { + "type": "text", + "copy_to": "description_semantic" + }, + "title_semantic": { + "type": "semantic_text" + }, + "description_semantic": { + "type": "semantic_text" + } + } + }, +) +print(resp) +---- diff 
--git a/docs/examples/52f1c1689ab35353858cdeaab7597546.asciidoc b/docs/examples/52f1c1689ab35353858cdeaab7597546.asciidoc index cd29eaa7d..83ea88334 100644 --- a/docs/examples/52f1c1689ab35353858cdeaab7597546.asciidoc +++ b/docs/examples/52f1c1689ab35353858cdeaab7597546.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/common-log-format-example.asciidoc:174 +// ingest/common-log-format-example.asciidoc:175 [source, python] ---- diff --git a/docs/examples/52f4c5eb08d39f98e2e2f5527ece9731.asciidoc b/docs/examples/52f4c5eb08d39f98e2e2f5527ece9731.asciidoc index 89fa235ad..34a3c3906 100644 --- a/docs/examples/52f4c5eb08d39f98e2e2f5527ece9731.asciidoc +++ b/docs/examples/52f4c5eb08d39f98e2e2f5527ece9731.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-alibabacloud-ai-search.asciidoc:210 +// inference/service-alibabacloud-ai-search.asciidoc:213 [source, python] ---- diff --git a/docs/examples/551799fef2f86e393db83a967e4a30d1.asciidoc b/docs/examples/551799fef2f86e393db83a967e4a30d1.asciidoc index e2b93fb61..54ada40f7 100644 --- a/docs/examples/551799fef2f86e393db83a967e4a30d1.asciidoc +++ b/docs/examples/551799fef2f86e393db83a967e4a30d1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/aggregate-metric-double.asciidoc:264 +// mapping/types/aggregate-metric-double.asciidoc:257 [source, python] ---- diff --git a/docs/examples/56a1aa4f7fa62f2289e20607e3039bf3.asciidoc b/docs/examples/56a1aa4f7fa62f2289e20607e3039bf3.asciidoc index 3d50d720a..dcaf62835 100644 --- a/docs/examples/56a1aa4f7fa62f2289e20607e3039bf3.asciidoc +++ b/docs/examples/56a1aa4f7fa62f2289e20607e3039bf3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:19 +// indices/put-mapping.asciidoc:25 [source, python] ---- diff --git a/docs/examples/56da252798b8e7b006738428aa1a7f4c.asciidoc b/docs/examples/56da252798b8e7b006738428aa1a7f4c.asciidoc index f53776159..28f81baf9 100644 --- a/docs/examples/56da252798b8e7b006738428aa1a7f4c.asciidoc +++ b/docs/examples/56da252798b8e7b006738428aa1a7f4c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/range.asciidoc:373 +// mapping/types/range.asciidoc:366 [source, python] ---- diff --git a/docs/examples/57a62f66b67a08c38942f0edb0fbaa26.asciidoc b/docs/examples/57a62f66b67a08c38942f0edb0fbaa26.asciidoc new file mode 100644 index 000000000..e4ea7ee57 --- /dev/null +++ b/docs/examples/57a62f66b67a08c38942f0edb0fbaa26.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// mapping/types/semantic-text.asciidoc:298 + +[source, python] +---- +resp = client.indices.create( + index="my-index-000004", + mappings={ + "properties": { + "inference_field": { + "type": "semantic_text", + "inference_id": "my-text-embedding-endpoint", + "index_options": { + "dense_vector": { + "type": "int4_flat" + } + }, + "chunking_settings": { + "type": "none" + } + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/584f502cf840134f2db5f39e2483ced1.asciidoc b/docs/examples/584f502cf840134f2db5f39e2483ced1.asciidoc index cb0cd262e..925fc1eb0 100644 --- a/docs/examples/584f502cf840134f2db5f39e2483ced1.asciidoc +++ b/docs/examples/584f502cf840134f2db5f39e2483ced1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// analysis/analyzers/lang-analyzer.asciidoc:1454 +// analysis/analyzers/lang-analyzer.asciidoc:1453 [source, python] ---- diff --git 
a/docs/examples/f9bad6fd369764185e1cb09b89ee39cc.asciidoc b/docs/examples/58fb1f324597070bee2cf854c162d388.asciidoc similarity index 73% rename from docs/examples/f9bad6fd369764185e1cb09b89ee39cc.asciidoc rename to docs/examples/58fb1f324597070bee2cf854c162d388.asciidoc index 64d5b6209..009ed67d4 100644 --- a/docs/examples/f9bad6fd369764185e1cb09b89ee39cc.asciidoc +++ b/docs/examples/58fb1f324597070bee2cf854c162d388.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/text.asciidoc:237 +// mapping/types/text.asciidoc:181 [source, python] ---- @@ -18,7 +18,12 @@ resp = client.indices.create( "properties": { "text": { "type": "text", - "store": True + "fields": { + "kwd": { + "type": "keyword", + "null_value": "NA" + } + } } } }, @@ -30,6 +35,7 @@ resp1 = client.index( id="1", document={ "text": [ + None, "the quick brown fox", "the quick brown fox", "jumped over the lazy dog" diff --git a/docs/examples/59aa5216630f80c5dc298fc5bba4a819.asciidoc b/docs/examples/59aa5216630f80c5dc298fc5bba4a819.asciidoc deleted file mode 100644 index 216f6cbbd..000000000 --- a/docs/examples/59aa5216630f80c5dc298fc5bba4a819.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// migration/migrate_9_0.asciidoc:415 - -[source, python] ----- -resp = client.indices.get_settings( - index=".reindexed-v9-ml-anomalies-custom-example", -) -print(resp) ----- diff --git a/docs/examples/5a4a6cf06b3a10f55012639166630290.asciidoc b/docs/examples/5a4a6cf06b3a10f55012639166630290.asciidoc new file mode 100644 index 000000000..6dd92adf7 --- /dev/null +++ b/docs/examples/5a4a6cf06b3a10f55012639166630290.asciidoc @@ -0,0 +1,31 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/retrievers-examples.asciidoc:1824 + +[source, python] +---- +resp = client.search( + index="retrievers_example", + retriever={ + "pinned": { + "docs": [ + { + "_id": "1" + }, + { + "_id": "2" + } + ], + "retriever": { + "standard": { + "query": { + "match": { + "text": "elasticsearch" + } + } + } + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/5b2a13366bd4e1ab4b25d04d360570dc.asciidoc b/docs/examples/5b2a13366bd4e1ab4b25d04d360570dc.asciidoc index 57c176743..e34426f1f 100644 --- a/docs/examples/5b2a13366bd4e1ab4b25d04d360570dc.asciidoc +++ b/docs/examples/5b2a13366bd4e1ab4b25d04d360570dc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-component-template.asciidoc:262 +// indices/put-component-template.asciidoc:260 [source, python] ---- diff --git a/docs/examples/5bba213a7f543190139d1a69ab2ed076.asciidoc b/docs/examples/5bba213a7f543190139d1a69ab2ed076.asciidoc index c9c33f40d..fac7ee153 100644 --- a/docs/examples/5bba213a7f543190139d1a69ab2ed076.asciidoc +++ b/docs/examples/5bba213a7f543190139d1a69ab2ed076.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-across-clusters.asciidoc:302 +// esql/esql-across-clusters.asciidoc:304 [source, python] ---- diff --git a/docs/examples/5daf8ede198be9b118da5bee9896cb00.asciidoc b/docs/examples/5daf8ede198be9b118da5bee9896cb00.asciidoc index ddad0eab9..84560ddf9 100644 --- a/docs/examples/5daf8ede198be9b118da5bee9896cb00.asciidoc +++ b/docs/examples/5daf8ede198be9b118da5bee9896cb00.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/flattened.asciidoc:333 +// mapping/types/flattened.asciidoc:326 [source, python] ---- diff --git a/docs/examples/5e21dbac92f34d236a8f0cc0d3a39cdd.asciidoc 
b/docs/examples/5e21dbac92f34d236a8f0cc0d3a39cdd.asciidoc index 559f8a514..2a6435589 100644 --- a/docs/examples/5e21dbac92f34d236a8f0cc0d3a39cdd.asciidoc +++ b/docs/examples/5e21dbac92f34d236a8f0cc0d3a39cdd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// security/authentication/jwt-realm.asciidoc:411 +// security/authentication/jwt-realm.asciidoc:421 [source, python] ---- diff --git a/docs/examples/5f1ed9cfdc149763b444acfbe10b0e16.asciidoc b/docs/examples/5f1ed9cfdc149763b444acfbe10b0e16.asciidoc index 118ad732e..ff7adf42f 100644 --- a/docs/examples/5f1ed9cfdc149763b444acfbe10b0e16.asciidoc +++ b/docs/examples/5f1ed9cfdc149763b444acfbe10b0e16.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:271 +// indices/put-mapping.asciidoc:277 [source, python] ---- diff --git a/docs/examples/60d3f9a99cc91b43aaa7524a9a74dba0.asciidoc b/docs/examples/60d3f9a99cc91b43aaa7524a9a74dba0.asciidoc index 6fe2dc982..c7e0d37a9 100644 --- a/docs/examples/60d3f9a99cc91b43aaa7524a9a74dba0.asciidoc +++ b/docs/examples/60d3f9a99cc91b43aaa7524a9a74dba0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/rejected-requests.asciidoc:50 +// troubleshooting/common-issues/rejected-requests.asciidoc:47 [source, python] ---- diff --git a/docs/examples/0709a38613d2de90d418ce12b36af30e.asciidoc b/docs/examples/611c1e05f4ebb48a1a8c8488238ce34d.asciidoc similarity index 74% rename from docs/examples/0709a38613d2de90d418ce12b36af30e.asciidoc rename to docs/examples/611c1e05f4ebb48a1a8c8488238ce34d.asciidoc index 340a95e60..33aa13fb3 100644 --- a/docs/examples/0709a38613d2de90d418ce12b36af30e.asciidoc +++ b/docs/examples/611c1e05f4ebb48a1a8c8488238ce34d.asciidoc @@ -3,6 +3,8 @@ [source, python] ---- -resp = client.cluster.reroute() +resp = client.cluster.reroute( + metric="none", +) print(resp) ---- diff --git a/docs/examples/6220087321e6d288024a70c6b09bd720.asciidoc b/docs/examples/6220087321e6d288024a70c6b09bd720.asciidoc index b7f8b8531..0607558a9 100644 --- a/docs/examples/6220087321e6d288024a70c6b09bd720.asciidoc +++ b/docs/examples/6220087321e6d288024a70c6b09bd720.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/percolate-query.asciidoc:358 +// query-dsl/percolate-query.asciidoc:356 [source, python] ---- diff --git a/docs/examples/62d3c8fccb11471bdc12555c1a7777f2.asciidoc b/docs/examples/62d3c8fccb11471bdc12555c1a7777f2.asciidoc index 3e5631809..4edd09c02 100644 --- a/docs/examples/62d3c8fccb11471bdc12555c1a7777f2.asciidoc +++ b/docs/examples/62d3c8fccb11471bdc12555c1a7777f2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/fields/synthetic-source.asciidoc:93 +// mapping/fields/synthetic-source.asciidoc:95 [source, python] ---- diff --git a/docs/examples/631a8d49bfbe622fd07c6868489b9fb0.asciidoc b/docs/examples/631a8d49bfbe622fd07c6868489b9fb0.asciidoc new file mode 100644 index 000000000..a5964a29b --- /dev/null +++ b/docs/examples/631a8d49bfbe622fd07c6868489b9fb0.asciidoc @@ -0,0 +1,19 @@ +// This file is autogenerated, DO NOT EDIT +// search/retriever.asciidoc:1106 + +[source, python] +---- +resp = client.search( + index="books", + retriever={ + "rrf": { + "query": "machine learning", + "fields": [ + "title*", + "*_text" + ] + } + }, +) +print(resp) +---- diff --git a/docs/examples/63a53fcb0717ae9033a679cbfc932851.asciidoc b/docs/examples/63a53fcb0717ae9033a679cbfc932851.asciidoc index 746b8a42d..404568af6 100644 --- 
a/docs/examples/63a53fcb0717ae9033a679cbfc932851.asciidoc +++ b/docs/examples/63a53fcb0717ae9033a679cbfc932851.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-alibabacloud-ai-search.asciidoc:174 +// inference/service-alibabacloud-ai-search.asciidoc:177 [source, python] ---- diff --git a/docs/examples/63ecdab34940af053acc409164914c32.asciidoc b/docs/examples/63ecdab34940af053acc409164914c32.asciidoc index 0c5d51106..32050c264 100644 --- a/docs/examples/63ecdab34940af053acc409164914c32.asciidoc +++ b/docs/examples/63ecdab34940af053acc409164914c32.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/sparse-vector.asciidoc:63 +// mapping/types/sparse-vector.asciidoc:126 [source, python] ---- diff --git a/docs/examples/642161d70dacf7d153767d37d3726838.asciidoc b/docs/examples/642161d70dacf7d153767d37d3726838.asciidoc index a6acd8d13..a7c877f17 100644 --- a/docs/examples/642161d70dacf7d153767d37d3726838.asciidoc +++ b/docs/examples/642161d70dacf7d153767d37d3726838.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/rollup-index-caps.asciidoc:171 +// rollup/apis/rollup-index-caps.asciidoc:169 [source, python] ---- diff --git a/docs/examples/67154a4837cf996a9a9c3e61d6e9d1b3.asciidoc b/docs/examples/67154a4837cf996a9a9c3e61d6e9d1b3.asciidoc new file mode 100644 index 000000000..647b80bda --- /dev/null +++ b/docs/examples/67154a4837cf996a9a9c3e61d6e9d1b3.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// migration/migrate_8_0/migrate_to_java_time.asciidoc:289 + +[source, python] +---- +resp = client.reindex( + source={ + "index": "my-index-000001" + }, + dest={ + "index": "my-index-000002" + }, +) +print(resp) +---- diff --git a/docs/examples/67b71a95b6fe6c83faae51ea038a1bf1.asciidoc b/docs/examples/67b71a95b6fe6c83faae51ea038a1bf1.asciidoc index f0c9152d8..c9b9f6d70 100644 --- a/docs/examples/67b71a95b6fe6c83faae51ea038a1bf1.asciidoc +++ b/docs/examples/67b71a95b6fe6c83faae51ea038a1bf1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-rest.asciidoc:407 +// esql/esql-rest.asciidoc:410 [source, python] ---- diff --git a/docs/examples/69541f0bb81ab3797926bb2a00607cda.asciidoc b/docs/examples/69541f0bb81ab3797926bb2a00607cda.asciidoc index bd34b9be6..baa6e0332 100644 --- a/docs/examples/69541f0bb81ab3797926bb2a00607cda.asciidoc +++ b/docs/examples/69541f0bb81ab3797926bb2a00607cda.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:748 +// search/retriever.asciidoc:778 [source, python] ---- diff --git a/docs/examples/6b0288acb739c4667d41339e5100c327.asciidoc b/docs/examples/6b0288acb739c4667d41339e5100c327.asciidoc index 421376449..a58107558 100644 --- a/docs/examples/6b0288acb739c4667d41339e5100c327.asciidoc +++ b/docs/examples/6b0288acb739c4667d41339e5100c327.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/match-query.asciidoc:234 +// query-dsl/match-query.asciidoc:231 [source, python] ---- diff --git a/docs/examples/6b67c6121efb86ee100d40c2646f77b5.asciidoc b/docs/examples/6b67c6121efb86ee100d40c2646f77b5.asciidoc index bc427ddf0..72e615b09 100644 --- a/docs/examples/6b67c6121efb86ee100d40c2646f77b5.asciidoc +++ b/docs/examples/6b67c6121efb86ee100d40c2646f77b5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// index-modules/slowlog.asciidoc:219 +// index-modules/slowlog.asciidoc:220 [source, python] ---- diff --git 
a/docs/examples/6ba332596f5eb29660c90ab2d480e7dc.asciidoc b/docs/examples/6ba332596f5eb29660c90ab2d480e7dc.asciidoc index fbd0709e9..d0227f1b3 100644 --- a/docs/examples/6ba332596f5eb29660c90ab2d480e7dc.asciidoc +++ b/docs/examples/6ba332596f5eb29660c90ab2d480e7dc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-index-template-v1.asciidoc:189 +// indices/put-index-template-v1.asciidoc:188 [source, python] ---- diff --git a/docs/examples/6baf72c04d48cb04c2f8be609ff3b3b5.asciidoc b/docs/examples/6baf72c04d48cb04c2f8be609ff3b3b5.asciidoc index cd4a70809..f0a7c3e14 100644 --- a/docs/examples/6baf72c04d48cb04c2f8be609ff3b3b5.asciidoc +++ b/docs/examples/6baf72c04d48cb04c2f8be609ff3b3b5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/semantic-text.asciidoc:132 +// mapping/types/semantic-text.asciidoc:209 [source, python] ---- diff --git a/docs/examples/6dcd3916679f6aa64f79524c75991ebd.asciidoc b/docs/examples/6dcd3916679f6aa64f79524c75991ebd.asciidoc index 3b72c1464..9e12ba604 100644 --- a/docs/examples/6dcd3916679f6aa64f79524c75991ebd.asciidoc +++ b/docs/examples/6dcd3916679f6aa64f79524c75991ebd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-rest.asciidoc:248 +// esql/esql-rest.asciidoc:251 [source, python] ---- diff --git a/docs/examples/6e498b9dc753b94abf2618c407fa5cd8.asciidoc b/docs/examples/6e498b9dc753b94abf2618c407fa5cd8.asciidoc deleted file mode 100644 index 265795311..000000000 --- a/docs/examples/6e498b9dc753b94abf2618c407fa5cd8.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// migration/migrate_9_0.asciidoc:453 - -[source, python] ----- -resp = client.reindex( - wait_for_completion=False, - source={ - "index": ".ml-anomalies-custom-example" - }, - dest={ - "index": ".reindexed-v9-ml-anomalies-custom-example" - }, -) -print(resp) ----- diff --git a/docs/examples/6e6b78e6b689a5d6aa637271b6d084e2.asciidoc b/docs/examples/6e6b78e6b689a5d6aa637271b6d084e2.asciidoc index 14522e393..a3fb0794d 100644 --- a/docs/examples/6e6b78e6b689a5d6aa637271b6d084e2.asciidoc +++ b/docs/examples/6e6b78e6b689a5d6aa637271b6d084e2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:363 +// search/retriever.asciidoc:397 [source, python] ---- diff --git a/docs/examples/7039aa15511e9b876c2e00d067751e2e.asciidoc b/docs/examples/7039aa15511e9b876c2e00d067751e2e.asciidoc new file mode 100644 index 000000000..5f5241adb --- /dev/null +++ b/docs/examples/7039aa15511e9b876c2e00d067751e2e.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/retrievers-examples.asciidoc:633 + +[source, python] +---- +resp = client.search( + index="retrievers_example", + retriever={ + "linear": { + "query": "artificial intelligence", + "fields": [ + "text", + "text_semantic", + "topic^2" + ], + "normalizer": "minmax" + } + }, + source=False, +) +print(resp) +---- diff --git a/docs/examples/2fa7ded8515b32f26c54394ea598f573.asciidoc b/docs/examples/708fe682185b46704fce563465933ded.asciidoc similarity index 85% rename from docs/examples/2fa7ded8515b32f26c54394ea598f573.asciidoc rename to docs/examples/708fe682185b46704fce563465933ded.asciidoc index bc1835a56..4e8ca244a 100644 --- a/docs/examples/2fa7ded8515b32f26c54394ea598f573.asciidoc +++ b/docs/examples/708fe682185b46704fce563465933ded.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/index-templates.asciidoc:123 +// 
indices/index-templates.asciidoc:122 [source, python] ---- @@ -22,8 +22,7 @@ resp = client.indices.put_index_template( "type": "keyword" }, "created_at": { - "type": "date", - "format": "EEE MMM dd HH:mm:ss Z yyyy" + "type": "date" } } }, diff --git a/docs/examples/71998bb300ac2a58419b0772cdc1c586.asciidoc b/docs/examples/71998bb300ac2a58419b0772cdc1c586.asciidoc index 0b9cdbc9f..14b9ffaf2 100644 --- a/docs/examples/71998bb300ac2a58419b0772cdc1c586.asciidoc +++ b/docs/examples/71998bb300ac2a58419b0772cdc1c586.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/version.asciidoc:85 +// mapping/types/version.asciidoc:78 [source, python] ---- diff --git a/docs/examples/e784fc00894635470adfd78a0c46b427.asciidoc b/docs/examples/725c957303ead2d509f148e0428d7250.asciidoc similarity index 85% rename from docs/examples/e784fc00894635470adfd78a0c46b427.asciidoc rename to docs/examples/725c957303ead2d509f148e0428d7250.asciidoc index a5c0ed26a..fc04bb88d 100644 --- a/docs/examples/e784fc00894635470adfd78a0c46b427.asciidoc +++ b/docs/examples/725c957303ead2d509f148e0428d7250.asciidoc @@ -18,8 +18,7 @@ resp = client.cluster.put_component_template( "type": "keyword" }, "created_at": { - "type": "date", - "format": "EEE MMM dd HH:mm:ss Z yyyy" + "type": "date" } } } diff --git a/docs/examples/72abd8a82b4a8fc2fc06f54150d23b76.asciidoc b/docs/examples/72abd8a82b4a8fc2fc06f54150d23b76.asciidoc new file mode 100644 index 000000000..121ba5c64 --- /dev/null +++ b/docs/examples/72abd8a82b4a8fc2fc06f54150d23b76.asciidoc @@ -0,0 +1,54 @@ +// This file is autogenerated, DO NOT EDIT +// query-dsl/bool-query.asciidoc:80 + +[source, python] +---- +resp = client.search( + query={ + "bool": { + "must": [ + { + "bool": { + "should": [ + { + "match": { + "user.id": "kimchy" + } + }, + { + "match": { + "user.id": "banon" + } + } + ] + } + }, + { + "match": { + "tags": "production" + } + } + ], + "should": [ + { + "bool": { + "must": [ + { + "match": { + "status": "active" + } + }, + { + "match": { + "title": "quick brown fox" + } + } + ] + } + } + ] + } + }, +) +print(resp) +---- diff --git a/docs/examples/73d1a6c5ef90b7e35d43a0bfdc1e158d.asciidoc b/docs/examples/73d1a6c5ef90b7e35d43a0bfdc1e158d.asciidoc index db79aaa94..41b4fc221 100644 --- a/docs/examples/73d1a6c5ef90b7e35d43a0bfdc1e158d.asciidoc +++ b/docs/examples/73d1a6c5ef90b7e35d43a0bfdc1e158d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/rollup-index-caps.asciidoc:95 +// rollup/apis/rollup-index-caps.asciidoc:94 [source, python] ---- diff --git a/docs/examples/744aeb2af40f519e430e21e004e3c3b7.asciidoc b/docs/examples/744aeb2af40f519e430e21e004e3c3b7.asciidoc index 952a42b78..994404bc0 100644 --- a/docs/examples/744aeb2af40f519e430e21e004e3c3b7.asciidoc +++ b/docs/examples/744aeb2af40f519e430e21e004e3c3b7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/multivalued-fields.asciidoc:99 +// esql/multivalued-fields.asciidoc:103 [source, python] ---- diff --git a/docs/examples/746e0a1cb5984f2672963b363505c7b3.asciidoc b/docs/examples/746e0a1cb5984f2672963b363505c7b3.asciidoc index 5c56c2f3f..90f930456 100644 --- a/docs/examples/746e0a1cb5984f2672963b363505c7b3.asciidoc +++ b/docs/examples/746e0a1cb5984f2672963b363505c7b3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/date.asciidoc:188 +// mapping/types/date.asciidoc:196 [source, python] ---- diff --git a/docs/examples/75e6d66e94e61bd8a555beaaee255c36.asciidoc 
b/docs/examples/75e6d66e94e61bd8a555beaaee255c36.asciidoc index bfb1cadf1..f1e5b8201 100644 --- a/docs/examples/75e6d66e94e61bd8a555beaaee255c36.asciidoc +++ b/docs/examples/75e6d66e94e61bd8a555beaaee255c36.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/rollup-search.asciidoc:178 +// rollup/apis/rollup-search.asciidoc:176 [source, python] ---- diff --git a/docs/examples/76e02434835630cb830724beb92df354.asciidoc b/docs/examples/76e02434835630cb830724beb92df354.asciidoc index cc7602fe4..9ab5b0f00 100644 --- a/docs/examples/76e02434835630cb830724beb92df354.asciidoc +++ b/docs/examples/76e02434835630cb830724beb92df354.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/retrievers-examples.asciidoc:1433 +// search/search-your-data/retrievers-examples.asciidoc:1725 [source, python] ---- diff --git a/docs/examples/7709a48020a6cefbbe547fb944541cdb.asciidoc b/docs/examples/7709a48020a6cefbbe547fb944541cdb.asciidoc index 00ea6fa58..9476542db 100644 --- a/docs/examples/7709a48020a6cefbbe547fb944541cdb.asciidoc +++ b/docs/examples/7709a48020a6cefbbe547fb944541cdb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/dense-vector.asciidoc:421 +// mapping/types/dense-vector.asciidoc:414 [source, python] ---- diff --git a/docs/examples/7752b677825523bfb0c38ad9325a6d47.asciidoc b/docs/examples/7752b677825523bfb0c38ad9325a6d47.asciidoc index 1fb038498..e35bd7225 100644 --- a/docs/examples/7752b677825523bfb0c38ad9325a6d47.asciidoc +++ b/docs/examples/7752b677825523bfb0c38ad9325a6d47.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/delete-connector-api.asciidoc:79 +// connector/apis/delete-connector-api.asciidoc:76 [source, python] ---- diff --git a/docs/examples/78043831fd32004a82930c8ac8a1d809.asciidoc b/docs/examples/78043831fd32004a82930c8ac8a1d809.asciidoc index 5a0d6d76f..d86f2f207 100644 --- a/docs/examples/78043831fd32004a82930c8ac8a1d809.asciidoc +++ b/docs/examples/78043831fd32004a82930c8ac8a1d809.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/retrievers-examples.asciidoc:1378 +// search/search-your-data/retrievers-examples.asciidoc:1670 [source, python] ---- diff --git a/docs/examples/7846974b47a3eab1832a475663d23ad9.asciidoc b/docs/examples/7846974b47a3eab1832a475663d23ad9.asciidoc index 0fc04cfc2..d39caa3d6 100644 --- a/docs/examples/7846974b47a3eab1832a475663d23ad9.asciidoc +++ b/docs/examples/7846974b47a3eab1832a475663d23ad9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/paginate-search-results.asciidoc:292 +// search/search-your-data/paginate-search-results.asciidoc:307 [source, python] ---- diff --git a/docs/examples/7885ca9d7c61050095288eef6bc6cca9.asciidoc b/docs/examples/7885ca9d7c61050095288eef6bc6cca9.asciidoc index 2367c5ae1..90021946c 100644 --- a/docs/examples/7885ca9d7c61050095288eef6bc6cca9.asciidoc +++ b/docs/examples/7885ca9d7c61050095288eef6bc6cca9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// security/authentication/jwt-realm.asciidoc:676 +// security/authentication/jwt-realm.asciidoc:686 [source, python] ---- diff --git a/docs/examples/79d206a528be704050a437adce2496dd.asciidoc b/docs/examples/79d206a528be704050a437adce2496dd.asciidoc index 0a1f7c9ca..9026321ac 100644 --- a/docs/examples/79d206a528be704050a437adce2496dd.asciidoc +++ b/docs/examples/79d206a528be704050a437adce2496dd.asciidoc @@ -1,5 +1,5 @@ // 
This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:629 +// search/retriever.asciidoc:659 [source, python] ---- diff --git a/docs/examples/7a32f44a1511ecb0d3f0b0ff2aca5c44.asciidoc b/docs/examples/7a32f44a1511ecb0d3f0b0ff2aca5c44.asciidoc new file mode 100644 index 000000000..984cd88e4 --- /dev/null +++ b/docs/examples/7a32f44a1511ecb0d3f0b0ff2aca5c44.asciidoc @@ -0,0 +1,23 @@ +// This file is autogenerated, DO NOT EDIT +// migration/migrate_8_0/migrate_to_java_time.asciidoc:305 + +[source, python] +---- +resp = client.indices.update_aliases( + actions=[ + { + "remove": { + "index": "my-index-000001", + "alias": "my-index" + } + }, + { + "add": { + "index": "my-index-000002", + "alias": "my-index" + } + } + ], +) +print(resp) +---- diff --git a/docs/examples/7bdc283b96c7a965fae23013647b8578.asciidoc b/docs/examples/7bdc283b96c7a965fae23013647b8578.asciidoc index 2bc21bbf0..c19b46886 100644 --- a/docs/examples/7bdc283b96c7a965fae23013647b8578.asciidoc +++ b/docs/examples/7bdc283b96c7a965fae23013647b8578.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/semantic-text.asciidoc:220 +// mapping/types/semantic-text.asciidoc:341 [source, python] ---- diff --git a/docs/examples/7cd23457e220c8b64c5b0041d2acc27a.asciidoc b/docs/examples/7cd23457e220c8b64c5b0041d2acc27a.asciidoc index 261211313..4dac054a0 100644 --- a/docs/examples/7cd23457e220c8b64c5b0041d2acc27a.asciidoc +++ b/docs/examples/7cd23457e220c8b64c5b0041d2acc27a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// setup/advanced-configuration.asciidoc:123 +// setup/advanced-configuration.asciidoc:122 [source, python] ---- diff --git a/docs/examples/7d3a74fe0ba3fe95d1c3275365ff9315.asciidoc b/docs/examples/7d3a74fe0ba3fe95d1c3275365ff9315.asciidoc index c812891e4..578034516 100644 --- a/docs/examples/7d3a74fe0ba3fe95d1c3275365ff9315.asciidoc +++ b/docs/examples/7d3a74fe0ba3fe95d1c3275365ff9315.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/flattened.asciidoc:374 +// mapping/types/flattened.asciidoc:367 [source, python] ---- diff --git a/docs/examples/7daff6b7e668ab8a762b8ab5dff7a167.asciidoc b/docs/examples/7daff6b7e668ab8a762b8ab5dff7a167.asciidoc index 75048641a..aebed399e 100644 --- a/docs/examples/7daff6b7e668ab8a762b8ab5dff7a167.asciidoc +++ b/docs/examples/7daff6b7e668ab8a762b8ab5dff7a167.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/sparse-vector-query.asciidoc:260 +// query-dsl/sparse-vector-query.asciidoc:268 [source, python] ---- diff --git a/docs/examples/7ea7ab20df76c47b391a582ae4bce803.asciidoc b/docs/examples/7ea7ab20df76c47b391a582ae4bce803.asciidoc new file mode 100644 index 000000000..b016a7d54 --- /dev/null +++ b/docs/examples/7ea7ab20df76c47b391a582ae4bce803.asciidoc @@ -0,0 +1,16 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/retrievers-examples.asciidoc:462 + +[source, python] +---- +resp = client.search( + index="retrievers_example", + retriever={ + "rrf": { + "query": "artificial intelligence" + } + }, + source=False, +) +print(resp) +---- diff --git a/docs/examples/7f1fade93225f8cf6000b93334d76ce4.asciidoc b/docs/examples/7f1fade93225f8cf6000b93334d76ce4.asciidoc index a2b9f8e5c..3c22a69ea 100644 --- a/docs/examples/7f1fade93225f8cf6000b93334d76ce4.asciidoc +++ b/docs/examples/7f1fade93225f8cf6000b93334d76ce4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/processors/ip-location.asciidoc:188 +// 
ingest/processors/ip-location.asciidoc:203 [source, python] ---- diff --git a/docs/examples/7f2d511cb64743c006225e5933a14bb4.asciidoc b/docs/examples/7f2d511cb64743c006225e5933a14bb4.asciidoc index 60a3269ff..efe78a8c9 100644 --- a/docs/examples/7f2d511cb64743c006225e5933a14bb4.asciidoc +++ b/docs/examples/7f2d511cb64743c006225e5933a14bb4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-across-clusters.asciidoc:69 +// esql/esql-across-clusters.asciidoc:70 [source, python] ---- diff --git a/docs/examples/7f56755fb6c42f7e6203339a6d0cb6e6.asciidoc b/docs/examples/7f56755fb6c42f7e6203339a6d0cb6e6.asciidoc index 18e25a546..2abbd8530 100644 --- a/docs/examples/7f56755fb6c42f7e6203339a6d0cb6e6.asciidoc +++ b/docs/examples/7f56755fb6c42f7e6203339a6d0cb6e6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/match-query.asciidoc:283 +// query-dsl/match-query.asciidoc:280 [source, python] ---- diff --git a/docs/examples/7fde3ff91c4a2e7080444af37d5cd287.asciidoc b/docs/examples/7fde3ff91c4a2e7080444af37d5cd287.asciidoc index 649738ffe..9c570d54b 100644 --- a/docs/examples/7fde3ff91c4a2e7080444af37d5cd287.asciidoc +++ b/docs/examples/7fde3ff91c4a2e7080444af37d5cd287.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-rest.asciidoc:289 +// esql/esql-rest.asciidoc:292 [source, python] ---- diff --git a/docs/examples/80d2ccb7e2056d66bcf95096ae61af4b.asciidoc b/docs/examples/80d2ccb7e2056d66bcf95096ae61af4b.asciidoc new file mode 100644 index 000000000..294d9dafe --- /dev/null +++ b/docs/examples/80d2ccb7e2056d66bcf95096ae61af4b.asciidoc @@ -0,0 +1,15 @@ +// This file is autogenerated, DO NOT EDIT +// quickstart/esql-search-tutorial.asciidoc:326 + +[source, python] +---- +resp = client.indices.put_mapping( + index="cooking_blog", + properties={ + "semantic_description": { + "type": "semantic_text" + } + }, +) +print(resp) +---- diff --git a/docs/examples/820f689eaaef15fc07abd1073fa880f8.asciidoc b/docs/examples/820f689eaaef15fc07abd1073fa880f8.asciidoc index 212392e24..e67211792 100644 --- a/docs/examples/820f689eaaef15fc07abd1073fa880f8.asciidoc +++ b/docs/examples/820f689eaaef15fc07abd1073fa880f8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/paginate-search-results.asciidoc:11 +// search/search-your-data/paginate-search-results.asciidoc:24 [source, python] ---- diff --git a/docs/examples/828f0045747fde4888a947bb99e190e3.asciidoc b/docs/examples/828f0045747fde4888a947bb99e190e3.asciidoc index b0c0af2a7..1a5385263 100644 --- a/docs/examples/828f0045747fde4888a947bb99e190e3.asciidoc +++ b/docs/examples/828f0045747fde4888a947bb99e190e3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:837 +// search/retriever.asciidoc:867 [source, python] ---- diff --git a/docs/examples/853fc710cea79fb4e1a85fb6d149f9c5.asciidoc b/docs/examples/853fc710cea79fb4e1a85fb6d149f9c5.asciidoc index 12888ed06..d12fb9c69 100644 --- a/docs/examples/853fc710cea79fb4e1a85fb6d149f9c5.asciidoc +++ b/docs/examples/853fc710cea79fb4e1a85fb6d149f9c5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:876 +// search/retriever.asciidoc:906 [source, python] ---- diff --git a/docs/examples/85479e02af00681210e17e3d0ff51e21.asciidoc b/docs/examples/85479e02af00681210e17e3d0ff51e21.asciidoc index 6b13cce81..4921850c3 100644 --- a/docs/examples/85479e02af00681210e17e3d0ff51e21.asciidoc +++ 
b/docs/examples/85479e02af00681210e17e3d0ff51e21.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/date.asciidoc:93 +// mapping/types/date.asciidoc:101 [source, python] ---- diff --git a/docs/examples/858fde15fb0a0340873b123043f8c3b4.asciidoc b/docs/examples/858fde15fb0a0340873b123043f8c3b4.asciidoc index 7e0f1de21..acb5c2d72 100644 --- a/docs/examples/858fde15fb0a0340873b123043f8c3b4.asciidoc +++ b/docs/examples/858fde15fb0a0340873b123043f8c3b4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/histogram.asciidoc:118 +// mapping/types/histogram.asciidoc:111 [source, python] ---- diff --git a/docs/examples/85f6667f148d16d075493fddf07e2932.asciidoc b/docs/examples/85f6667f148d16d075493fddf07e2932.asciidoc index b17580c51..5893e6504 100644 --- a/docs/examples/85f6667f148d16d075493fddf07e2932.asciidoc +++ b/docs/examples/85f6667f148d16d075493fddf07e2932.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/change-mappings-and-settings.asciidoc:616 +// data-streams/change-mappings-and-settings.asciidoc:620 [source, python] ---- diff --git a/docs/examples/85f9fc6f98e8573efed9b034e853d5ae.asciidoc b/docs/examples/85f9fc6f98e8573efed9b034e853d5ae.asciidoc index 107825f54..67aaf4ac1 100644 --- a/docs/examples/85f9fc6f98e8573efed9b034e853d5ae.asciidoc +++ b/docs/examples/85f9fc6f98e8573efed9b034e853d5ae.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-elasticsearch.asciidoc:289 +// inference/service-elasticsearch.asciidoc:284 [source, python] ---- diff --git a/docs/examples/8619bd17bbfe33490b1f277007f654db.asciidoc b/docs/examples/8619bd17bbfe33490b1f277007f654db.asciidoc index d48245664..785b6ac6a 100644 --- a/docs/examples/8619bd17bbfe33490b1f277007f654db.asciidoc +++ b/docs/examples/8619bd17bbfe33490b1f277007f654db.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-cohere.asciidoc:214 +// inference/service-cohere.asciidoc:216 [source, python] ---- diff --git a/docs/examples/8621c05cc7cf3880bde751f6670a0c3a.asciidoc b/docs/examples/8621c05cc7cf3880bde751f6670a0c3a.asciidoc deleted file mode 100644 index d4812b0ab..000000000 --- a/docs/examples/8621c05cc7cf3880bde751f6670a0c3a.asciidoc +++ /dev/null @@ -1,15 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// migration/migrate_9_0.asciidoc:439 - -[source, python] ----- -resp = client.indices.put_settings( - index=".reindexed-v9-ml-anomalies-custom-example", - settings={ - "index": { - "number_of_replicas": 0 - } - }, -) -print(resp) ----- diff --git a/docs/examples/863253bf0ab7d227ff72a0a384f4de8c.asciidoc b/docs/examples/863253bf0ab7d227ff72a0a384f4de8c.asciidoc index 685c48a97..31bbd9a29 100644 --- a/docs/examples/863253bf0ab7d227ff72a0a384f4de8c.asciidoc +++ b/docs/examples/863253bf0ab7d227ff72a0a384f4de8c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/change-mappings-and-settings.asciidoc:673 +// data-streams/change-mappings-and-settings.asciidoc:677 [source, python] ---- diff --git a/docs/examples/8634c9993485d622fb12d24f4f242264.asciidoc b/docs/examples/8634c9993485d622fb12d24f4f242264.asciidoc index d5a35b8a4..b0dcd030b 100644 --- a/docs/examples/8634c9993485d622fb12d24f4f242264.asciidoc +++ b/docs/examples/8634c9993485d622fb12d24f4f242264.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/downsampling-manual.asciidoc:433 +// data-streams/downsampling-manual.asciidoc:436 [source, python] 
---- diff --git a/docs/examples/86f426ffa67416a50f2702f7131d35de.asciidoc b/docs/examples/86f426ffa67416a50f2702f7131d35de.asciidoc new file mode 100644 index 000000000..bc013b97c --- /dev/null +++ b/docs/examples/86f426ffa67416a50f2702f7131d35de.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// mapping/types/semantic-text.asciidoc:235 + +[source, python] +---- +resp = client.search( + index="test-index", + query={ + "match_all": {} + }, + highlight={ + "fields": { + "my_semantic_field": { + "number_of_fragments": 5 + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/87457bb3467484bec3e9df4e25942ba6.asciidoc b/docs/examples/87457bb3467484bec3e9df4e25942ba6.asciidoc index 02f191745..c000ba046 100644 --- a/docs/examples/87457bb3467484bec3e9df4e25942ba6.asciidoc +++ b/docs/examples/87457bb3467484bec3e9df4e25942ba6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/multivalued-fields.asciidoc:275 +// esql/multivalued-fields.asciidoc:287 [source, python] ---- diff --git a/docs/examples/8a0b5f759de3f27f0801c1176e616117.asciidoc b/docs/examples/8a0b5f759de3f27f0801c1176e616117.asciidoc index 4a66835cb..a8e4ef726 100644 --- a/docs/examples/8a0b5f759de3f27f0801c1176e616117.asciidoc +++ b/docs/examples/8a0b5f759de3f27f0801c1176e616117.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/semantic-search-semantic-text.asciidoc:36 +// search/search-your-data/semantic-search-semantic-text.asciidoc:34 [source, python] ---- diff --git a/docs/examples/8a1b6eae4893c5dd27b3d81fd8d70f5b.asciidoc b/docs/examples/8a1b6eae4893c5dd27b3d81fd8d70f5b.asciidoc index c46aba59b..8a6c13fbf 100644 --- a/docs/examples/8a1b6eae4893c5dd27b3d81fd8d70f5b.asciidoc +++ b/docs/examples/8a1b6eae4893c5dd27b3d81fd8d70f5b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// migration/migrate_9_0.asciidoc:467 +// search/search-your-data/semantic-search-elser.asciidoc:157 [source, python] ---- diff --git a/docs/examples/8b8b6aac2111b2d8b93758ac737e6543.asciidoc b/docs/examples/8b8b6aac2111b2d8b93758ac737e6543.asciidoc index 31bfc8362..05e97cf2a 100644 --- a/docs/examples/8b8b6aac2111b2d8b93758ac737e6543.asciidoc +++ b/docs/examples/8b8b6aac2111b2d8b93758ac737e6543.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/fields/synthetic-source.asciidoc:224 +// mapping/fields/synthetic-source.asciidoc:226 [source, python] ---- diff --git a/docs/examples/8c47c80139f40f25db44f5781ca2dfbe.asciidoc b/docs/examples/8c47c80139f40f25db44f5781ca2dfbe.asciidoc deleted file mode 100644 index 8503017e7..000000000 --- a/docs/examples/8c47c80139f40f25db44f5781ca2dfbe.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// migration/migrate_9_0.asciidoc:491 - -[source, python] ----- -resp = client.indices.get_alias( - index=".ml-anomalies-custom-example", -) -print(resp) ----- diff --git a/docs/examples/8cd00a3aba7c3c158277bc032aac2830.asciidoc b/docs/examples/8cd00a3aba7c3c158277bc032aac2830.asciidoc index 68ae5624a..f0cc6406b 100644 --- a/docs/examples/8cd00a3aba7c3c158277bc032aac2830.asciidoc +++ b/docs/examples/8cd00a3aba7c3c158277bc032aac2830.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/bulk.asciidoc:620 +// docs/bulk.asciidoc:616 [source, python] ---- diff --git a/docs/examples/8d05862be1f9e7edaba162b1888b5677.asciidoc b/docs/examples/8d05862be1f9e7edaba162b1888b5677.asciidoc index 0433809f5..eaebb31e2 100644 --- 
a/docs/examples/8d05862be1f9e7edaba162b1888b5677.asciidoc +++ b/docs/examples/8d05862be1f9e7edaba162b1888b5677.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// quickstart/full-text-filtering-tutorial.asciidoc:50 +// quickstart/esql-search-tutorial.asciidoc:77 [source, python] ---- diff --git a/docs/examples/8d9b04f2a97f4229dec9e620126de049.asciidoc b/docs/examples/8d9b04f2a97f4229dec9e620126de049.asciidoc index fdcc89bdb..6fa55b386 100644 --- a/docs/examples/8d9b04f2a97f4229dec9e620126de049.asciidoc +++ b/docs/examples/8d9b04f2a97f4229dec9e620126de049.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// snapshot-restore/repository-s3.asciidoc:609 +// snapshot-restore/repository-s3.asciidoc:575 [source, python] ---- diff --git a/docs/examples/8de6fed6ba2b94ce6a12ce076be2b4d7.asciidoc b/docs/examples/8de6fed6ba2b94ce6a12ce076be2b4d7.asciidoc index 13ad215ea..32ab5cc14 100644 --- a/docs/examples/8de6fed6ba2b94ce6a12ce076be2b4d7.asciidoc +++ b/docs/examples/8de6fed6ba2b94ce6a12ce076be2b4d7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/segments.asciidoc:132 +// cat/segments.asciidoc:133 [source, python] ---- diff --git a/docs/examples/8ecefdcf8f153cf91588e9fdde8f3e6b.asciidoc b/docs/examples/8ecefdcf8f153cf91588e9fdde8f3e6b.asciidoc index 1597f63d7..d854839f2 100644 --- a/docs/examples/8ecefdcf8f153cf91588e9fdde8f3e6b.asciidoc +++ b/docs/examples/8ecefdcf8f153cf91588e9fdde8f3e6b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/query-string-query.asciidoc:299 +// query-dsl/query-string-query.asciidoc:302 [source, python] ---- diff --git a/docs/examples/8ed31628081db2b6e9106d61d1e142be.asciidoc b/docs/examples/8ed31628081db2b6e9106d61d1e142be.asciidoc index 31c18d331..007c894a4 100644 --- a/docs/examples/8ed31628081db2b6e9106d61d1e142be.asciidoc +++ b/docs/examples/8ed31628081db2b6e9106d61d1e142be.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/simple-query-string-query.asciidoc:291 +// query-dsl/simple-query-string-query.asciidoc:294 [source, python] ---- diff --git a/docs/examples/e5f50b31f165462d883ecbff45f74985.asciidoc b/docs/examples/8f56ae0bf05093986e46a29fb0890a4f.asciidoc similarity index 85% rename from docs/examples/e5f50b31f165462d883ecbff45f74985.asciidoc rename to docs/examples/8f56ae0bf05093986e46a29fb0890a4f.asciidoc index 9d3eb5f2a..3767c3a15 100644 --- a/docs/examples/e5f50b31f165462d883ecbff45f74985.asciidoc +++ b/docs/examples/8f56ae0bf05093986e46a29fb0890a4f.asciidoc @@ -21,8 +21,7 @@ resp = client.indices.put_template( "type": "keyword" }, "created_at": { - "type": "date", - "format": "EEE MMM dd HH:mm:ss Z yyyy" + "type": "date" } } }, diff --git a/docs/examples/8f6f7ea5abf56152b4a5639ddf40848f.asciidoc b/docs/examples/8f6f7ea5abf56152b4a5639ddf40848f.asciidoc index 68d39126d..0e400e7cc 100644 --- a/docs/examples/8f6f7ea5abf56152b4a5639ddf40848f.asciidoc +++ b/docs/examples/8f6f7ea5abf56152b4a5639ddf40848f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// security/authentication/jwt-realm.asciidoc:471 +// security/authentication/jwt-realm.asciidoc:481 [source, python] ---- diff --git a/docs/examples/8fec06a98d0151c1d717a01491d0b8f0.asciidoc b/docs/examples/8fec06a98d0151c1d717a01491d0b8f0.asciidoc index 46d05770e..b0e25f0ad 100644 --- a/docs/examples/8fec06a98d0151c1d717a01491d0b8f0.asciidoc +++ b/docs/examples/8fec06a98d0151c1d717a01491d0b8f0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// 
data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc:79 +// data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc:87 [source, python] ---- diff --git a/docs/examples/9169d19a80175ec94f80865d0f9bef4c.asciidoc b/docs/examples/9169d19a80175ec94f80865d0f9bef4c.asciidoc index 850788ba4..607fb674c 100644 --- a/docs/examples/9169d19a80175ec94f80865d0f9bef4c.asciidoc +++ b/docs/examples/9169d19a80175ec94f80865d0f9bef4c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:314 +// search/retriever.asciidoc:348 [source, python] ---- diff --git a/docs/examples/91750571c195718f0ff246e058e4bc63.asciidoc b/docs/examples/91750571c195718f0ff246e058e4bc63.asciidoc index 4b45f680e..a5de7b36b 100644 --- a/docs/examples/91750571c195718f0ff246e058e4bc63.asciidoc +++ b/docs/examples/91750571c195718f0ff246e058e4bc63.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/paginate-search-results.asciidoc:73 +// search/search-your-data/paginate-search-results.asciidoc:88 [source, python] ---- diff --git a/docs/examples/927b20a221f975b75d1227b67d0eb7e2.asciidoc b/docs/examples/927b20a221f975b75d1227b67d0eb7e2.asciidoc index 5ba6e11be..eaa000d01 100644 --- a/docs/examples/927b20a221f975b75d1227b67d0eb7e2.asciidoc +++ b/docs/examples/927b20a221f975b75d1227b67d0eb7e2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-rest.asciidoc:268 +// esql/esql-rest.asciidoc:271 [source, python] ---- diff --git a/docs/examples/9382f022086c692ba05efb0acae65946.asciidoc b/docs/examples/9382f022086c692ba05efb0acae65946.asciidoc index 02ad2f12e..70fee42d6 100644 --- a/docs/examples/9382f022086c692ba05efb0acae65946.asciidoc +++ b/docs/examples/9382f022086c692ba05efb0acae65946.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/fields/synthetic-source.asciidoc:63 +// mapping/fields/synthetic-source.asciidoc:65 [source, python] ---- diff --git a/docs/examples/944806221eb89f5af2298ccdf2902277.asciidoc b/docs/examples/944806221eb89f5af2298ccdf2902277.asciidoc index 896a4c10f..be677f846 100644 --- a/docs/examples/944806221eb89f5af2298ccdf2902277.asciidoc +++ b/docs/examples/944806221eb89f5af2298ccdf2902277.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/rollup-caps.asciidoc:171 +// rollup/apis/rollup-caps.asciidoc:169 [source, python] ---- diff --git a/docs/examples/948418e0ef1b7e7cfee2f11be715d7d2.asciidoc b/docs/examples/948418e0ef1b7e7cfee2f11be715d7d2.asciidoc index 700fe6f3a..48447243b 100644 --- a/docs/examples/948418e0ef1b7e7cfee2f11be715d7d2.asciidoc +++ b/docs/examples/948418e0ef1b7e7cfee2f11be715d7d2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/retrievers-examples.asciidoc:715 +// search/search-your-data/retrievers-examples.asciidoc:986 [source, python] ---- diff --git a/docs/examples/9501e6c8e95c21838653ea15b9b7ed5f.asciidoc b/docs/examples/9501e6c8e95c21838653ea15b9b7ed5f.asciidoc index 1971c9b0d..0613ecbc0 100644 --- a/docs/examples/9501e6c8e95c21838653ea15b9b7ed5f.asciidoc +++ b/docs/examples/9501e6c8e95c21838653ea15b9b7ed5f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/percolate-query.asciidoc:791 +// query-dsl/percolate-query.asciidoc:789 [source, python] ---- diff --git a/docs/examples/95414139c7b1203e3c2d99a354415801.asciidoc b/docs/examples/95414139c7b1203e3c2d99a354415801.asciidoc index 773758b44..8cee7bc6a 100644 
--- a/docs/examples/95414139c7b1203e3c2d99a354415801.asciidoc +++ b/docs/examples/95414139c7b1203e3c2d99a354415801.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/recovery.asciidoc:89 +// cat/recovery.asciidoc:185 [source, python] ---- diff --git a/docs/examples/957d2e6ddbb9a9b16549c5e67b93b41b.asciidoc b/docs/examples/957d2e6ddbb9a9b16549c5e67b93b41b.asciidoc index 4b88d0aa8..8b3478cfe 100644 --- a/docs/examples/957d2e6ddbb9a9b16549c5e67b93b41b.asciidoc +++ b/docs/examples/957d2e6ddbb9a9b16549c5e67b93b41b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/query-string-query.asciidoc:267 +// query-dsl/query-string-query.asciidoc:270 [source, python] ---- diff --git a/docs/examples/968fb5b92aa65af09544f7c002b0953e.asciidoc b/docs/examples/968fb5b92aa65af09544f7c002b0953e.asciidoc index 3746bf530..5b835081f 100644 --- a/docs/examples/968fb5b92aa65af09544f7c002b0953e.asciidoc +++ b/docs/examples/968fb5b92aa65af09544f7c002b0953e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/semantic-search-semantic-text.asciidoc:144 +// search/search-your-data/semantic-search-semantic-text.asciidoc:141 [source, python] ---- diff --git a/docs/examples/96ea0e80323d6d2d99964625c004a44d.asciidoc b/docs/examples/96ea0e80323d6d2d99964625c004a44d.asciidoc index 4f91cd731..ed5869398 100644 --- a/docs/examples/96ea0e80323d6d2d99964625c004a44d.asciidoc +++ b/docs/examples/96ea0e80323d6d2d99964625c004a44d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc:394 +// data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc:415 [source, python] ---- diff --git a/docs/examples/971fd23adb81bb5842c7750e0379336a.asciidoc b/docs/examples/971fd23adb81bb5842c7750e0379336a.asciidoc index a7eb8de57..e9641e6a1 100644 --- a/docs/examples/971fd23adb81bb5842c7750e0379336a.asciidoc +++ b/docs/examples/971fd23adb81bb5842c7750e0379336a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:764 +// search/retriever.asciidoc:794 [source, python] ---- diff --git a/docs/examples/97c6c07f46f4177f0565a04bc50924a3.asciidoc b/docs/examples/97c6c07f46f4177f0565a04bc50924a3.asciidoc index 35f2440da..723e4a9b4 100644 --- a/docs/examples/97c6c07f46f4177f0565a04bc50924a3.asciidoc +++ b/docs/examples/97c6c07f46f4177f0565a04bc50924a3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/retrievers-examples.asciidoc:113 +// search/search-your-data/retrievers-examples.asciidoc:118 [source, python] ---- diff --git a/docs/examples/986f892bfa4dfdf1da8455fdf84a4b0c.asciidoc b/docs/examples/986f892bfa4dfdf1da8455fdf84a4b0c.asciidoc index 3e5ded035..c2d806deb 100644 --- a/docs/examples/986f892bfa4dfdf1da8455fdf84a4b0c.asciidoc +++ b/docs/examples/986f892bfa4dfdf1da8455fdf84a4b0c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-alibabacloud-ai-search.asciidoc:228 +// inference/service-alibabacloud-ai-search.asciidoc:231 [source, python] ---- diff --git a/docs/examples/9a203aae3e1412d919546276fb52a5ca.asciidoc b/docs/examples/9a203aae3e1412d919546276fb52a5ca.asciidoc index 3d9cb9247..311f671bc 100644 --- a/docs/examples/9a203aae3e1412d919546276fb52a5ca.asciidoc +++ b/docs/examples/9a203aae3e1412d919546276fb52a5ca.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// 
inference/service-cohere.asciidoc:196 +// inference/service-cohere.asciidoc:198 [source, python] ---- diff --git a/docs/examples/9ab351893dae65ec97fd8cb6832950fb.asciidoc b/docs/examples/9ab351893dae65ec97fd8cb6832950fb.asciidoc index f3393b1a2..3474cad83 100644 --- a/docs/examples/9ab351893dae65ec97fd8cb6832950fb.asciidoc +++ b/docs/examples/9ab351893dae65ec97fd8cb6832950fb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/knn-search.asciidoc:1288 +// search/search-your-data/knn-search.asciidoc:1286 [source, python] ---- diff --git a/docs/examples/2c27a8eb6528126f37a843d434cd88b6.asciidoc b/docs/examples/9be8f52aab94b6bc8b8603234551475d.asciidoc similarity index 86% rename from docs/examples/2c27a8eb6528126f37a843d434cd88b6.asciidoc rename to docs/examples/9be8f52aab94b6bc8b8603234551475d.asciidoc index 5df64d938..27f9c3fbc 100644 --- a/docs/examples/2c27a8eb6528126f37a843d434cd88b6.asciidoc +++ b/docs/examples/9be8f52aab94b6bc8b8603234551475d.asciidoc @@ -9,7 +9,7 @@ resp = client.indices.analyze( { "type": "synonym_graph", "synonyms": [ - "dns, domain name system" + "internet phonebook, domain name system" ] } ], diff --git a/docs/examples/9c2ce0132e4527077443f007d27b1158.asciidoc b/docs/examples/9c2ce0132e4527077443f007d27b1158.asciidoc index 64eb514f0..2af70867a 100644 --- a/docs/examples/9c2ce0132e4527077443f007d27b1158.asciidoc +++ b/docs/examples/9c2ce0132e4527077443f007d27b1158.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/flattened.asciidoc:422 +// mapping/types/flattened.asciidoc:415 [source, python] ---- diff --git a/docs/examples/9d66cb59711f24e6b4ff85608c9b5a1b.asciidoc b/docs/examples/9d66cb59711f24e6b4ff85608c9b5a1b.asciidoc index 46b5bc2ff..dfe0eb762 100644 --- a/docs/examples/9d66cb59711f24e6b4ff85608c9b5a1b.asciidoc +++ b/docs/examples/9d66cb59711f24e6b4ff85608c9b5a1b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/task-queue-backlog.asciidoc:73 +// troubleshooting/common-issues/task-queue-backlog.asciidoc:51 [source, python] ---- diff --git a/docs/examples/9de4edafd22a8b9cb557632b2c8779cd.asciidoc b/docs/examples/9de4edafd22a8b9cb557632b2c8779cd.asciidoc index 435e9f608..855f95589 100644 --- a/docs/examples/9de4edafd22a8b9cb557632b2c8779cd.asciidoc +++ b/docs/examples/9de4edafd22a8b9cb557632b2c8779cd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-rest.asciidoc:309 +// esql/esql-rest.asciidoc:312 [source, python] ---- diff --git a/docs/examples/9f03a8340761e729e8bdc8c7b6f66d98.asciidoc b/docs/examples/9f03a8340761e729e8bdc8c7b6f66d98.asciidoc new file mode 100644 index 000000000..79ecaa066 --- /dev/null +++ b/docs/examples/9f03a8340761e729e8bdc8c7b6f66d98.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// mapping/types/semantic-text.asciidoc:163 + +[source, python] +---- +resp = client.indices.create( + index="test-index", + mappings={ + "properties": { + "my_semantic_field": { + "type": "semantic_text", + "chunking_settings": { + "strategy": "none" + } + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/9feff356f302ea4915347ab71cc4887a.asciidoc b/docs/examples/9feff356f302ea4915347ab71cc4887a.asciidoc index d9508e5ae..a8b12ac29 100644 --- a/docs/examples/9feff356f302ea4915347ab71cc4887a.asciidoc +++ b/docs/examples/9feff356f302ea4915347ab71cc4887a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/simulate-template.asciidoc:241 +// 
indices/simulate-template.asciidoc:240 [source, python] ---- diff --git a/docs/examples/a00311843b5f8f3e9f7d511334a828b1.asciidoc b/docs/examples/a00311843b5f8f3e9f7d511334a828b1.asciidoc index 9d5f860ed..5802d6251 100644 --- a/docs/examples/a00311843b5f8f3e9f7d511334a828b1.asciidoc +++ b/docs/examples/a00311843b5f8f3e9f7d511334a828b1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/rollup-caps.asciidoc:98 +// rollup/apis/rollup-caps.asciidoc:97 [source, python] ---- diff --git a/docs/examples/a1070cf2f5969d42d71cda057223f152.asciidoc b/docs/examples/a1070cf2f5969d42d71cda057223f152.asciidoc index 2de683ba9..5c00da449 100644 --- a/docs/examples/a1070cf2f5969d42d71cda057223f152.asciidoc +++ b/docs/examples/a1070cf2f5969d42d71cda057223f152.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// how-to/size-your-shards.asciidoc:248 +// how-to/size-your-shards.asciidoc:247 [source, python] ---- diff --git a/docs/examples/a1b668795243398f5bc40bcc9bead884.asciidoc b/docs/examples/a1b668795243398f5bc40bcc9bead884.asciidoc index 56d50d1b9..8b80052b2 100644 --- a/docs/examples/a1b668795243398f5bc40bcc9bead884.asciidoc +++ b/docs/examples/a1b668795243398f5bc40bcc9bead884.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/range.asciidoc:254 +// mapping/types/range.asciidoc:247 [source, python] ---- diff --git a/docs/examples/a1ccd51eef37e43c935a047b0ee15daa.asciidoc b/docs/examples/a1ccd51eef37e43c935a047b0ee15daa.asciidoc index d2ad185f9..00ce45ab7 100644 --- a/docs/examples/a1ccd51eef37e43c935a047b0ee15daa.asciidoc +++ b/docs/examples/a1ccd51eef37e43c935a047b0ee15daa.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/downsampling-manual.asciidoc:401 +// data-streams/downsampling-manual.asciidoc:404 [source, python] ---- diff --git a/docs/examples/a1dda7e7c01be96a4acf7b725d70385f.asciidoc b/docs/examples/a1dda7e7c01be96a4acf7b725d70385f.asciidoc index 304ea62c6..d8d5a4831 100644 --- a/docs/examples/a1dda7e7c01be96a4acf7b725d70385f.asciidoc +++ b/docs/examples/a1dda7e7c01be96a4acf7b725d70385f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:684 +// search/retriever.asciidoc:714 [source, python] ---- diff --git a/docs/examples/a1f70bc71b763b58206814c40a7440e7.asciidoc b/docs/examples/a1f70bc71b763b58206814c40a7440e7.asciidoc index 103eacdfc..03e109bd9 100644 --- a/docs/examples/a1f70bc71b763b58206814c40a7440e7.asciidoc +++ b/docs/examples/a1f70bc71b763b58206814c40a7440e7.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/watcher/update-settings.asciidoc:47 +// rest-api/watcher/update-settings.asciidoc:54 [source, python] ---- diff --git a/docs/examples/a2566a2ed14ecc504ca27f85771c2638.asciidoc b/docs/examples/a2566a2ed14ecc504ca27f85771c2638.asciidoc new file mode 100644 index 000000000..d8a541544 --- /dev/null +++ b/docs/examples/a2566a2ed14ecc504ca27f85771c2638.asciidoc @@ -0,0 +1,29 @@ +// This file is autogenerated, DO NOT EDIT +// esql/esql-lookup-join.asciidoc:122 + +[source, python] +---- +resp = client.indices.create( + index="firewall_logs", + mappings={ + "properties": { + "timestamp": { + "type": "date" + }, + "source.ip": { + "type": "ip" + }, + "destination.ip": { + "type": "ip" + }, + "action": { + "type": "keyword" + }, + "bytes_transferred": { + "type": "long" + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/a3646b59da66b9ab68bdbc8dc2e6a9be.asciidoc 
b/docs/examples/a3646b59da66b9ab68bdbc8dc2e6a9be.asciidoc index a1a4feb8b..26d1c1ce8 100644 --- a/docs/examples/a3646b59da66b9ab68bdbc8dc2e6a9be.asciidoc +++ b/docs/examples/a3646b59da66b9ab68bdbc8dc2e6a9be.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:159 +// search/retriever.asciidoc:183 [source, python] ---- diff --git a/docs/examples/a3779f21f132787c48681bfb50453592.asciidoc b/docs/examples/a3779f21f132787c48681bfb50453592.asciidoc index 6b0436b24..9e06dd98d 100644 --- a/docs/examples/a3779f21f132787c48681bfb50453592.asciidoc +++ b/docs/examples/a3779f21f132787c48681bfb50453592.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/processors/ip-location.asciidoc:85 +// ingest/processors/ip-location.asciidoc:100 [source, python] ---- diff --git a/docs/examples/a46f566ca031375658c22f89b87dc6d2.asciidoc b/docs/examples/a46f566ca031375658c22f89b87dc6d2.asciidoc deleted file mode 100644 index b046f9db2..000000000 --- a/docs/examples/a46f566ca031375658c22f89b87dc6d2.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// migration/migrate_9_0.asciidoc:379 - -[source, python] ----- -resp = client.cat.indices( - index=".ml-anomalies-custom-example", - v=True, - h="index,store.size", -) -print(resp) ----- diff --git a/docs/examples/a5aeb2c8bdf91f6146026ec8edc476b6.asciidoc b/docs/examples/a5aeb2c8bdf91f6146026ec8edc476b6.asciidoc index 48329183c..62a6b9162 100644 --- a/docs/examples/a5aeb2c8bdf91f6146026ec8edc476b6.asciidoc +++ b/docs/examples/a5aeb2c8bdf91f6146026ec8edc476b6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/date_nanos.asciidoc:155 +// mapping/types/date_nanos.asciidoc:148 [source, python] ---- diff --git a/docs/examples/a5e6ad9e65615f6f92ae6a19674dd742.asciidoc b/docs/examples/a5e6ad9e65615f6f92ae6a19674dd742.asciidoc index 35d77326c..9779c9745 100644 --- a/docs/examples/a5e6ad9e65615f6f92ae6a19674dd742.asciidoc +++ b/docs/examples/a5e6ad9e65615f6f92ae6a19674dd742.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/percolate-query.asciidoc:595 +// query-dsl/percolate-query.asciidoc:593 [source, python] ---- diff --git a/docs/examples/a5f9eb40087921e67d820775acf71522.asciidoc b/docs/examples/a5f9eb40087921e67d820775acf71522.asciidoc index c85c3543d..43e78f484 100644 --- a/docs/examples/a5f9eb40087921e67d820775acf71522.asciidoc +++ b/docs/examples/a5f9eb40087921e67d820775acf71522.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:218 +// indices/put-mapping.asciidoc:224 [source, python] ---- diff --git a/docs/examples/a692b4c0ca7825c467880b346841f5a5.asciidoc b/docs/examples/a692b4c0ca7825c467880b346841f5a5.asciidoc index 4069ff6d3..91cfd9925 100644 --- a/docs/examples/a692b4c0ca7825c467880b346841f5a5.asciidoc +++ b/docs/examples/a692b4c0ca7825c467880b346841f5a5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:162 +// indices/put-mapping.asciidoc:168 [source, python] ---- diff --git a/docs/examples/a6ccac9f80c5e5efdaab992f3a32d919.asciidoc b/docs/examples/a6ccac9f80c5e5efdaab992f3a32d919.asciidoc index d1b0285d8..e87bec20f 100644 --- a/docs/examples/a6ccac9f80c5e5efdaab992f3a32d919.asciidoc +++ b/docs/examples/a6ccac9f80c5e5efdaab992f3a32d919.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc:407 +// 
data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc:428 [source, python] ---- diff --git a/docs/examples/a72613de3774571ba24def4b495161b5.asciidoc b/docs/examples/a72613de3774571ba24def4b495161b5.asciidoc index 790292f7e..11a947b90 100644 --- a/docs/examples/a72613de3774571ba24def4b495161b5.asciidoc +++ b/docs/examples/a72613de3774571ba24def4b495161b5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:428 +// indices/put-mapping.asciidoc:434 [source, python] ---- diff --git a/docs/examples/a7471238a42c7cd0f8a7ac9ea14dfbcb.asciidoc b/docs/examples/a7471238a42c7cd0f8a7ac9ea14dfbcb.asciidoc new file mode 100644 index 000000000..f598509a4 --- /dev/null +++ b/docs/examples/a7471238a42c7cd0f8a7ac9ea14dfbcb.asciidoc @@ -0,0 +1,62 @@ +// This file is autogenerated, DO NOT EDIT +// esql/esql-lookup-join.asciidoc:151 + +[source, python] +---- +resp = client.bulk( + index="firewall_logs", + operations=[ + { + "index": {} + }, + { + "timestamp": "2025-04-23T10:00:01Z", + "source.ip": "192.0.2.1", + "destination.ip": "10.0.0.100", + "action": "allow", + "bytes_transferred": 1024 + }, + { + "index": {} + }, + { + "timestamp": "2025-04-23T10:00:05Z", + "source.ip": "203.0.113.5", + "destination.ip": "10.0.0.55", + "action": "allow", + "bytes_transferred": 2048 + }, + { + "index": {} + }, + { + "timestamp": "2025-04-23T10:00:08Z", + "source.ip": "198.51.100.2", + "destination.ip": "10.0.0.200", + "action": "block", + "bytes_transferred": 0 + }, + { + "index": {} + }, + { + "timestamp": "2025-04-23T10:00:15Z", + "source.ip": "203.0.113.5", + "destination.ip": "10.0.0.44", + "action": "allow", + "bytes_transferred": 4096 + }, + { + "index": {} + }, + { + "timestamp": "2025-04-23T10:00:30Z", + "source.ip": "192.0.2.1", + "destination.ip": "10.0.0.100", + "action": "allow", + "bytes_transferred": 512 + } + ], +) +print(resp) +---- diff --git a/docs/examples/a7d814caf2a995d2aeadecc3495011be.asciidoc b/docs/examples/a7d814caf2a995d2aeadecc3495011be.asciidoc index 5801198e8..f308b760b 100644 --- a/docs/examples/a7d814caf2a995d2aeadecc3495011be.asciidoc +++ b/docs/examples/a7d814caf2a995d2aeadecc3495011be.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/boolean.asciidoc:248 +// mapping/types/boolean.asciidoc:241 [source, python] ---- diff --git a/docs/examples/a8dff54362184b2732b9bd248cf6df8a.asciidoc b/docs/examples/a8dff54362184b2732b9bd248cf6df8a.asciidoc index 2881a6771..fb1449003 100644 --- a/docs/examples/a8dff54362184b2732b9bd248cf6df8a.asciidoc +++ b/docs/examples/a8dff54362184b2732b9bd248cf6df8a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/range.asciidoc:418 +// mapping/types/range.asciidoc:411 [source, python] ---- diff --git a/docs/examples/a960b43e720b4934edb74ab4b085ca77.asciidoc b/docs/examples/a960b43e720b4934edb74ab4b085ca77.asciidoc index b48cffe85..b1949984a 100644 --- a/docs/examples/a960b43e720b4934edb74ab4b085ca77.asciidoc +++ b/docs/examples/a960b43e720b4934edb74ab4b085ca77.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/list-connectors-api.asciidoc:88 +// connector/apis/list-connectors-api.asciidoc:85 [source, python] ---- diff --git a/docs/examples/a999b5661bebb802bbbfe04faacf1971.asciidoc b/docs/examples/a999b5661bebb802bbbfe04faacf1971.asciidoc index 77df3aaa7..b8c02fe91 100644 --- a/docs/examples/a999b5661bebb802bbbfe04faacf1971.asciidoc +++ b/docs/examples/a999b5661bebb802bbbfe04faacf1971.asciidoc 
@@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// how-to/size-your-shards.asciidoc:511 +// how-to/size-your-shards.asciidoc:510 [source, python] ---- diff --git a/docs/examples/a9f14efc26fdd3c37a71f06c310163d9.asciidoc b/docs/examples/a9f14efc26fdd3c37a71f06c310163d9.asciidoc index 79fb403e2..68d4a5be9 100644 --- a/docs/examples/a9f14efc26fdd3c37a71f06c310163d9.asciidoc +++ b/docs/examples/a9f14efc26fdd3c37a71f06c310163d9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/retriever.asciidoc:650 +// search/retriever.asciidoc:680 [source, python] ---- diff --git a/docs/examples/aa676d54a59dee87ecd28bcc1edce59b.asciidoc b/docs/examples/aa676d54a59dee87ecd28bcc1edce59b.asciidoc index d6bbbd75a..9a8dc2088 100644 --- a/docs/examples/aa676d54a59dee87ecd28bcc1edce59b.asciidoc +++ b/docs/examples/aa676d54a59dee87ecd28bcc1edce59b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// inference/service-alibabacloud-ai-search.asciidoc:192 +// inference/service-alibabacloud-ai-search.asciidoc:195 [source, python] ---- diff --git a/docs/examples/aa814309ad5f1630886ba75255b444f5.asciidoc b/docs/examples/aa814309ad5f1630886ba75255b444f5.asciidoc index 9ced73fdc..d542f9477 100644 --- a/docs/examples/aa814309ad5f1630886ba75255b444f5.asciidoc +++ b/docs/examples/aa814309ad5f1630886ba75255b444f5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/task-queue-backlog.asciidoc:104 +// troubleshooting/common-issues/task-queue-backlog.asciidoc:80 [source, python] ---- diff --git a/docs/examples/aad7d80990a6a3c391ff555ce09ae9dc.asciidoc b/docs/examples/aad7d80990a6a3c391ff555ce09ae9dc.asciidoc index adf825bb3..db094b13f 100644 --- a/docs/examples/aad7d80990a6a3c391ff555ce09ae9dc.asciidoc +++ b/docs/examples/aad7d80990a6a3c391ff555ce09ae9dc.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/numeric.asciidoc:295 +// mapping/types/numeric.asciidoc:288 [source, python] ---- diff --git a/docs/examples/ac22cc2b0f4ad659055feed2852a2d59.asciidoc b/docs/examples/ac22cc2b0f4ad659055feed2852a2d59.asciidoc index b722f9c00..2fd88d8db 100644 --- a/docs/examples/ac22cc2b0f4ad659055feed2852a2d59.asciidoc +++ b/docs/examples/ac22cc2b0f4ad659055feed2852a2d59.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/retrievers-examples.asciidoc:1485 +// search/search-your-data/retrievers-examples.asciidoc:1776 [source, python] ---- diff --git a/docs/examples/ac497917ef707538198a8458ae3d5c6b.asciidoc b/docs/examples/ac497917ef707538198a8458ae3d5c6b.asciidoc index d0c229186..ee4ab8bd9 100644 --- a/docs/examples/ac497917ef707538198a8458ae3d5c6b.asciidoc +++ b/docs/examples/ac497917ef707538198a8458ae3d5c6b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/match-query.asciidoc:165 +// query-dsl/match-query.asciidoc:169 [source, python] ---- diff --git a/docs/examples/ac5b91aa75696f9880451c9439fd9eec.asciidoc b/docs/examples/ac5b91aa75696f9880451c9439fd9eec.asciidoc index 84f62b3bf..128528250 100644 --- a/docs/examples/ac5b91aa75696f9880451c9439fd9eec.asciidoc +++ b/docs/examples/ac5b91aa75696f9880451c9439fd9eec.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/range.asciidoc:461 +// mapping/types/range.asciidoc:454 [source, python] ---- diff --git a/docs/examples/acc6cd860032167e34fa5e0c043ab3b0.asciidoc b/docs/examples/acc6cd860032167e34fa5e0c043ab3b0.asciidoc index ecfc1bbc9..83bd847a4 100644 
--- a/docs/examples/acc6cd860032167e34fa5e0c043ab3b0.asciidoc +++ b/docs/examples/acc6cd860032167e34fa5e0c043ab3b0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/query-string-query.asciidoc:335 +// query-dsl/query-string-query.asciidoc:338 [source, python] ---- diff --git a/docs/examples/4de4bb55bbc0a76c75d256f245a3ee3f.asciidoc b/docs/examples/ad0204602a9ebdf748b06ce9ade218e4.asciidoc similarity index 50% rename from docs/examples/4de4bb55bbc0a76c75d256f245a3ee3f.asciidoc rename to docs/examples/ad0204602a9ebdf748b06ce9ade218e4.asciidoc index 11ed7d7dd..9ff7e3d80 100644 --- a/docs/examples/4de4bb55bbc0a76c75d256f245a3ee3f.asciidoc +++ b/docs/examples/ad0204602a9ebdf748b06ce9ade218e4.asciidoc @@ -1,15 +1,15 @@ // This file is autogenerated, DO NOT EDIT -// inference/elastic-infer-service.asciidoc:100 +// inference/service-voyageai.asciidoc:169 [source, python] ---- resp = client.inference.put( - task_type="sparse_embedding", - inference_id="elser-model-eis", + task_type="rerank", + inference_id="voyageai-rerank", inference_config={ - "service": "elastic", + "service": "voyageai", "service_settings": { - "model_name": "elser" + "model_id": "rerank-2" } }, ) diff --git a/docs/examples/ad2416ca0581316cee6c63129685bca5.asciidoc b/docs/examples/ad2416ca0581316cee6c63129685bca5.asciidoc index c7ce73d94..3d857fa1a 100644 --- a/docs/examples/ad2416ca0581316cee6c63129685bca5.asciidoc +++ b/docs/examples/ad2416ca0581316cee6c63129685bca5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/query-string-query.asciidoc:498 +// query-dsl/query-string-query.asciidoc:501 [source, python] ---- diff --git a/docs/examples/add240aa149d8b11139947502b279ee0.asciidoc b/docs/examples/add240aa149d8b11139947502b279ee0.asciidoc index eee73448f..3095f9ee1 100644 --- a/docs/examples/add240aa149d8b11139947502b279ee0.asciidoc +++ b/docs/examples/add240aa149d8b11139947502b279ee0.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/paginate-search-results.asciidoc:403 +// search/search-your-data/paginate-search-results.asciidoc:418 [source, python] ---- diff --git a/docs/examples/ae4b57e167b81ffed537a8e6eaf7f855.asciidoc b/docs/examples/ae4b57e167b81ffed537a8e6eaf7f855.asciidoc new file mode 100644 index 000000000..f131f8dfa --- /dev/null +++ b/docs/examples/ae4b57e167b81ffed537a8e6eaf7f855.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// search/retriever.asciidoc:1040 + +[source, python] +---- +resp = client.search( + index="books", + retriever={ + "linear": { + "query": "elasticsearch", + "fields": [ + "title", + "description", + "title_semantic", + "description_semantic" + ], + "normalizer": "minmax" + } + }, +) +print(resp) +---- diff --git a/docs/examples/ae9ccfaa146731ab9176df90670db1c2.asciidoc b/docs/examples/ae9ccfaa146731ab9176df90670db1c2.asciidoc index 0e01584d8..b88af670d 100644 --- a/docs/examples/ae9ccfaa146731ab9176df90670db1c2.asciidoc +++ b/docs/examples/ae9ccfaa146731ab9176df90670db1c2.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/bulk.asciidoc:509 +// docs/bulk.asciidoc:505 [source, python] ---- diff --git a/docs/examples/af607715d0693587dd12962266359a96.asciidoc b/docs/examples/af607715d0693587dd12962266359a96.asciidoc index afd7cc471..2b5709f00 100644 --- a/docs/examples/af607715d0693587dd12962266359a96.asciidoc +++ b/docs/examples/af607715d0693587dd12962266359a96.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// 
snapshot-restore/repository-s3.asciidoc:232 +// snapshot-restore/repository-s3.asciidoc:216 [source, python] ---- diff --git a/docs/examples/afa11ebb493ebbfd77acbbe50d2ce6db.asciidoc b/docs/examples/afa11ebb493ebbfd77acbbe50d2ce6db.asciidoc index 591e3db08..fcead7c24 100644 --- a/docs/examples/afa11ebb493ebbfd77acbbe50d2ce6db.asciidoc +++ b/docs/examples/afa11ebb493ebbfd77acbbe50d2ce6db.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/downsampling-manual.asciidoc:591 +// data-streams/downsampling-manual.asciidoc:594 [source, python] ---- diff --git a/docs/examples/afef5cac988592b97ae289ab39c2f437.asciidoc b/docs/examples/afef5cac988592b97ae289ab39c2f437.asciidoc index 681761170..4ed1299b1 100644 --- a/docs/examples/afef5cac988592b97ae289ab39c2f437.asciidoc +++ b/docs/examples/afef5cac988592b97ae289ab39c2f437.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/text.asciidoc:307 +// mapping/types/text.asciidoc:313 [source, python] ---- diff --git a/docs/examples/b0ee6f19875fe5bad8aab02d60e3532c.asciidoc b/docs/examples/b0ee6f19875fe5bad8aab02d60e3532c.asciidoc index 610238476..df5a640e1 100644 --- a/docs/examples/b0ee6f19875fe5bad8aab02d60e3532c.asciidoc +++ b/docs/examples/b0ee6f19875fe5bad8aab02d60e3532c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/processors/geoip.asciidoc:85 +// ingest/processors/geoip.asciidoc:86 [source, python] ---- diff --git a/docs/examples/b11a0675e49df0709be693297ca73a2c.asciidoc b/docs/examples/b11a0675e49df0709be693297ca73a2c.asciidoc index d86bed6c9..2cee25e86 100644 --- a/docs/examples/b11a0675e49df0709be693297ca73a2c.asciidoc +++ b/docs/examples/b11a0675e49df0709be693297ca73a2c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/info.asciidoc:199 +// rest-api/info.asciidoc:203 [source, python] ---- diff --git a/docs/examples/b3cd07f02059165fd62a2f148be3dc58.asciidoc b/docs/examples/b3cd07f02059165fd62a2f148be3dc58.asciidoc index be7f57a49..8d200d0d9 100644 --- a/docs/examples/b3cd07f02059165fd62a2f148be3dc58.asciidoc +++ b/docs/examples/b3cd07f02059165fd62a2f148be3dc58.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/numeric.asciidoc:259 +// mapping/types/numeric.asciidoc:252 [source, python] ---- diff --git a/docs/examples/b557f114e21dbc6f531d4e7621a08e8f.asciidoc b/docs/examples/b557f114e21dbc6f531d4e7621a08e8f.asciidoc index 0b87c58c8..67550d6d1 100644 --- a/docs/examples/b557f114e21dbc6f531d4e7621a08e8f.asciidoc +++ b/docs/examples/b557f114e21dbc6f531d4e7621a08e8f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/fields/source-field.asciidoc:80 +// mapping/fields/source-field.asciidoc:83 [source, python] ---- diff --git a/docs/examples/b583bf8d3a2f49d633aa2cfed5606418.asciidoc b/docs/examples/b583bf8d3a2f49d633aa2cfed5606418.asciidoc index 797108412..fc4cf9551 100644 --- a/docs/examples/b583bf8d3a2f49d633aa2cfed5606418.asciidoc +++ b/docs/examples/b583bf8d3a2f49d633aa2cfed5606418.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-component-template.asciidoc:196 +// indices/put-component-template.asciidoc:194 [source, python] ---- diff --git a/docs/examples/b5bc1bb7278f2f95bc54790c78c928e0.asciidoc b/docs/examples/b5bc1bb7278f2f95bc54790c78c928e0.asciidoc index 0e8d41aca..30ad58e2e 100644 --- a/docs/examples/b5bc1bb7278f2f95bc54790c78c928e0.asciidoc +++ b/docs/examples/b5bc1bb7278f2f95bc54790c78c928e0.asciidoc @@ -1,5 +1,5 @@ 
// This file is autogenerated, DO NOT EDIT -// rollup/apis/get-job.asciidoc:170 +// rollup/apis/get-job.asciidoc:169 [source, python] ---- diff --git a/docs/examples/b68ed7037042719945a2452d23e64c78.asciidoc b/docs/examples/b68ed7037042719945a2452d23e64c78.asciidoc index ff3787f68..222a5fa01 100644 --- a/docs/examples/b68ed7037042719945a2452d23e64c78.asciidoc +++ b/docs/examples/b68ed7037042719945a2452d23e64c78.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/percolate-query.asciidoc:343 +// query-dsl/percolate-query.asciidoc:341 [source, python] ---- diff --git a/docs/examples/b7c99eb38d4b37e22de1ffcb0e88ae4c.asciidoc b/docs/examples/b7c99eb38d4b37e22de1ffcb0e88ae4c.asciidoc index 878f2ab0a..df4925624 100644 --- a/docs/examples/b7c99eb38d4b37e22de1ffcb0e88ae4c.asciidoc +++ b/docs/examples/b7c99eb38d4b37e22de1ffcb0e88ae4c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/percolate-query.asciidoc:279 +// query-dsl/percolate-query.asciidoc:277 [source, python] ---- diff --git a/docs/examples/b87438263ccd68624b1d69d8750f9432.asciidoc b/docs/examples/b87438263ccd68624b1d69d8750f9432.asciidoc index 94db3486a..8e087b9ad 100644 --- a/docs/examples/b87438263ccd68624b1d69d8750f9432.asciidoc +++ b/docs/examples/b87438263ccd68624b1d69d8750f9432.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/params/doc-values.asciidoc:37 +// mapping/params/doc-values.asciidoc:39 [source, python] ---- diff --git a/docs/examples/b9370fa1aa18fe4bc00cf81ef0c0d45b.asciidoc b/docs/examples/b9370fa1aa18fe4bc00cf81ef0c0d45b.asciidoc index ce99d9c1b..022050b51 100644 --- a/docs/examples/b9370fa1aa18fe4bc00cf81ef0c0d45b.asciidoc +++ b/docs/examples/b9370fa1aa18fe4bc00cf81ef0c0d45b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/query-string-query.asciidoc:318 +// query-dsl/query-string-query.asciidoc:321 [source, python] ---- diff --git a/docs/examples/b94cee0f74f57742b3948f9b784dfdd4.asciidoc b/docs/examples/b94cee0f74f57742b3948f9b784dfdd4.asciidoc index fca964943..6234e4d54 100644 --- a/docs/examples/b94cee0f74f57742b3948f9b784dfdd4.asciidoc +++ b/docs/examples/b94cee0f74f57742b3948f9b784dfdd4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/paginate-search-results.asciidoc:537 +// search/search-your-data/paginate-search-results.asciidoc:552 [source, python] ---- diff --git a/docs/examples/ba0e7e0b18fc9ec6c623d40186d1f61b.asciidoc b/docs/examples/ba0e7e0b18fc9ec6c623d40186d1f61b.asciidoc index c25ea798e..906026b4b 100644 --- a/docs/examples/ba0e7e0b18fc9ec6c623d40186d1f61b.asciidoc +++ b/docs/examples/ba0e7e0b18fc9ec6c623d40186d1f61b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/resolve-cluster.asciidoc:271 +// indices/resolve-cluster.asciidoc:275 [source, python] ---- diff --git a/docs/examples/ba650046f9063f6c43d76f47e0f94403.asciidoc b/docs/examples/ba650046f9063f6c43d76f47e0f94403.asciidoc index 9b94f645f..567a7d3f2 100644 --- a/docs/examples/ba650046f9063f6c43d76f47e0f94403.asciidoc +++ b/docs/examples/ba650046f9063f6c43d76f47e0f94403.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/date.asciidoc:244 +// mapping/types/date.asciidoc:245 [source, python] ---- diff --git a/docs/examples/bb2ba5d1885f87506f90dbb002e518f4.asciidoc b/docs/examples/bb2ba5d1885f87506f90dbb002e518f4.asciidoc index f1cecf299..8af1b90b1 100644 --- 
a/docs/examples/bb2ba5d1885f87506f90dbb002e518f4.asciidoc +++ b/docs/examples/bb2ba5d1885f87506f90dbb002e518f4.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/retrievers-examples.asciidoc:604 +// search/search-your-data/retrievers-examples.asciidoc:875 [source, python] ---- diff --git a/docs/examples/bb5a67e3d2d9cd3016e487e627769fe8.asciidoc b/docs/examples/bb5a67e3d2d9cd3016e487e627769fe8.asciidoc index 94bea00eb..426fbc79b 100644 --- a/docs/examples/bb5a67e3d2d9cd3016e487e627769fe8.asciidoc +++ b/docs/examples/bb5a67e3d2d9cd3016e487e627769fe8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// quickstart/full-text-filtering-tutorial.asciidoc:129 +// quickstart/esql-search-tutorial.asciidoc:151 [source, python] ---- diff --git a/docs/examples/bdaf00d791706d7fde25fd65d3735b94.asciidoc b/docs/examples/bdaf00d791706d7fde25fd65d3735b94.asciidoc index 08227226e..53567d173 100644 --- a/docs/examples/bdaf00d791706d7fde25fd65d3735b94.asciidoc +++ b/docs/examples/bdaf00d791706d7fde25fd65d3735b94.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/keyword.asciidoc:184 +// mapping/types/keyword.asciidoc:178 [source, python] ---- diff --git a/docs/examples/be5c5a9c25901737585e4fff9195da3c.asciidoc b/docs/examples/be5c5a9c25901737585e4fff9195da3c.asciidoc index 1590cce0b..370353f27 100644 --- a/docs/examples/be5c5a9c25901737585e4fff9195da3c.asciidoc +++ b/docs/examples/be5c5a9c25901737585e4fff9195da3c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/dense-vector.asciidoc:435 +// mapping/types/dense-vector.asciidoc:428 [source, python] ---- diff --git a/docs/examples/beb0b9ff4f68672273fcff1b7bae706b.asciidoc b/docs/examples/beb0b9ff4f68672273fcff1b7bae706b.asciidoc index 4f6931ca5..f039bd547 100644 --- a/docs/examples/beb0b9ff4f68672273fcff1b7bae706b.asciidoc +++ b/docs/examples/beb0b9ff4f68672273fcff1b7bae706b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:411 +// indices/put-mapping.asciidoc:417 [source, python] ---- diff --git a/docs/examples/befa73a8a419fcf3b7798548b54a20bf.asciidoc b/docs/examples/befa73a8a419fcf3b7798548b54a20bf.asciidoc index 7bd330d62..cd849bc8f 100644 --- a/docs/examples/befa73a8a419fcf3b7798548b54a20bf.asciidoc +++ b/docs/examples/befa73a8a419fcf3b7798548b54a20bf.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/knn-search.asciidoc:1146 +// search/search-your-data/knn-search.asciidoc:1144 [source, python] ---- diff --git a/docs/examples/bf1de9fa1b825fa875d27fa08821a6d1.asciidoc b/docs/examples/bf1de9fa1b825fa875d27fa08821a6d1.asciidoc index 7e2332729..4fc505db3 100644 --- a/docs/examples/bf1de9fa1b825fa875d27fa08821a6d1.asciidoc +++ b/docs/examples/bf1de9fa1b825fa875d27fa08821a6d1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-across-clusters.asciidoc:119 +// esql/esql-across-clusters.asciidoc:120 [source, python] ---- diff --git a/docs/examples/bfdad8a928ea30d7cf60d0a0a6bc6e2e.asciidoc b/docs/examples/bfdad8a928ea30d7cf60d0a0a6bc6e2e.asciidoc index f252a8b5a..a3ffebb32 100644 --- a/docs/examples/bfdad8a928ea30d7cf60d0a0a6bc6e2e.asciidoc +++ b/docs/examples/bfdad8a928ea30d7cf60d0a0a6bc6e2e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/bulk.asciidoc:721 +// docs/bulk.asciidoc:717 [source, python] ---- diff --git a/docs/examples/c067182d385f59ce5952fb9a716fbf05.asciidoc 
b/docs/examples/c067182d385f59ce5952fb9a716fbf05.asciidoc index 44ec44783..c71753127 100644 --- a/docs/examples/c067182d385f59ce5952fb9a716fbf05.asciidoc +++ b/docs/examples/c067182d385f59ce5952fb9a716fbf05.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/post-calendar-event.asciidoc:85 +// ml/anomaly-detection/apis/post-calendar-event.asciidoc:69 [source, python] ---- diff --git a/docs/examples/c14bd2a793721615d2f42bce5eea9f1f.asciidoc b/docs/examples/c14bd2a793721615d2f42bce5eea9f1f.asciidoc new file mode 100644 index 000000000..0774fd002 --- /dev/null +++ b/docs/examples/c14bd2a793721615d2f42bce5eea9f1f.asciidoc @@ -0,0 +1,26 @@ +// This file is autogenerated, DO NOT EDIT +// inference/service-watsonx-ai.asciidoc:147 + +[source, python] +---- +resp = client.inference.put( + task_type="rerank", + inference_id="watsonx-rerank", + inference_config={ + "service": "watsonxai", + "service_settings": { + "api_key": "", + "url": "", + "model_id": "cross-encoder/ms-marco-minilm-l-12-v2", + "project_id": "", + "api_version": "2024-05-02" + }, + "task_settings": { + "truncate_input_tokens": 50, + "return_documents": True, + "top_n": 3 + } + }, +) +print(resp) +---- diff --git a/docs/examples/c18100d62ed31bc9e05f62900156e6a8.asciidoc b/docs/examples/c18100d62ed31bc9e05f62900156e6a8.asciidoc index 0df79b777..6b33919c4 100644 --- a/docs/examples/c18100d62ed31bc9e05f62900156e6a8.asciidoc +++ b/docs/examples/c18100d62ed31bc9e05f62900156e6a8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/list-connectors-api.asciidoc:102 +// connector/apis/list-connectors-api.asciidoc:99 [source, python] ---- diff --git a/docs/examples/ffda10edaa7ce087703193c3cb95a426.asciidoc b/docs/examples/c2a9233c00ffa0aeb921edc072dd0c6f.asciidoc similarity index 94% rename from docs/examples/ffda10edaa7ce087703193c3cb95a426.asciidoc rename to docs/examples/c2a9233c00ffa0aeb921edc072dd0c6f.asciidoc index d88901a8f..4f3b13969 100644 --- a/docs/examples/ffda10edaa7ce087703193c3cb95a426.asciidoc +++ b/docs/examples/c2a9233c00ffa0aeb921edc072dd0c6f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/retrievers-examples.asciidoc:14 +// search/search-your-data/retrievers-examples.asciidoc:15 [source, python] ---- @@ -20,7 +20,11 @@ resp = client.indices.create( } }, "text": { - "type": "text" + "type": "text", + "copy_to": "text_semantic" + }, + "text_semantic": { + "type": "semantic_text" }, "year": { "type": "integer" diff --git a/docs/examples/c2c21e2824fbf6b7198ede30419da82b.asciidoc b/docs/examples/c2c21e2824fbf6b7198ede30419da82b.asciidoc index 1f047c0e7..c3bf844e6 100644 --- a/docs/examples/c2c21e2824fbf6b7198ede30419da82b.asciidoc +++ b/docs/examples/c2c21e2824fbf6b7198ede30419da82b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/paginate-search-results.asciidoc:529 +// search/search-your-data/paginate-search-results.asciidoc:544 [source, python] ---- diff --git a/docs/examples/134384b8c63cfbd8d762fb01757bb3f9.asciidoc b/docs/examples/c38abc4b33115bce453748288837db8f.asciidoc similarity index 84% rename from docs/examples/134384b8c63cfbd8d762fb01757bb3f9.asciidoc rename to docs/examples/c38abc4b33115bce453748288837db8f.asciidoc index ef8d5d6ba..2834455bc 100644 --- a/docs/examples/134384b8c63cfbd8d762fb01757bb3f9.asciidoc +++ b/docs/examples/c38abc4b33115bce453748288837db8f.asciidoc @@ -6,7 +6,7 @@ resp = client.index( index="logs-debug", document={ - 
"date": "2019-12-12", + "@timestamp": "2019-12-12", "message": "Starting up Elasticsearch", "level": "debug" }, @@ -16,7 +16,7 @@ print(resp) resp1 = client.index( index="logs-debug", document={ - "date": "2019-12-12", + "@timestamp": "2019-12-12", "message": "Starting up Elasticsearch" }, ) diff --git a/docs/examples/c4607ca79b2bcde39305d6f4f21cad37.asciidoc b/docs/examples/c4607ca79b2bcde39305d6f4f21cad37.asciidoc index fd2d55854..fc9f61425 100644 --- a/docs/examples/c4607ca79b2bcde39305d6f4f21cad37.asciidoc +++ b/docs/examples/c4607ca79b2bcde39305d6f4f21cad37.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/esql-rest.asciidoc:226 +// esql/esql-rest.asciidoc:228 [source, python] ---- diff --git a/docs/examples/c49ce88ff64cdeadb7029959e80c8f84.asciidoc b/docs/examples/c49ce88ff64cdeadb7029959e80c8f84.asciidoc new file mode 100644 index 000000000..1f0144aee --- /dev/null +++ b/docs/examples/c49ce88ff64cdeadb7029959e80c8f84.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// mapping/types/sparse-vector.asciidoc:33 + +[source, python] +---- +resp = client.indices.create( + index="my-index", + mappings={ + "properties": { + "text.tokens": { + "type": "sparse_vector", + "index_options": { + "prune": True, + "pruning_config": { + "tokens_freq_ratio_threshold": 5, + "tokens_weight_threshold": 0.4 + } + } + } + } + }, +) +print(resp) +---- diff --git a/docs/examples/c526fca1609b4c3c1d12dfd218d69a50.asciidoc b/docs/examples/c526fca1609b4c3c1d12dfd218d69a50.asciidoc index 8ff744913..a40dd2988 100644 --- a/docs/examples/c526fca1609b4c3c1d12dfd218d69a50.asciidoc +++ b/docs/examples/c526fca1609b4c3c1d12dfd218d69a50.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:383 +// indices/put-mapping.asciidoc:389 [source, python] ---- diff --git a/docs/examples/c5ed7d83ade97a417aef28b9e2871e5d.asciidoc b/docs/examples/c5ed7d83ade97a417aef28b9e2871e5d.asciidoc index a4d7c3543..20fd3c613 100644 --- a/docs/examples/c5ed7d83ade97a417aef28b9e2871e5d.asciidoc +++ b/docs/examples/c5ed7d83ade97a417aef28b9e2871e5d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/common-log-format-example.asciidoc:189 +// ingest/common-log-format-example.asciidoc:190 [source, python] ---- diff --git a/docs/examples/c6339d09f85000a6432304b0ec63b8f6.asciidoc b/docs/examples/c6339d09f85000a6432304b0ec63b8f6.asciidoc index 514f0f8aa..b6e01c2b8 100644 --- a/docs/examples/c6339d09f85000a6432304b0ec63b8f6.asciidoc +++ b/docs/examples/c6339d09f85000a6432304b0ec63b8f6.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-component-template.asciidoc:236 +// indices/put-component-template.asciidoc:234 [source, python] ---- diff --git a/docs/examples/c6bdd5c7de79d6d9ac8e33a397b511e8.asciidoc b/docs/examples/c6bdd5c7de79d6d9ac8e33a397b511e8.asciidoc index 0fe500a97..27af0e15c 100644 --- a/docs/examples/c6bdd5c7de79d6d9ac8e33a397b511e8.asciidoc +++ b/docs/examples/c6bdd5c7de79d6d9ac8e33a397b511e8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:327 +// indices/put-mapping.asciidoc:333 [source, python] ---- diff --git a/docs/examples/c793efe7280e9b6e09981c4d4f832348.asciidoc b/docs/examples/c793efe7280e9b6e09981c4d4f832348.asciidoc index dcdc437f8..e46548ee1 100644 --- a/docs/examples/c793efe7280e9b6e09981c4d4f832348.asciidoc +++ b/docs/examples/c793efe7280e9b6e09981c4d4f832348.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT 
EDIT -// mapping/types/ip.asciidoc:166 +// mapping/types/ip.asciidoc:159 [source, python] ---- diff --git a/docs/examples/c9c396b94bb88098477e2b08b55a12ee.asciidoc b/docs/examples/c9c396b94bb88098477e2b08b55a12ee.asciidoc index e300dd9bb..518cc6213 100644 --- a/docs/examples/c9c396b94bb88098477e2b08b55a12ee.asciidoc +++ b/docs/examples/c9c396b94bb88098477e2b08b55a12ee.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// docs/bulk.asciidoc:774 +// docs/bulk.asciidoc:770 [source, python] ---- diff --git a/docs/examples/ca5dda98e977125d40a7fe1e178e213f.asciidoc b/docs/examples/ca5dda98e977125d40a7fe1e178e213f.asciidoc index a6c8c5cb1..fccdd63d8 100644 --- a/docs/examples/ca5dda98e977125d40a7fe1e178e213f.asciidoc +++ b/docs/examples/ca5dda98e977125d40a7fe1e178e213f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/sparse-vector-query.asciidoc:134 +// query-dsl/sparse-vector-query.asciidoc:142 [source, python] ---- diff --git a/docs/examples/cc5eefcc2102aae7e87b0c87b4af10b8.asciidoc b/docs/examples/cc5eefcc2102aae7e87b0c87b4af10b8.asciidoc index b994f1ee9..f00e52a51 100644 --- a/docs/examples/cc5eefcc2102aae7e87b0c87b4af10b8.asciidoc +++ b/docs/examples/cc5eefcc2102aae7e87b0c87b4af10b8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/multivalued-fields.asciidoc:54 +// esql/multivalued-fields.asciidoc:56 [source, python] ---- diff --git a/docs/examples/cdb68b3f565df7c85e52a55864b37d40.asciidoc b/docs/examples/cdb68b3f565df7c85e52a55864b37d40.asciidoc index f3ee218e6..e5437840c 100644 --- a/docs/examples/cdb68b3f565df7c85e52a55864b37d40.asciidoc +++ b/docs/examples/cdb68b3f565df7c85e52a55864b37d40.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:364 +// indices/put-mapping.asciidoc:370 [source, python] ---- diff --git a/docs/examples/ce2c2e8f5a2e4daf051b6e10122e5aae.asciidoc b/docs/examples/ce2c2e8f5a2e4daf051b6e10122e5aae.asciidoc index 2e8eefb75..84120b35f 100644 --- a/docs/examples/ce2c2e8f5a2e4daf051b6e10122e5aae.asciidoc +++ b/docs/examples/ce2c2e8f5a2e4daf051b6e10122e5aae.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/dense-vector.asciidoc:519 +// mapping/types/dense-vector.asciidoc:512 [source, python] ---- diff --git a/docs/examples/cf23f18761df33f08bc6f6d1875496fd.asciidoc b/docs/examples/cf23f18761df33f08bc6f6d1875496fd.asciidoc index b4b4bdf4e..79ac096c2 100644 --- a/docs/examples/cf23f18761df33f08bc6f6d1875496fd.asciidoc +++ b/docs/examples/cf23f18761df33f08bc6f6d1875496fd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// how-to/size-your-shards.asciidoc:399 +// how-to/size-your-shards.asciidoc:398 [source, python] ---- diff --git a/docs/examples/d01a590fa9ea8a0cb34ed8dda502296c.asciidoc b/docs/examples/d01a590fa9ea8a0cb34ed8dda502296c.asciidoc new file mode 100644 index 000000000..2aa1436ea --- /dev/null +++ b/docs/examples/d01a590fa9ea8a0cb34ed8dda502296c.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// migration/transient-settings-migration-guide.asciidoc:38 + +[source, python] +---- +resp = client.cluster.get_settings( + flat_settings=True, + filter_path="transient", +) +print(resp) +---- diff --git a/docs/examples/b3479ee4586c15020549afae58d94d65.asciidoc b/docs/examples/d0595401ef09dc23579c9df111049c20.asciidoc similarity index 84% rename from docs/examples/b3479ee4586c15020549afae58d94d65.asciidoc rename to docs/examples/d0595401ef09dc23579c9df111049c20.asciidoc 
index 2ae2dff35..0159c24b9 100644 --- a/docs/examples/b3479ee4586c15020549afae58d94d65.asciidoc +++ b/docs/examples/d0595401ef09dc23579c9df111049c20.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/geo-point.asciidoc:225 +// mapping/types/geo-point.asciidoc:221 [source, python] ---- @@ -17,7 +17,8 @@ resp = client.indices.create( mappings={ "properties": { "point": { - "type": "geo_point" + "type": "geo_point", + "synthetic_source_keep": "arrays" } } }, @@ -29,13 +30,13 @@ resp1 = client.index( id="1", document={ "point": [ - { - "lat": -90, - "lon": -80 - }, { "lat": 10, "lon": 30 + }, + { + "lat": -90, + "lon": -80 } ] }, diff --git a/docs/examples/d0fde00ef381e61b8a9e99f18cb5970a.asciidoc b/docs/examples/d0fde00ef381e61b8a9e99f18cb5970a.asciidoc index bfbfbdf58..a0eed8cd0 100644 --- a/docs/examples/d0fde00ef381e61b8a9e99f18cb5970a.asciidoc +++ b/docs/examples/d0fde00ef381e61b8a9e99f18cb5970a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/simple-query-string-query.asciidoc:181 +// query-dsl/simple-query-string-query.asciidoc:184 [source, python] ---- diff --git a/docs/examples/3ab8f65fcb55a0e3664c55749ec41efd.asciidoc b/docs/examples/d1a285aa244ec461d68f13e7078a33c0.asciidoc similarity index 93% rename from docs/examples/3ab8f65fcb55a0e3664c55749ec41efd.asciidoc rename to docs/examples/d1a285aa244ec461d68f13e7078a33c0.asciidoc index 292815dbd..ee97186fe 100644 --- a/docs/examples/3ab8f65fcb55a0e3664c55749ec41efd.asciidoc +++ b/docs/examples/d1a285aa244ec461d68f13e7078a33c0.asciidoc @@ -32,8 +32,7 @@ resp = client.indices.create( "decimal_digit", "arabic_normalization", "persian_normalization", - "persian_stop", - "persian_stem" + "persian_stop" ] } } diff --git a/docs/examples/d1d8b6e642db1a7c70dbbf0fe6d8e92d.asciidoc b/docs/examples/d1d8b6e642db1a7c70dbbf0fe6d8e92d.asciidoc index 45bd12593..1ada14768 100644 --- a/docs/examples/d1d8b6e642db1a7c70dbbf0fe6d8e92d.asciidoc +++ b/docs/examples/d1d8b6e642db1a7c70dbbf0fe6d8e92d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/sparse-vector-query.asciidoc:195 +// query-dsl/sparse-vector-query.asciidoc:203 [source, python] ---- diff --git a/docs/examples/d260225cf97e068ead2a8a6bb5aefd90.asciidoc b/docs/examples/d260225cf97e068ead2a8a6bb5aefd90.asciidoc index 691f89eb5..7aaf95c43 100644 --- a/docs/examples/d260225cf97e068ead2a8a6bb5aefd90.asciidoc +++ b/docs/examples/d260225cf97e068ead2a8a6bb5aefd90.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// analysis/analyzers/lang-analyzer.asciidoc:1551 +// analysis/analyzers/lang-analyzer.asciidoc:1550 [source, python] ---- diff --git a/docs/examples/d3a0f648d0fd50b54a4e9ebe363c5047.asciidoc b/docs/examples/d3a0f648d0fd50b54a4e9ebe363c5047.asciidoc index fdf783f2b..693ffa099 100644 --- a/docs/examples/d3a0f648d0fd50b54a4e9ebe363c5047.asciidoc +++ b/docs/examples/d3a0f648d0fd50b54a4e9ebe363c5047.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/retrievers-examples.asciidoc:221 +// search/search-your-data/retrievers-examples.asciidoc:226 [source, python] ---- diff --git a/docs/examples/d3dccdb15822e971ededb9f6f7d8ada1.asciidoc b/docs/examples/d3dccdb15822e971ededb9f6f7d8ada1.asciidoc index f3ce80185..c2b12bf6e 100644 --- a/docs/examples/d3dccdb15822e971ededb9f6f7d8ada1.asciidoc +++ b/docs/examples/d3dccdb15822e971ededb9f6f7d8ada1.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// 
query-dsl/query-string-query.asciidoc:354 +// query-dsl/query-string-query.asciidoc:357 [source, python] ---- diff --git a/docs/examples/d5dcddc6398b473b6ad9bce5c6adf986.asciidoc b/docs/examples/d5dcddc6398b473b6ad9bce5c6adf986.asciidoc index a432416ab..6f5abb838 100644 --- a/docs/examples/d5dcddc6398b473b6ad9bce5c6adf986.asciidoc +++ b/docs/examples/d5dcddc6398b473b6ad9bce5c6adf986.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/paginate-search-results.asciidoc:435 +// search/search-your-data/paginate-search-results.asciidoc:450 [source, python] ---- diff --git a/docs/examples/d603e76ab70131f7ec6b08758f95a0e3.asciidoc b/docs/examples/d603e76ab70131f7ec6b08758f95a0e3.asciidoc index b2a1f1635..340a46837 100644 --- a/docs/examples/d603e76ab70131f7ec6b08758f95a0e3.asciidoc +++ b/docs/examples/d603e76ab70131f7ec6b08758f95a0e3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/recovery.asciidoc:148 +// cat/recovery.asciidoc:244 [source, python] ---- diff --git a/docs/examples/d6a21afa4a94b9baa734eac430940bcf.asciidoc b/docs/examples/d6a21afa4a94b9baa734eac430940bcf.asciidoc index 44333100e..077f80277 100644 --- a/docs/examples/d6a21afa4a94b9baa734eac430940bcf.asciidoc +++ b/docs/examples/d6a21afa4a94b9baa734eac430940bcf.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/list-connectors-api.asciidoc:95 +// connector/apis/list-connectors-api.asciidoc:92 [source, python] ---- diff --git a/docs/examples/d6a4548b29e939fb197189c20c7c016f.asciidoc b/docs/examples/d6a4548b29e939fb197189c20c7c016f.asciidoc deleted file mode 100644 index 55232f34a..000000000 --- a/docs/examples/d6a4548b29e939fb197189c20c7c016f.asciidoc +++ /dev/null @@ -1,17 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// inference/elastic-infer-service.asciidoc:115 - -[source, python] ----- -resp = client.inference.put( - task_type="chat_completion", - inference_id="chat-completion-endpoint", - inference_config={ - "service": "elastic", - "service_settings": { - "model_id": "model-1" - } - }, -) -print(resp) ----- diff --git a/docs/examples/d7919fb6f4d02dde1390775eb8365b79.asciidoc b/docs/examples/d7919fb6f4d02dde1390775eb8365b79.asciidoc index 3103cc120..a22bd28f0 100644 --- a/docs/examples/d7919fb6f4d02dde1390775eb8365b79.asciidoc +++ b/docs/examples/d7919fb6f4d02dde1390775eb8365b79.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/text.asciidoc:335 +// mapping/types/text.asciidoc:341 [source, python] ---- diff --git a/docs/examples/d7a55a7c491e97079e429483085f1d58.asciidoc b/docs/examples/d7a55a7c491e97079e429483085f1d58.asciidoc index a09d7affa..4ab78399a 100644 --- a/docs/examples/d7a55a7c491e97079e429483085f1d58.asciidoc +++ b/docs/examples/d7a55a7c491e97079e429483085f1d58.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc:60 +// data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc:68 [source, python] ---- diff --git a/docs/examples/d7a5b0159ffdcdd1ab9078b38829a08b.asciidoc b/docs/examples/d7a5b0159ffdcdd1ab9078b38829a08b.asciidoc index 1992809fe..a87336b58 100644 --- a/docs/examples/d7a5b0159ffdcdd1ab9078b38829a08b.asciidoc +++ b/docs/examples/d7a5b0159ffdcdd1ab9078b38829a08b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/semantic-query.asciidoc:87 +// query-dsl/semantic-query.asciidoc:85 [source, python] ---- diff --git 
a/docs/examples/d7fe687201ac87b307cd06ed015dd317.asciidoc b/docs/examples/d7fe687201ac87b307cd06ed015dd317.asciidoc index e713f4ada..c1c6f8b30 100644 --- a/docs/examples/d7fe687201ac87b307cd06ed015dd317.asciidoc +++ b/docs/examples/d7fe687201ac87b307cd06ed015dd317.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:288 +// indices/put-mapping.asciidoc:294 [source, python] ---- diff --git a/docs/examples/d803ed00d8f45f81c33e415e1c1ecb8c.asciidoc b/docs/examples/d803ed00d8f45f81c33e415e1c1ecb8c.asciidoc index 8c7f478d6..e938c437f 100644 --- a/docs/examples/d803ed00d8f45f81c33e415e1c1ecb8c.asciidoc +++ b/docs/examples/d803ed00d8f45f81c33e415e1c1ecb8c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/change-mappings-and-settings.asciidoc:642 +// data-streams/change-mappings-and-settings.asciidoc:646 [source, python] ---- diff --git a/docs/examples/d8496fa0e5a394fd758617ed6a6c956f.asciidoc b/docs/examples/d8496fa0e5a394fd758617ed6a6c956f.asciidoc index 9b1fae207..7b786e31e 100644 --- a/docs/examples/d8496fa0e5a394fd758617ed6a6c956f.asciidoc +++ b/docs/examples/d8496fa0e5a394fd758617ed6a6c956f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/percolate-query.asciidoc:373 +// query-dsl/percolate-query.asciidoc:371 [source, python] ---- diff --git a/docs/examples/d851282dba548251d10db5954a339307.asciidoc b/docs/examples/d851282dba548251d10db5954a339307.asciidoc index e47caa602..c81b69f08 100644 --- a/docs/examples/d851282dba548251d10db5954a339307.asciidoc +++ b/docs/examples/d851282dba548251d10db5954a339307.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/paginate-search-results.asciidoc:136 +// search/search-your-data/paginate-search-results.asciidoc:151 [source, python] ---- diff --git a/docs/examples/da8db0769dff7305f178c12b1111bc99.asciidoc b/docs/examples/da8db0769dff7305f178c12b1111bc99.asciidoc index 5a683b93f..12d20c3d4 100644 --- a/docs/examples/da8db0769dff7305f178c12b1111bc99.asciidoc +++ b/docs/examples/da8db0769dff7305f178c12b1111bc99.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/simple-query-string-query.asciidoc:262 +// query-dsl/simple-query-string-query.asciidoc:265 [source, python] ---- diff --git a/docs/examples/dcf82f3aacae49c0bb4ccbc673f13e9f.asciidoc b/docs/examples/dcf82f3aacae49c0bb4ccbc673f13e9f.asciidoc index c98d06e98..87e687d7e 100644 --- a/docs/examples/dcf82f3aacae49c0bb4ccbc673f13e9f.asciidoc +++ b/docs/examples/dcf82f3aacae49c0bb4ccbc673f13e9f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/knn-search.asciidoc:1202 +// search/search-your-data/knn-search.asciidoc:1200 [source, python] ---- diff --git a/docs/examples/dd16c9c981551c9da47ebb5ef5105fa0.asciidoc b/docs/examples/dd16c9c981551c9da47ebb5ef5105fa0.asciidoc deleted file mode 100644 index 8ffce9166..000000000 --- a/docs/examples/dd16c9c981551c9da47ebb5ef5105fa0.asciidoc +++ /dev/null @@ -1,57 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// migration/migrate_9_0.asciidoc:535 - -[source, python] ----- -resp = client.indices.update_aliases( - actions=[ - { - "add": { - "index": ".reindexed-v9-ml-anomalies-custom-example", - "alias": ".ml-anomalies-example1", - "filter": { - "term": { - "job_id": { - "value": "example1" - } - } - }, - "is_hidden": True - } - }, - { - "add": { - "index": ".reindexed-v9-ml-anomalies-custom-example", - "alias": 
".ml-anomalies-example2", - "filter": { - "term": { - "job_id": { - "value": "example2" - } - } - }, - "is_hidden": True - } - }, - { - "remove": { - "index": ".ml-anomalies-custom-example", - "aliases": ".ml-anomalies-*" - } - }, - { - "remove_index": { - "index": ".ml-anomalies-custom-example" - } - }, - { - "add": { - "index": ".reindexed-v9-ml-anomalies-custom-example", - "alias": ".ml-anomalies-custom-example", - "is_hidden": True - } - } - ], -) -print(resp) ----- diff --git a/docs/examples/dddb6a6ebd145f8411c5b4910d332f87.asciidoc b/docs/examples/dddb6a6ebd145f8411c5b4910d332f87.asciidoc index 79eadab36..79c3d4e11 100644 --- a/docs/examples/dddb6a6ebd145f8411c5b4910d332f87.asciidoc +++ b/docs/examples/dddb6a6ebd145f8411c5b4910d332f87.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// esql/multivalued-fields.asciidoc:233 +// esql/multivalued-fields.asciidoc:243 [source, python] ---- diff --git a/docs/examples/df04e2e9af66d5e30b1bfdbd458cab13.asciidoc b/docs/examples/df04e2e9af66d5e30b1bfdbd458cab13.asciidoc index ce180edf2..e4d62925c 100644 --- a/docs/examples/df04e2e9af66d5e30b1bfdbd458cab13.asciidoc +++ b/docs/examples/df04e2e9af66d5e30b1bfdbd458cab13.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// how-to/size-your-shards.asciidoc:239 +// how-to/size-your-shards.asciidoc:238 [source, python] ---- diff --git a/docs/examples/e017c2de6f93a8dd97f5c6e002dd5c4f.asciidoc b/docs/examples/e017c2de6f93a8dd97f5c6e002dd5c4f.asciidoc deleted file mode 100644 index ad5ef4204..000000000 --- a/docs/examples/e017c2de6f93a8dd97f5c6e002dd5c4f.asciidoc +++ /dev/null @@ -1,28 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// ml/anomaly-detection/apis/post-calendar-event.asciidoc:132 - -[source, python] ----- -resp = client.ml.post_calendar_events( - calendar_id="dst-germany", - events=[ - { - "description": "Fall 2024", - "start_time": 1729994400000, - "end_time": 1730167200000, - "skip_result": False, - "skip_model_update": False, - "force_time_shift": -3600 - }, - { - "description": "Spring 2025", - "start_time": 1743296400000, - "end_time": 1743469200000, - "skip_result": False, - "skip_model_update": False, - "force_time_shift": 3600 - } - ], -) -print(resp) ----- diff --git a/docs/examples/e0db84e8f7dce49b9301ce997ae831dd.asciidoc b/docs/examples/e0db84e8f7dce49b9301ce997ae831dd.asciidoc new file mode 100644 index 000000000..0cfee6490 --- /dev/null +++ b/docs/examples/e0db84e8f7dce49b9301ce997ae831dd.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// search/retriever.asciidoc:1071 + +[source, python] +---- +resp = client.search( + index="books", + retriever={ + "linear": { + "query": "elasticsearch", + "fields": [ + "title^3", + "description^2", + "title_semantic", + "description_semantic^2" + ], + "normalizer": "minmax" + } + }, +) +print(resp) +---- diff --git a/docs/examples/3649194a97d265a3bc758f8b38f7561e.asciidoc b/docs/examples/e197ab718ad6c3cfbf2dd908f0ccd60f.asciidoc similarity index 100% rename from docs/examples/3649194a97d265a3bc758f8b38f7561e.asciidoc rename to docs/examples/e197ab718ad6c3cfbf2dd908f0ccd60f.asciidoc diff --git a/docs/examples/e1c08f5774e81da31cd75aa1bdc2c548.asciidoc b/docs/examples/e1c08f5774e81da31cd75aa1bdc2c548.asciidoc index 6d6f4a99e..285006ecf 100644 --- a/docs/examples/e1c08f5774e81da31cd75aa1bdc2c548.asciidoc +++ b/docs/examples/e1c08f5774e81da31cd75aa1bdc2c548.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/percolate-query.asciidoc:688 +// 
query-dsl/percolate-query.asciidoc:686 [source, python] ---- diff --git a/docs/examples/e308899a306e61d1a590868308689955.asciidoc b/docs/examples/e308899a306e61d1a590868308689955.asciidoc index 8773d24c2..911aa3909 100644 --- a/docs/examples/e308899a306e61d1a590868308689955.asciidoc +++ b/docs/examples/e308899a306e61d1a590868308689955.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ingest/processors/ip-location.asciidoc:136 +// ingest/processors/ip-location.asciidoc:151 [source, python] ---- diff --git a/docs/examples/e318759f4cc932154f540b748e85d6e1.asciidoc b/docs/examples/e318759f4cc932154f540b748e85d6e1.asciidoc new file mode 100644 index 000000000..e1c12b182 --- /dev/null +++ b/docs/examples/e318759f4cc932154f540b748e85d6e1.asciidoc @@ -0,0 +1,22 @@ +// This file is autogenerated, DO NOT EDIT +// search/retriever.asciidoc:983 + +[source, python] +---- +resp = client.search( + index="books", + retriever={ + "linear": { + "query": "elasticsearch", + "fields": [ + "title^3", + "description^2", + "title_semantic", + "description_semantic^2" + ], + "normalizer": "minmax" + } + }, +) +print(resp) +---- diff --git a/docs/examples/e3fe842951dc873d7d00c8f6a010c53f.asciidoc b/docs/examples/e3fe842951dc873d7d00c8f6a010c53f.asciidoc index d9002cd4d..0109eed73 100644 --- a/docs/examples/e3fe842951dc873d7d00c8f6a010c53f.asciidoc +++ b/docs/examples/e3fe842951dc873d7d00c8f6a010c53f.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// troubleshooting/common-issues/task-queue-backlog.asciidoc:90 +// troubleshooting/common-issues/task-queue-backlog.asciidoc:67 [source, python] ---- diff --git a/docs/examples/e4b6a6a921c97b4c0bbe97bd89f4cf33.asciidoc b/docs/examples/e4b6a6a921c97b4c0bbe97bd89f4cf33.asciidoc index fcc4bd7da..a57a290f7 100644 --- a/docs/examples/e4b6a6a921c97b4c0bbe97bd89f4cf33.asciidoc +++ b/docs/examples/e4b6a6a921c97b4c0bbe97bd89f4cf33.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/promote-data-stream-api.asciidoc:32 +// data-streams/promote-data-stream-api.asciidoc:28 [source, python] ---- diff --git a/docs/examples/e4be53736bcc02b03068fd72fdbfe271.asciidoc b/docs/examples/e4be53736bcc02b03068fd72fdbfe271.asciidoc index ee96d3031..6a863ea8c 100644 --- a/docs/examples/e4be53736bcc02b03068fd72fdbfe271.asciidoc +++ b/docs/examples/e4be53736bcc02b03068fd72fdbfe271.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:114 +// indices/put-mapping.asciidoc:120 [source, python] ---- diff --git a/docs/examples/e563da4a2054efcdf0e53ead11caac1d.asciidoc b/docs/examples/e563da4a2054efcdf0e53ead11caac1d.asciidoc new file mode 100644 index 000000000..974feda9c --- /dev/null +++ b/docs/examples/e563da4a2054efcdf0e53ead11caac1d.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/semantic-text-hybrid-search:219 + +[source, python] +---- +resp = client.esql.query( + format="txt", + query="\n FROM semantic-embeddings METADATA _score \n | WHERE content: \"muscle soreness running?\" OR match(semantic_text, \"How to avoid muscle soreness while running?\", { \"boost\": 0.75 }) \n | SORT _score DESC \n | LIMIT 1000\n ", +) +print(resp) +---- diff --git a/docs/examples/e6f6d3aeea7ecea47cfd5c3d727f7004.asciidoc b/docs/examples/e6f6d3aeea7ecea47cfd5c3d727f7004.asciidoc index 28e0f8426..a5824b98c 100644 --- a/docs/examples/e6f6d3aeea7ecea47cfd5c3d727f7004.asciidoc +++ b/docs/examples/e6f6d3aeea7ecea47cfd5c3d727f7004.asciidoc @@ -1,5 +1,5 @@ 
// This file is autogenerated, DO NOT EDIT -// search/search-your-data/retrievers-examples.asciidoc:448 +// search/search-your-data/retrievers-examples.asciidoc:719 [source, python] ---- diff --git a/docs/examples/e715fb8c792bf09ac98f0ceca99beb84.asciidoc b/docs/examples/e715fb8c792bf09ac98f0ceca99beb84.asciidoc deleted file mode 100644 index e799bf64f..000000000 --- a/docs/examples/e715fb8c792bf09ac98f0ceca99beb84.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// migration/migrate_9_0.asciidoc:345 - -[source, python] ----- -resp = client.migration.deprecations( - index=".ml-anomalies-*", -) -print(resp) ----- diff --git a/docs/examples/e821d27a8b810821707ba860e31f8b78.asciidoc b/docs/examples/e821d27a8b810821707ba860e31f8b78.asciidoc index d5cfc9760..2151278c1 100644 --- a/docs/examples/e821d27a8b810821707ba860e31f8b78.asciidoc +++ b/docs/examples/e821d27a8b810821707ba860e31f8b78.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// indices/put-mapping.asciidoc:238 +// indices/put-mapping.asciidoc:244 [source, python] ---- diff --git a/docs/examples/e8ea65153d7775f25b08dfdfe6954498.asciidoc b/docs/examples/e8ea65153d7775f25b08dfdfe6954498.asciidoc index 089cb7473..a3539abb9 100644 --- a/docs/examples/e8ea65153d7775f25b08dfdfe6954498.asciidoc +++ b/docs/examples/e8ea65153d7775f25b08dfdfe6954498.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/simple-query-string-query.asciidoc:245 +// query-dsl/simple-query-string-query.asciidoc:248 [source, python] ---- diff --git a/docs/examples/e95e61988dc3073a007f7b7445dd233b.asciidoc b/docs/examples/e95e61988dc3073a007f7b7445dd233b.asciidoc index 58ddd86d2..61c72e9d6 100644 --- a/docs/examples/e95e61988dc3073a007f7b7445dd233b.asciidoc +++ b/docs/examples/e95e61988dc3073a007f7b7445dd233b.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc:192 +// data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc:204 [source, python] ---- diff --git a/docs/examples/ea68e3428cc2ca3455bf312d09451489.asciidoc b/docs/examples/ea68e3428cc2ca3455bf312d09451489.asciidoc index a6ec582ef..af12d96b8 100644 --- a/docs/examples/ea68e3428cc2ca3455bf312d09451489.asciidoc +++ b/docs/examples/ea68e3428cc2ca3455bf312d09451489.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/knn-search.asciidoc:1244 +// search/search-your-data/knn-search.asciidoc:1242 [source, python] ---- diff --git a/docs/examples/eb54506fbc71a7d250e86b22d0600114.asciidoc b/docs/examples/eb54506fbc71a7d250e86b22d0600114.asciidoc index 1b7faae1f..8168d2136 100644 --- a/docs/examples/eb54506fbc71a7d250e86b22d0600114.asciidoc +++ b/docs/examples/eb54506fbc71a7d250e86b22d0600114.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/list-connectors-api.asciidoc:117 +// connector/apis/list-connectors-api.asciidoc:114 [source, python] ---- diff --git a/docs/examples/ec135f0cc0d3f526df68000b2a95c65b.asciidoc b/docs/examples/ec135f0cc0d3f526df68000b2a95c65b.asciidoc deleted file mode 100644 index 10f0be019..000000000 --- a/docs/examples/ec135f0cc0d3f526df68000b2a95c65b.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -// This file is autogenerated, DO NOT EDIT -// migration/migrate_9_0.asciidoc:403 - -[source, python] ----- -resp = client.indices.create_from( - source=".ml-anomalies-custom-example", - 
dest=".reindexed-v9-ml-anomalies-custom-example", - create_from=None, -) -print(resp) ----- diff --git a/docs/examples/ec4b43c3ebd8816799fa004596b2f0cb.asciidoc b/docs/examples/ec4b43c3ebd8816799fa004596b2f0cb.asciidoc index effaf77bd..cfc579861 100644 --- a/docs/examples/ec4b43c3ebd8816799fa004596b2f0cb.asciidoc +++ b/docs/examples/ec4b43c3ebd8816799fa004596b2f0cb.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// index-modules/slowlog.asciidoc:232 +// index-modules/slowlog.asciidoc:233 [source, python] ---- diff --git a/docs/examples/ec5a2ce156c36aaa267fa31dd9367307.asciidoc b/docs/examples/ec5a2ce156c36aaa267fa31dd9367307.asciidoc index 08b09c56b..2b0f00fd7 100644 --- a/docs/examples/ec5a2ce156c36aaa267fa31dd9367307.asciidoc +++ b/docs/examples/ec5a2ce156c36aaa267fa31dd9367307.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// transform/checkpoints.asciidoc:80 +// transform/checkpoints.asciidoc:82 [source, python] ---- diff --git a/docs/examples/edb25dc0162b039d477cb06aed2d6275.asciidoc b/docs/examples/edb25dc0162b039d477cb06aed2d6275.asciidoc index 76902ca50..835de5295 100644 --- a/docs/examples/edb25dc0162b039d477cb06aed2d6275.asciidoc +++ b/docs/examples/edb25dc0162b039d477cb06aed2d6275.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/sparse-vector-query.asciidoc:152 +// query-dsl/sparse-vector-query.asciidoc:160 [source, python] ---- diff --git a/docs/examples/ef866d06ffd96099957b077a53127c6c.asciidoc b/docs/examples/ef866d06ffd96099957b077a53127c6c.asciidoc new file mode 100644 index 000000000..20e857dc0 --- /dev/null +++ b/docs/examples/ef866d06ffd96099957b077a53127c6c.asciidoc @@ -0,0 +1,17 @@ +// This file is autogenerated, DO NOT EDIT +// mapping/types/semantic-text.asciidoc:182 + +[source, python] +---- +resp = client.index( + index="test-index", + id="1", + document={ + "my_semantic_field": [ + "my first chunk", + "my second chunk" + ] + }, +) +print(resp) +---- diff --git a/docs/examples/f097c02541056f3c0fc855e7bbeef8a8.asciidoc b/docs/examples/f097c02541056f3c0fc855e7bbeef8a8.asciidoc index dedc1caaa..373fbf824 100644 --- a/docs/examples/f097c02541056f3c0fc855e7bbeef8a8.asciidoc +++ b/docs/examples/f097c02541056f3c0fc855e7bbeef8a8.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// analysis/analyzers/lang-analyzer.asciidoc:1746 +// analysis/analyzers/lang-analyzer.asciidoc:1745 [source, python] ---- diff --git a/docs/examples/f298c4eb50ea97b34c57f8756eb350d3.asciidoc b/docs/examples/f298c4eb50ea97b34c57f8756eb350d3.asciidoc index e6b401225..53ca9632d 100644 --- a/docs/examples/f298c4eb50ea97b34c57f8756eb350d3.asciidoc +++ b/docs/examples/f298c4eb50ea97b34c57f8756eb350d3.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// cat/pending_tasks.asciidoc:57 +// cat/pending_tasks.asciidoc:58 [source, python] ---- diff --git a/docs/examples/f29b2674299ddf51a25ed87619025ede.asciidoc b/docs/examples/f29b2674299ddf51a25ed87619025ede.asciidoc index dfae20141..ce3e50526 100644 --- a/docs/examples/f29b2674299ddf51a25ed87619025ede.asciidoc +++ b/docs/examples/f29b2674299ddf51a25ed87619025ede.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/rollup-search.asciidoc:122 +// rollup/apis/rollup-search.asciidoc:121 [source, python] ---- diff --git a/docs/examples/f2a5f77f929cc7b893b80f4bd5b1a192.asciidoc b/docs/examples/f2a5f77f929cc7b893b80f4bd5b1a192.asciidoc index 322a5d096..864a3b8ba 100644 --- 
a/docs/examples/f2a5f77f929cc7b893b80f4bd5b1a192.asciidoc +++ b/docs/examples/f2a5f77f929cc7b893b80f4bd5b1a192.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// connector/apis/get-connector-api.asciidoc:74 +// connector/apis/get-connector-api.asciidoc:71 [source, python] ---- diff --git a/docs/examples/f32f0c19b42de3b87dd764fe4ca17e7c.asciidoc b/docs/examples/f32f0c19b42de3b87dd764fe4ca17e7c.asciidoc index 34cd44cc4..cb8604ae9 100644 --- a/docs/examples/f32f0c19b42de3b87dd764fe4ca17e7c.asciidoc +++ b/docs/examples/f32f0c19b42de3b87dd764fe4ca17e7c.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/query-string-query.asciidoc:420 +// query-dsl/query-string-query.asciidoc:423 [source, python] ---- diff --git a/docs/examples/f39512478cae2db8f4566a1e4af9e8f5.asciidoc b/docs/examples/f39512478cae2db8f4566a1e4af9e8f5.asciidoc index 569f9d2ad..271a51425 100644 --- a/docs/examples/f39512478cae2db8f4566a1e4af9e8f5.asciidoc +++ b/docs/examples/f39512478cae2db8f4566a1e4af9e8f5.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/rollup-getting-started.asciidoc:217 +// rollup/rollup-getting-started.asciidoc:213 [source, python] ---- diff --git a/docs/examples/f3fb52680482925c202c2e2f8af6f044.asciidoc b/docs/examples/f3fb52680482925c202c2e2f8af6f044.asciidoc index bd1263028..260106272 100644 --- a/docs/examples/f3fb52680482925c202c2e2f8af6f044.asciidoc +++ b/docs/examples/f3fb52680482925c202c2e2f8af6f044.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// how-to/size-your-shards.asciidoc:459 +// how-to/size-your-shards.asciidoc:458 [source, python] ---- diff --git a/docs/examples/b3f442a7d9eb391121dcab991787f9d6.asciidoc b/docs/examples/f4c0e6a9b58b640200d02047f5aa36bf.asciidoc similarity index 70% rename from docs/examples/b3f442a7d9eb391121dcab991787f9d6.asciidoc rename to docs/examples/f4c0e6a9b58b640200d02047f5aa36bf.asciidoc index 0f1dbf105..ab3c64711 100644 --- a/docs/examples/b3f442a7d9eb391121dcab991787f9d6.asciidoc +++ b/docs/examples/f4c0e6a9b58b640200d02047f5aa36bf.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/binary.asciidoc:68 +// mapping/types/keyword.asciidoc:292 [source, python] ---- @@ -16,9 +16,9 @@ resp = client.indices.create( }, mappings={ "properties": { - "binary": { - "type": "binary", - "doc_values": True + "kwd": { + "type": "keyword", + "null_value": "NA" } } }, @@ -29,9 +29,10 @@ resp1 = client.index( index="idx", id="1", document={ - "binary": [ - "IAA=", - "EAA=" + "kwd": [ + "foo", + None, + "bar" ] }, ) diff --git a/docs/examples/f60d6a7a9e7fd63f9ce1384f88c044cb.asciidoc b/docs/examples/f60d6a7a9e7fd63f9ce1384f88c044cb.asciidoc new file mode 100644 index 000000000..518951a31 --- /dev/null +++ b/docs/examples/f60d6a7a9e7fd63f9ce1384f88c044cb.asciidoc @@ -0,0 +1,11 @@ +// This file is autogenerated, DO NOT EDIT +// search/search-your-data/semantic-search-semantic-text.asciidoc:166 + +[source, python] +---- +resp = client.esql.query( + format="txt", + query="\n FROM semantic-embeddings METADATA _score \n | WHERE content: \"How to avoid muscle soreness while running?\" \n | SORT _score DESC \n | LIMIT 1000 \n ", +) +print(resp) +---- diff --git a/docs/examples/f6566395f85d3afe917228643d7318d6.asciidoc b/docs/examples/f6566395f85d3afe917228643d7318d6.asciidoc index c5bc986e9..1a38176a3 100644 --- a/docs/examples/f6566395f85d3afe917228643d7318d6.asciidoc +++ b/docs/examples/f6566395f85d3afe917228643d7318d6.asciidoc @@ -1,5 +1,5 @@ // This 
file is autogenerated, DO NOT EDIT -// how-to/size-your-shards.asciidoc:469 +// how-to/size-your-shards.asciidoc:468 [source, python] ---- diff --git a/docs/examples/f6d6d1f5c84cf4f6ccd71a84d193e483.asciidoc b/docs/examples/f6d6d1f5c84cf4f6ccd71a84d193e483.asciidoc new file mode 100644 index 000000000..fbfba946c --- /dev/null +++ b/docs/examples/f6d6d1f5c84cf4f6ccd71a84d193e483.asciidoc @@ -0,0 +1,24 @@ +// This file is autogenerated, DO NOT EDIT +// quickstart/esql-search-tutorial.asciidoc:340 + +[source, python] +---- +resp = client.index( + index="cooking_blog", + document={ + "title": "Mediterranean Quinoa Bowl", + "semantic_description": "A protein-rich bowl with quinoa, chickpeas, fresh vegetables, and herbs. This nutritious Mediterranean-inspired dish is easy to prepare and perfect for a quick, healthy dinner.", + "author": "Jamie Oliver", + "date": "2023-06-01", + "category": "Main Course", + "tags": [ + "vegetarian", + "healthy", + "mediterranean", + "quinoa" + ], + "rating": 4.7 + }, +) +print(resp) +---- diff --git a/docs/examples/f70a54cd9a9f4811bf962e469f2ca2ea.asciidoc b/docs/examples/f70a54cd9a9f4811bf962e469f2ca2ea.asciidoc index 9506f1299..f86a6169f 100644 --- a/docs/examples/f70a54cd9a9f4811bf962e469f2ca2ea.asciidoc +++ b/docs/examples/f70a54cd9a9f4811bf962e469f2ca2ea.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/bool-query.asciidoc:91 +// query-dsl/bool-query.asciidoc:134 [source, python] ---- diff --git a/docs/examples/f7ed127048694a1f5735e07a208b93ad.asciidoc b/docs/examples/f7ed127048694a1f5735e07a208b93ad.asciidoc new file mode 100644 index 000000000..a1e5b3a69 --- /dev/null +++ b/docs/examples/f7ed127048694a1f5735e07a208b93ad.asciidoc @@ -0,0 +1,20 @@ +// This file is autogenerated, DO NOT EDIT +// data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc:15 + +[source, python] +---- +resp = client.indices.delete_data_stream( + name="dsl-data-stream", +) +print(resp) + +resp1 = client.indices.delete_index_template( + name="dsl-data-stream-template", +) +print(resp1) + +resp2 = client.ilm.delete_lifecycle( + name="pre-dsl-ilm-policy", +) +print(resp2) +---- diff --git a/docs/examples/f8a0010753b1ff563dc42d703902d2fa.asciidoc b/docs/examples/f8a0010753b1ff563dc42d703902d2fa.asciidoc index ef6a5206b..efde9a426 100644 --- a/docs/examples/f8a0010753b1ff563dc42d703902d2fa.asciidoc +++ b/docs/examples/f8a0010753b1ff563dc42d703902d2fa.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/bool-query.asciidoc:39 +// query-dsl/bool-query.asciidoc:32 [source, python] ---- diff --git a/docs/examples/f8cb1a04c2e487ff006b5ae0e1a7afbd.asciidoc b/docs/examples/f8cb1a04c2e487ff006b5ae0e1a7afbd.asciidoc index f851ff1bc..d66ed0a8c 100644 --- a/docs/examples/f8cb1a04c2e487ff006b5ae0e1a7afbd.asciidoc +++ b/docs/examples/f8cb1a04c2e487ff006b5ae0e1a7afbd.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/apis/rollup-caps.asciidoc:181 +// rollup/apis/rollup-caps.asciidoc:178 [source, python] ---- diff --git a/docs/examples/f8f960550104c33e00dc78bc8723ccef.asciidoc b/docs/examples/f8f960550104c33e00dc78bc8723ccef.asciidoc index 9b0699474..e79356e78 100644 --- a/docs/examples/f8f960550104c33e00dc78bc8723ccef.asciidoc +++ b/docs/examples/f8f960550104c33e00dc78bc8723ccef.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// quickstart/full-text-filtering-tutorial.asciidoc:42 +// quickstart/esql-search-tutorial.asciidoc:69 [source, python] ---- diff --git 
a/docs/examples/f96d8131e8a592fbf6dfd686173940a9.asciidoc b/docs/examples/f96d8131e8a592fbf6dfd686173940a9.asciidoc index 051fb6d52..7c9ed5b9c 100644 --- a/docs/examples/f96d8131e8a592fbf6dfd686173940a9.asciidoc +++ b/docs/examples/f96d8131e8a592fbf6dfd686173940a9.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rest-api/watcher/update-settings.asciidoc:22 +// rest-api/watcher/update-settings.asciidoc:29 [source, python] ---- diff --git a/docs/examples/f994498dd6576be657dedce2822d2b9e.asciidoc b/docs/examples/f994498dd6576be657dedce2822d2b9e.asciidoc index b29501e73..88b368f7b 100644 --- a/docs/examples/f994498dd6576be657dedce2822d2b9e.asciidoc +++ b/docs/examples/f994498dd6576be657dedce2822d2b9e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// search/search-your-data/semantic-text-hybrid-search:119 +// search/search-your-data/semantic-text-hybrid-search:126 [source, python] ---- diff --git a/docs/examples/fa88f6f5a7d728ec4f1d05244228cb09.asciidoc b/docs/examples/fa88f6f5a7d728ec4f1d05244228cb09.asciidoc index f123ea93c..c1a005182 100644 --- a/docs/examples/fa88f6f5a7d728ec4f1d05244228cb09.asciidoc +++ b/docs/examples/fa88f6f5a7d728ec4f1d05244228cb09.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// query-dsl/bool-query.asciidoc:110 +// query-dsl/bool-query.asciidoc:153 [source, python] ---- diff --git a/docs/examples/fad26f4fb5a1bc9c38db33394e877d94.asciidoc b/docs/examples/fad26f4fb5a1bc9c38db33394e877d94.asciidoc index 3fb4bdc81..8c642c3db 100644 --- a/docs/examples/fad26f4fb5a1bc9c38db33394e877d94.asciidoc +++ b/docs/examples/fad26f4fb5a1bc9c38db33394e877d94.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// ml/df-analytics/apis/get-dfanalytics-stats.asciidoc:539 +// ml/df-analytics/apis/get-dfanalytics-stats.asciidoc:538 [source, python] ---- diff --git a/docs/examples/fb0152f6c70f647a8b6709969113486d.asciidoc b/docs/examples/fb0152f6c70f647a8b6709969113486d.asciidoc index 1548e7f1b..ae302dda7 100644 --- a/docs/examples/fb0152f6c70f647a8b6709969113486d.asciidoc +++ b/docs/examples/fb0152f6c70f647a8b6709969113486d.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/keyword.asciidoc:222 +// mapping/types/keyword.asciidoc:216 [source, python] ---- diff --git a/docs/examples/fe208d94ec93eabf3bd06139fa70701e.asciidoc b/docs/examples/fe208d94ec93eabf3bd06139fa70701e.asciidoc index f38b22cb3..21882266f 100644 --- a/docs/examples/fe208d94ec93eabf3bd06139fa70701e.asciidoc +++ b/docs/examples/fe208d94ec93eabf3bd06139fa70701e.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// rollup/migrating-to-downsampling.asciidoc:59 +// rollup/migrating-to-downsampling.asciidoc:58 [source, python] ---- diff --git a/docs/examples/fe54f3e53dbe7dee40ec3108a461d19a.asciidoc b/docs/examples/fe54f3e53dbe7dee40ec3108a461d19a.asciidoc index 3b0334fe2..a1cf06093 100644 --- a/docs/examples/fe54f3e53dbe7dee40ec3108a461d19a.asciidoc +++ b/docs/examples/fe54f3e53dbe7dee40ec3108a461d19a.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// security/authentication/jwt-realm.asciidoc:522 +// security/authentication/jwt-realm.asciidoc:532 [source, python] ---- diff --git a/docs/examples/fe6429d0d82174aa5acf95e96e237380.asciidoc b/docs/examples/fe6429d0d82174aa5acf95e96e237380.asciidoc index efcf594b4..2b0e615d4 100644 --- a/docs/examples/fe6429d0d82174aa5acf95e96e237380.asciidoc +++ b/docs/examples/fe6429d0d82174aa5acf95e96e237380.asciidoc @@ -1,5 +1,5 @@ // This 
file is autogenerated, DO NOT EDIT -// mapping/types/range.asciidoc:324 +// mapping/types/range.asciidoc:317 [source, python] ---- diff --git a/docs/examples/fe7169bab8e626f582c9ea87585d0f35.asciidoc b/docs/examples/fe7169bab8e626f582c9ea87585d0f35.asciidoc index 311ac7917..e78fb939b 100644 --- a/docs/examples/fe7169bab8e626f582c9ea87585d0f35.asciidoc +++ b/docs/examples/fe7169bab8e626f582c9ea87585d0f35.asciidoc @@ -1,5 +1,5 @@ // This file is autogenerated, DO NOT EDIT -// mapping/types/histogram.asciidoc:98 +// mapping/types/histogram.asciidoc:91 [source, python] ---- diff --git a/docs/guide/release-notes.asciidoc b/docs/guide/release-notes.asciidoc index 6973dc807..0ec09aaec 100644 --- a/docs/guide/release-notes.asciidoc +++ b/docs/guide/release-notes.asciidoc @@ -1,6 +1,38 @@ [[release-notes]] == Release notes +=== 8.19.0 (2025-07-30) + +- ES|QL query builder (technical preview) (https://github.com/elastic/elasticsearch-py/pull/2997[#2997]) +- Add option to disable accurate reporting of file and line location in warnings (Fixes #3003) (https://github.com/elastic/elasticsearch-py/pull/3006[#3006]) + +API updates + +- Remove `if_primary_term`, `if_seq_no` and `op_type` from Create API +- Remove `stored_fields` from Get Source API +- Remove `min_compatible_shard_node` from Async Search Submit API +- Remove `master_timeout` from Cat Aliases API +- Remove `master_timeout` from Ingest Get Ip Location Database API +- Remove `application`, `priviledge` and `username` from the Security Get User API +- Rename `type_query_string` to `type` in License Post Start Trial API +- Add `require_data_stream` to Index API +- Add `settings_filter` to Cluster Get Component Template API +- Add `cause` to Cluster Put Component Template API +- Add `ccs_minimize_roundtrips` to EQL Search API +- Add `keep_alive` and `keep_on_completion` to ES|QL Async Query API +- Add `format` to ES|QL Async Query Get API +- Add `allow_no_indices`, `expand_wildcards` and `ignore_available` to Indices Recovery API +- Add `input_type` to Inference API +- Add `timeout` to all Inference Put APIs +- Add Inference Put Custom API +- Add `refresh` to Security Get User Profile API +- Add `wait_for_completion` to the Snapshot Delete API + + +DSL Updates +- Handle lists in `copy_to` option in DSL field declarations correctly (Fixes #2992) (https://github.com/elastic/elasticsearch-py/pull/2994[#2994]) +- Add `key` to FiltersBucket type + === 8.18.1 (2025-04-29) - Update APIs diff --git a/elasticsearch/_version.py b/elasticsearch/_version.py index 29b586d2c..b1460500a 100644 --- a/elasticsearch/_version.py +++ b/elasticsearch/_version.py @@ -15,4 +15,4 @@ # specific language governing permissions and limitations # under the License. 
-__versionstr__ = "8.18.1" +__versionstr__ = "8.19.0" diff --git a/utils/generate-docs-examples/package-lock.json b/utils/generate-docs-examples/package-lock.json index 1d07e7c5e..d27f20d3d 100644 --- a/utils/generate-docs-examples/package-lock.json +++ b/utils/generate-docs-examples/package-lock.json @@ -103,9 +103,9 @@ } }, "node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", "license": "MIT", "dependencies": { "balanced-match": "^1.0.0", @@ -152,17 +152,6 @@ "url": "https://github.com/chalk/chalk?sponsor=1" } }, - "node_modules/child-process-promise": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/child-process-promise/-/child-process-promise-2.2.1.tgz", - "integrity": "sha512-Fi4aNdqBsr0mv+jgWxcZ/7rAIC2mgihrptyVI4foh/rrjY/3BNjfP9+oaiFx/fzim+1ZyCNBae0DlyfQhSugog==", - "license": "MIT", - "dependencies": { - "cross-spawn": "^4.0.2", - "node-version": "^1.0.0", - "promise-polyfill": "^6.0.1" - } - }, "node_modules/cli-cursor": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", @@ -229,16 +218,6 @@ "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", "license": "MIT" }, - "node_modules/cross-spawn": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-4.0.2.tgz", - "integrity": "sha512-yAXz/pA1tD8Gtg2S98Ekf/sewp3Lcp3YoFKJ4Hkp5h5yLWnKVTDU0kwjKJ8NDCYcfTLfyGkzTikst+jWypT1iA==", - "license": "MIT", - "dependencies": { - "lru-cache": "^4.0.1", - "which": "^1.2.9" - } - }, "node_modules/defaults": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/defaults/-/defaults-1.0.4.tgz", @@ -252,9 +231,9 @@ } }, "node_modules/find-my-way-ts": { - "version": "0.1.5", - "resolved": "https://registry.npmjs.org/find-my-way-ts/-/find-my-way-ts-0.1.5.tgz", - "integrity": "sha512-4GOTMrpGQVzsCH2ruUn2vmwzV/02zF4q+ybhCIrw/Rkt3L8KWcycdC6aJMctJzwN4fXD4SD5F/4B9Sksh5rE0A==", + "version": "0.1.6", + "resolved": "https://registry.npmjs.org/find-my-way-ts/-/find-my-way-ts-0.1.6.tgz", + "integrity": "sha512-a85L9ZoXtNAey3Y6Z+eBWW658kO/MwR7zIafkIUPUMf3isZG0NCs2pjW2wtjxAKuJPxMAsHUIP4ZPGv0o5gyTA==", "license": "MIT" }, "node_modules/fs.realpath": { @@ -372,12 +351,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", - "license": "ISC" - }, "node_modules/log-symbols": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", @@ -394,16 +367,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/lru-cache": { - "version": "4.1.5", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.5.tgz", - "integrity": "sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g==", - "license": "ISC", - "dependencies": { - "pseudomap": "^1.0.2", - "yallist": "^2.1.2" - } - }, "node_modules/mimic-fn": 
{ "version": "2.1.0", "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", @@ -460,15 +423,6 @@ } } }, - "node_modules/node-version": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/node-version/-/node-version-1.2.0.tgz", - "integrity": "sha512-ma6oU4Sk0qOoKEAymVoTvk8EdXEobdS7m/mAGhDJ8Rouugho48crHBORAmy5BoOcv8wraPM6xumapQp5hl4iIQ==", - "license": "MIT", - "engines": { - "node": ">=6.0.0" - } - }, "node_modules/once": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", @@ -540,18 +494,6 @@ "url": "https://github.com/prettier/prettier?sponsor=1" } }, - "node_modules/promise-polyfill": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/promise-polyfill/-/promise-polyfill-6.1.0.tgz", - "integrity": "sha512-g0LWaH0gFsxovsU7R5LrrhHhWAWiHRnh1GPrhXnPgYsDkIqjRYUYSZEsej/wtleDrz5xVSIDbeKfidztp2XHFQ==", - "license": "MIT" - }, - "node_modules/pseudomap": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/pseudomap/-/pseudomap-1.0.2.tgz", - "integrity": "sha512-b/YwNhb8lk1Zz2+bXXpS/LK9OisiZZ1SNsSLxN1x2OXVEhW2Ckr/7mWE5vrC1ZTiJlD9g19jWszTmJsB+oEpFQ==", - "license": "ISC" - }, "node_modules/readable-stream": { "version": "3.6.2", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", @@ -713,18 +655,6 @@ "webidl-conversions": "^3.0.0" } }, - "node_modules/which": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", - "license": "ISC", - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "which": "bin/which" - } - }, "node_modules/wordwrap": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz", @@ -736,12 +666,6 @@ "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", "license": "ISC" - }, - "node_modules/yallist": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-2.1.2.tgz", - "integrity": "sha512-ncTzHV7NvsQZkYe1DW7cbDLm0YpzHmZF5r/iyP3ZnQtMiJ+pjzisCiMNI+Sj+xQF5pXhSHxSB3uDbsBTzY/c2A==", - "license": "ISC" } } } From aed893c34fbfcea5eb0b65aae52f32c403fd6791 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 1 Aug 2025 17:02:11 +0100 Subject: [PATCH 63/65] Address integration test failures in Python 3.8 (#3018) (#3019) (cherry picked from commit 67c444c8143bcbaf7c9d80516538575c989e6cd6) Co-authored-by: Miguel Grinberg --- test_elasticsearch/test_server/test_rest_api_spec.py | 9 ++++++++- test_elasticsearch/utils.py | 2 +- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/test_elasticsearch/test_server/test_rest_api_spec.py b/test_elasticsearch/test_server/test_rest_api_spec.py index a84f0822a..f12db87aa 100644 --- a/test_elasticsearch/test_server/test_rest_api_spec.py +++ b/test_elasticsearch/test_server/test_rest_api_spec.py @@ -78,6 +78,7 @@ "cluster/voting_config_exclusions", "entsearch/10_basic", "indices/clone", + "indices/data_stream_mappings[0]", "indices/resolve_cluster", "indices/settings", "indices/split", @@ -501,7 +502,13 @@ def remove_implicit_resolver(cls, tag_to_remove): ) # Download the zip and start reading YAML from the files in memory - package_zip = zipfile.ZipFile(io.BytesIO(http.request("GET", yaml_tests_url).data)) + 
package_zip = zipfile.ZipFile( + io.BytesIO( + http.request( + "GET", yaml_tests_url, retries=urllib3.Retry(3, redirect=10) + ).data + ) + ) for yaml_file in package_zip.namelist(): if not re.match(r"^.*\/tests\/.*\.ya?ml$", yaml_file): diff --git a/test_elasticsearch/utils.py b/test_elasticsearch/utils.py index 021deb76e..cfcb5259c 100644 --- a/test_elasticsearch/utils.py +++ b/test_elasticsearch/utils.py @@ -179,7 +179,7 @@ def wipe_data_streams(client): def wipe_indices(client): indices = client.cat.indices().strip().splitlines() if len(indices) > 0: - index_names = [i.split(" ")[2] for i in indices] + index_names = [i.split()[2] for i in indices] client.options(ignore_status=404).indices.delete( index=",".join(index_names), expand_wildcards="all", From edb1857bdd553ed447676cf696041834258e2c64 Mon Sep 17 00:00:00 2001 From: Miguel Grinberg Date: Fri, 1 Aug 2025 19:57:15 +0100 Subject: [PATCH 64/65] ES|QL query builder robustness fixes (#3017) (#3026) * Add note on how to prevent ES|QL injection attacks * Various additional query builder fixes * linter fixes --- docs/guide/esql-query-builder.asciidoc | 23 +++- elasticsearch/esql/__init__.py | 1 + elasticsearch/esql/esql.py | 119 +++++++++++++----- elasticsearch/esql/functions.py | 56 +++++---- .../_async/test_esql.py | 12 +- .../{ => test_integration}/_sync/test_esql.py | 12 +- test_elasticsearch/test_esql.py | 16 ++- 7 files changed, 176 insertions(+), 63 deletions(-) rename test_elasticsearch/test_dsl/{ => test_integration}/_async/test_esql.py (88%) rename test_elasticsearch/test_dsl/{ => test_integration}/_sync/test_esql.py (88%) diff --git a/docs/guide/esql-query-builder.asciidoc b/docs/guide/esql-query-builder.asciidoc index bf254cba1..eedeb2a1b 100644 --- a/docs/guide/esql-query-builder.asciidoc +++ b/docs/guide/esql-query-builder.asciidoc @@ -214,6 +214,27 @@ query = ( ) ---------------------------- +==== Preventing injection attacks + +ES|QL, like most query languages, is vulnerable to https://en.wikipedia.org/wiki/Code_injection[code injection attacks] if untrusted data provided by users is added to a query. To eliminate this risk, ES|QL allows untrusted data to be given separately from the query as parameters. + +Continuing with the example above, let's assume that the application needs a `find_employee_by_name()` function that searches for the name given as an argument. If this argument is received by the application from users, then it is considered untrusted and should not be added to the query directly. Here is how to code the function in a secure manner: + +[source, python] +---------------------------- +def find_employee_by_name(name): + query = ( + ESQL.from_("employees") + .keep("first_name", "last_name", "height") + .where(E("first_name") == E("?")) + ) + return client.esql.query(query=str(query), params=[name]) +---------------------------- + +Here the part of the query in which the untrusted data needs to be inserted is replaced with a parameter, which in ES|QL is defined by the question mark. When using Python expressions, the parameter must be given as `E("?")` so that it is treated as an expression and not as a literal string. + +The list of values given in the `params` argument to the query endpoint are assigned in order to the parameters defined in the query. + === Using ES|QL functions The ES|QL language includes a rich set of functions that can be used in expressions and conditionals. 
These can be included in expressions given as strings, as shown in the example below: @@ -248,6 +269,6 @@ query = ( ) ---------------------------- -Note that arguments passed to functions are assumed to be literals. When passing field names, it is necessary to wrap them with the `E()` helper function so that they are interpreted correctly. +Note that arguments passed to functions are assumed to be literals. When passing field names, parameters or other ES|QL expressions, it is necessary to wrap them with the `E()` helper function so that they are interpreted correctly. You can find the complete list of available functions in the Python client's https://elasticsearch-py.readthedocs.io/en/stable/esql.html#module-elasticsearch.esql.functions[ES|QL API reference documentation]. diff --git a/elasticsearch/esql/__init__.py b/elasticsearch/esql/__init__.py index d872c329a..8da8f852a 100644 --- a/elasticsearch/esql/__init__.py +++ b/elasticsearch/esql/__init__.py @@ -15,4 +15,5 @@ # specific language governing permissions and limitations # under the License. +from ..dsl import E # noqa: F401 from .esql import ESQL, and_, not_, or_ # noqa: F401 diff --git a/elasticsearch/esql/esql.py b/elasticsearch/esql/esql.py index 07ccdf839..05f4e3e3e 100644 --- a/elasticsearch/esql/esql.py +++ b/elasticsearch/esql/esql.py @@ -16,6 +16,7 @@ # under the License. import json +import re from abc import ABC, abstractmethod from typing import Any, Dict, Optional, Tuple, Type, Union @@ -111,6 +112,29 @@ def render(self) -> str: def _render_internal(self) -> str: pass + @staticmethod + def _format_index(index: IndexType) -> str: + return index._index._name if hasattr(index, "_index") else str(index) + + @staticmethod + def _format_id(id: FieldType, allow_patterns: bool = False) -> str: + s = str(id) # in case it is an InstrumentedField + if allow_patterns and "*" in s: + return s # patterns cannot be escaped + if re.fullmatch(r"[a-zA-Z_@][a-zA-Z0-9_\.]*", s): + return s + # this identifier needs to be escaped + s.replace("`", "``") + return f"`{s}`" + + @staticmethod + def _format_expr(expr: ExpressionType) -> str: + return ( + json.dumps(expr) + if not isinstance(expr, (str, InstrumentedExpression)) + else str(expr) + ) + def _is_forked(self) -> bool: if self.__class__.__name__ == "Fork": return True @@ -427,7 +451,7 @@ def sample(self, probability: float) -> "Sample": """ return Sample(self, probability) - def sort(self, *columns: FieldType) -> "Sort": + def sort(self, *columns: ExpressionType) -> "Sort": """The ``SORT`` processing command sorts a table on one or more columns. :param columns: The columns to sort on. 
@@ -570,15 +594,12 @@ def metadata(self, *fields: FieldType) -> "From": return self def _render_internal(self) -> str: - indices = [ - index if isinstance(index, str) else index._index._name - for index in self._indices - ] + indices = [self._format_index(index) for index in self._indices] s = f'{self.__class__.__name__.upper()} {", ".join(indices)}' if self._metadata_fields: s = ( s - + f' METADATA {", ".join([str(field) for field in self._metadata_fields])}' + + f' METADATA {", ".join([self._format_id(field) for field in self._metadata_fields])}' ) return s @@ -594,7 +615,11 @@ class Row(ESQLBase): def __init__(self, **params: ExpressionType): super().__init__() self._params = { - k: json.dumps(v) if not isinstance(v, InstrumentedExpression) else v + self._format_id(k): ( + json.dumps(v) + if not isinstance(v, InstrumentedExpression) + else self._format_expr(v) + ) for k, v in params.items() } @@ -615,7 +640,7 @@ def __init__(self, item: str): self._item = item def _render_internal(self) -> str: - return f"SHOW {self._item}" + return f"SHOW {self._format_id(self._item)}" class Branch(ESQLBase): @@ -667,11 +692,11 @@ def as_(self, type_name: str, pvalue_name: str) -> "ChangePoint": return self def _render_internal(self) -> str: - key = "" if not self._key else f" ON {self._key}" + key = "" if not self._key else f" ON {self._format_id(self._key)}" names = ( "" if not self._type_name and not self._pvalue_name - else f' AS {self._type_name or "type"}, {self._pvalue_name or "pvalue"}' + else f' AS {self._format_id(self._type_name or "type")}, {self._format_id(self._pvalue_name or "pvalue")}' ) return f"CHANGE_POINT {self._value}{key}{names}" @@ -709,12 +734,13 @@ def with_(self, inference_id: str) -> "Completion": def _render_internal(self) -> str: if self._inference_id is None: raise ValueError("The completion command requires an inference ID") + with_ = {"inference_id": self._inference_id} if self._named_prompt: column = list(self._named_prompt.keys())[0] prompt = list(self._named_prompt.values())[0] - return f"COMPLETION {column} = {prompt} WITH {self._inference_id}" + return f"COMPLETION {self._format_id(column)} = {self._format_id(prompt)} WITH {json.dumps(with_)}" else: - return f"COMPLETION {self._prompt[0]} WITH {self._inference_id}" + return f"COMPLETION {self._format_id(self._prompt[0])} WITH {json.dumps(with_)}" class Dissect(ESQLBase): @@ -742,9 +768,13 @@ def append_separator(self, separator: str) -> "Dissect": def _render_internal(self) -> str: sep = ( - "" if self._separator is None else f' APPEND_SEPARATOR="{self._separator}"' + "" + if self._separator is None + else f" APPEND_SEPARATOR={json.dumps(self._separator)}" + ) + return ( + f"DISSECT {self._format_id(self._input)} {json.dumps(self._pattern)}{sep}" ) - return f"DISSECT {self._input} {json.dumps(self._pattern)}{sep}" class Drop(ESQLBase): @@ -760,7 +790,7 @@ def __init__(self, parent: ESQLBase, *columns: FieldType): self._columns = columns def _render_internal(self) -> str: - return f'DROP {", ".join([str(col) for col in self._columns])}' + return f'DROP {", ".join([self._format_id(col, allow_patterns=True) for col in self._columns])}' class Enrich(ESQLBase): @@ -814,12 +844,18 @@ def with_(self, *fields: FieldType, **named_fields: FieldType) -> "Enrich": return self def _render_internal(self) -> str: - on = "" if self._match_field is None else f" ON {self._match_field}" + on = ( + "" + if self._match_field is None + else f" ON {self._format_id(self._match_field)}" + ) with_ = "" if self._named_fields: - with_ = f' 
WITH {", ".join([f"{name} = {field}" for name, field in self._named_fields.items()])}' + with_ = f' WITH {", ".join([f"{self._format_id(name)} = {self._format_id(field)}" for name, field in self._named_fields.items()])}' elif self._fields is not None: - with_ = f' WITH {", ".join([str(field) for field in self._fields])}' + with_ = ( + f' WITH {", ".join([self._format_id(field) for field in self._fields])}' + ) return f"ENRICH {self._policy}{on}{with_}" @@ -832,7 +868,10 @@ class Eval(ESQLBase): """ def __init__( - self, parent: ESQLBase, *columns: FieldType, **named_columns: FieldType + self, + parent: ESQLBase, + *columns: ExpressionType, + **named_columns: ExpressionType, ): if columns and named_columns: raise ValueError( @@ -844,10 +883,13 @@ def __init__( def _render_internal(self) -> str: if isinstance(self._columns, dict): cols = ", ".join( - [f"{name} = {value}" for name, value in self._columns.items()] + [ + f"{self._format_id(name)} = {self._format_expr(value)}" + for name, value in self._columns.items() + ] ) else: - cols = ", ".join([f"{col}" for col in self._columns]) + cols = ", ".join([f"{self._format_expr(col)}" for col in self._columns]) return f"EVAL {cols}" @@ -900,7 +942,7 @@ def __init__(self, parent: ESQLBase, input: FieldType, pattern: str): self._pattern = pattern def _render_internal(self) -> str: - return f"GROK {self._input} {json.dumps(self._pattern)}" + return f"GROK {self._format_id(self._input)} {json.dumps(self._pattern)}" class Keep(ESQLBase): @@ -916,7 +958,7 @@ def __init__(self, parent: ESQLBase, *columns: FieldType): self._columns = columns def _render_internal(self) -> str: - return f'KEEP {", ".join([f"{col}" for col in self._columns])}' + return f'KEEP {", ".join([f"{self._format_id(col, allow_patterns=True)}" for col in self._columns])}' class Limit(ESQLBase): @@ -932,7 +974,7 @@ def __init__(self, parent: ESQLBase, max_number_of_rows: int): self._max_number_of_rows = max_number_of_rows def _render_internal(self) -> str: - return f"LIMIT {self._max_number_of_rows}" + return f"LIMIT {json.dumps(self._max_number_of_rows)}" class LookupJoin(ESQLBase): @@ -967,7 +1009,9 @@ def _render_internal(self) -> str: if isinstance(self._lookup_index, str) else self._lookup_index._index._name ) - return f"LOOKUP JOIN {index} ON {self._field}" + return ( + f"LOOKUP JOIN {self._format_index(index)} ON {self._format_id(self._field)}" + ) class MvExpand(ESQLBase): @@ -983,7 +1027,7 @@ def __init__(self, parent: ESQLBase, column: FieldType): self._column = column def _render_internal(self) -> str: - return f"MV_EXPAND {self._column}" + return f"MV_EXPAND {self._format_id(self._column)}" class Rename(ESQLBase): @@ -999,7 +1043,7 @@ def __init__(self, parent: ESQLBase, **columns: FieldType): self._columns = columns def _render_internal(self) -> str: - return f'RENAME {", ".join([f"{old_name} AS {new_name}" for old_name, new_name in self._columns.items()])}' + return f'RENAME {", ".join([f"{self._format_id(old_name)} AS {self._format_id(new_name)}" for old_name, new_name in self._columns.items()])}' class Sample(ESQLBase): @@ -1015,7 +1059,7 @@ def __init__(self, parent: ESQLBase, probability: float): self._probability = probability def _render_internal(self) -> str: - return f"SAMPLE {self._probability}" + return f"SAMPLE {json.dumps(self._probability)}" class Sort(ESQLBase): @@ -1026,12 +1070,16 @@ class Sort(ESQLBase): in a single expression. 
""" - def __init__(self, parent: ESQLBase, *columns: FieldType): + def __init__(self, parent: ESQLBase, *columns: ExpressionType): super().__init__(parent) self._columns = columns def _render_internal(self) -> str: - return f'SORT {", ".join([f"{col}" for col in self._columns])}' + sorts = [ + " ".join([self._format_id(term) for term in str(col).split(" ")]) + for col in self._columns + ] + return f'SORT {", ".join([f"{sort}" for sort in sorts])}' class Stats(ESQLBase): @@ -1062,14 +1110,17 @@ def by(self, *grouping_expressions: ExpressionType) -> "Stats": def _render_internal(self) -> str: if isinstance(self._expressions, dict): - exprs = [f"{key} = {value}" for key, value in self._expressions.items()] + exprs = [ + f"{self._format_id(key)} = {self._format_expr(value)}" + for key, value in self._expressions.items() + ] else: - exprs = [f"{expr}" for expr in self._expressions] + exprs = [f"{self._format_expr(expr)}" for expr in self._expressions] expression_separator = ",\n " by = ( "" if self._grouping_expressions is None - else f'\n BY {", ".join([f"{expr}" for expr in self._grouping_expressions])}' + else f'\n BY {", ".join([f"{self._format_expr(expr)}" for expr in self._grouping_expressions])}' ) return f'STATS {expression_separator.join([f"{expr}" for expr in exprs])}{by}' @@ -1087,7 +1138,7 @@ def __init__(self, parent: ESQLBase, *expressions: ExpressionType): self._expressions = expressions def _render_internal(self) -> str: - return f'WHERE {" AND ".join([f"{expr}" for expr in self._expressions])}' + return f'WHERE {" AND ".join([f"{self._format_expr(expr)}" for expr in self._expressions])}' def and_(*expressions: InstrumentedExpression) -> "InstrumentedExpression": diff --git a/elasticsearch/esql/functions.py b/elasticsearch/esql/functions.py index 515e3ddfc..91f18d2d8 100644 --- a/elasticsearch/esql/functions.py +++ b/elasticsearch/esql/functions.py @@ -19,11 +19,15 @@ from typing import Any from elasticsearch.dsl.document_base import InstrumentedExpression -from elasticsearch.esql.esql import ExpressionType +from elasticsearch.esql.esql import ESQLBase, ExpressionType def _render(v: Any) -> str: - return json.dumps(v) if not isinstance(v, InstrumentedExpression) else str(v) + return ( + json.dumps(v) + if not isinstance(v, InstrumentedExpression) + else ESQLBase._format_expr(v) + ) def abs(number: ExpressionType) -> InstrumentedExpression: @@ -69,7 +73,9 @@ def atan2( :param y_coordinate: y coordinate. If `null`, the function returns `null`. :param x_coordinate: x coordinate. If `null`, the function returns `null`. """ - return InstrumentedExpression(f"ATAN2({y_coordinate}, {x_coordinate})") + return InstrumentedExpression( + f"ATAN2({_render(y_coordinate)}, {_render(x_coordinate)})" + ) def avg(number: ExpressionType) -> InstrumentedExpression: @@ -114,7 +120,7 @@ def bucket( :param to: End of the range. Can be a number, a date or a date expressed as a string. """ return InstrumentedExpression( - f"BUCKET({_render(field)}, {_render(buckets)}, {from_}, {_render(to)})" + f"BUCKET({_render(field)}, {_render(buckets)}, {_render(from_)}, {_render(to)})" ) @@ -169,7 +175,7 @@ def cidr_match(ip: ExpressionType, block_x: ExpressionType) -> InstrumentedExpre :param ip: IP address of type `ip` (both IPv4 and IPv6 are supported). :param block_x: CIDR block to test the IP against. 
""" - return InstrumentedExpression(f"CIDR_MATCH({_render(ip)}, {block_x})") + return InstrumentedExpression(f"CIDR_MATCH({_render(ip)}, {_render(block_x)})") def coalesce(first: ExpressionType, rest: ExpressionType) -> InstrumentedExpression: @@ -264,7 +270,7 @@ def date_diff( :param end_timestamp: A string representing an end timestamp """ return InstrumentedExpression( - f"DATE_DIFF({_render(unit)}, {start_timestamp}, {end_timestamp})" + f"DATE_DIFF({_render(unit)}, {_render(start_timestamp)}, {_render(end_timestamp)})" ) @@ -285,7 +291,9 @@ def date_extract( the function returns `null`. :param date: Date expression. If `null`, the function returns `null`. """ - return InstrumentedExpression(f"DATE_EXTRACT({date_part}, {_render(date)})") + return InstrumentedExpression( + f"DATE_EXTRACT({_render(date_part)}, {_render(date)})" + ) def date_format( @@ -301,7 +309,7 @@ def date_format( """ if date_format is not None: return InstrumentedExpression( - f"DATE_FORMAT({json.dumps(date_format)}, {_render(date)})" + f"DATE_FORMAT({_render(date_format)}, {_render(date)})" ) else: return InstrumentedExpression(f"DATE_FORMAT({_render(date)})") @@ -317,7 +325,9 @@ def date_parse( :param date_string: Date expression as a string. If `null` or an empty string, the function returns `null`. """ - return InstrumentedExpression(f"DATE_PARSE({date_pattern}, {date_string})") + return InstrumentedExpression( + f"DATE_PARSE({_render(date_pattern)}, {_render(date_string)})" + ) def date_trunc( @@ -929,7 +939,7 @@ def replace( :param new_string: Replacement string. """ return InstrumentedExpression( - f"REPLACE({_render(string)}, {_render(regex)}, {new_string})" + f"REPLACE({_render(string)}, {_render(regex)}, {_render(new_string)})" ) @@ -1004,7 +1014,7 @@ def scalb(d: ExpressionType, scale_factor: ExpressionType) -> InstrumentedExpres :param scale_factor: Numeric expression for the scale factor. If `null`, the function returns `null`. """ - return InstrumentedExpression(f"SCALB({_render(d)}, {scale_factor})") + return InstrumentedExpression(f"SCALB({_render(d)}, {_render(scale_factor)})") def sha1(input: ExpressionType) -> InstrumentedExpression: @@ -1116,7 +1126,7 @@ def st_contains( first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters. """ - return InstrumentedExpression(f"ST_CONTAINS({geom_a}, {geom_b})") + return InstrumentedExpression(f"ST_CONTAINS({_render(geom_a)}, {_render(geom_b)})") def st_disjoint( @@ -1135,7 +1145,7 @@ def st_disjoint( first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters. """ - return InstrumentedExpression(f"ST_DISJOINT({geom_a}, {geom_b})") + return InstrumentedExpression(f"ST_DISJOINT({_render(geom_a)}, {_render(geom_b)})") def st_distance( @@ -1153,7 +1163,7 @@ def st_distance( also have the same coordinate system as the first. This means it is not possible to combine `geo_point` and `cartesian_point` parameters. """ - return InstrumentedExpression(f"ST_DISTANCE({geom_a}, {geom_b})") + return InstrumentedExpression(f"ST_DISTANCE({_render(geom_a)}, {_render(geom_b)})") def st_envelope(geometry: ExpressionType) -> InstrumentedExpression: @@ -1208,7 +1218,7 @@ def st_geohash_to_long(grid_id: ExpressionType) -> InstrumentedExpression: :param grid_id: Input geohash grid-id. The input can be a single- or multi-valued column or an expression. 
""" - return InstrumentedExpression(f"ST_GEOHASH_TO_LONG({grid_id})") + return InstrumentedExpression(f"ST_GEOHASH_TO_LONG({_render(grid_id)})") def st_geohash_to_string(grid_id: ExpressionType) -> InstrumentedExpression: @@ -1218,7 +1228,7 @@ def st_geohash_to_string(grid_id: ExpressionType) -> InstrumentedExpression: :param grid_id: Input geohash grid-id. The input can be a single- or multi-valued column or an expression. """ - return InstrumentedExpression(f"ST_GEOHASH_TO_STRING({grid_id})") + return InstrumentedExpression(f"ST_GEOHASH_TO_STRING({_render(grid_id)})") def st_geohex( @@ -1254,7 +1264,7 @@ def st_geohex_to_long(grid_id: ExpressionType) -> InstrumentedExpression: :param grid_id: Input geohex grid-id. The input can be a single- or multi-valued column or an expression. """ - return InstrumentedExpression(f"ST_GEOHEX_TO_LONG({grid_id})") + return InstrumentedExpression(f"ST_GEOHEX_TO_LONG({_render(grid_id)})") def st_geohex_to_string(grid_id: ExpressionType) -> InstrumentedExpression: @@ -1264,7 +1274,7 @@ def st_geohex_to_string(grid_id: ExpressionType) -> InstrumentedExpression: :param grid_id: Input Geohex grid-id. The input can be a single- or multi-valued column or an expression. """ - return InstrumentedExpression(f"ST_GEOHEX_TO_STRING({grid_id})") + return InstrumentedExpression(f"ST_GEOHEX_TO_STRING({_render(grid_id)})") def st_geotile( @@ -1300,7 +1310,7 @@ def st_geotile_to_long(grid_id: ExpressionType) -> InstrumentedExpression: :param grid_id: Input geotile grid-id. The input can be a single- or multi-valued column or an expression. """ - return InstrumentedExpression(f"ST_GEOTILE_TO_LONG({grid_id})") + return InstrumentedExpression(f"ST_GEOTILE_TO_LONG({_render(grid_id)})") def st_geotile_to_string(grid_id: ExpressionType) -> InstrumentedExpression: @@ -1310,7 +1320,7 @@ def st_geotile_to_string(grid_id: ExpressionType) -> InstrumentedExpression: :param grid_id: Input geotile grid-id. The input can be a single- or multi-valued column or an expression. """ - return InstrumentedExpression(f"ST_GEOTILE_TO_STRING({grid_id})") + return InstrumentedExpression(f"ST_GEOTILE_TO_STRING({_render(grid_id)})") def st_intersects( @@ -1330,7 +1340,9 @@ def st_intersects( first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters. """ - return InstrumentedExpression(f"ST_INTERSECTS({geom_a}, {geom_b})") + return InstrumentedExpression( + f"ST_INTERSECTS({_render(geom_a)}, {_render(geom_b)})" + ) def st_within(geom_a: ExpressionType, geom_b: ExpressionType) -> InstrumentedExpression: @@ -1346,7 +1358,7 @@ def st_within(geom_a: ExpressionType, geom_b: ExpressionType) -> InstrumentedExp first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters. 
""" - return InstrumentedExpression(f"ST_WITHIN({geom_a}, {geom_b})") + return InstrumentedExpression(f"ST_WITHIN({_render(geom_a)}, {_render(geom_b)})") def st_x(point: ExpressionType) -> InstrumentedExpression: diff --git a/test_elasticsearch/test_dsl/_async/test_esql.py b/test_elasticsearch/test_dsl/test_integration/_async/test_esql.py similarity index 88% rename from test_elasticsearch/test_dsl/_async/test_esql.py rename to test_elasticsearch/test_dsl/test_integration/_async/test_esql.py index 7aacb833c..27d26ca99 100644 --- a/test_elasticsearch/test_dsl/_async/test_esql.py +++ b/test_elasticsearch/test_dsl/test_integration/_async/test_esql.py @@ -17,7 +17,7 @@ import pytest -from elasticsearch.dsl import AsyncDocument, M +from elasticsearch.dsl import AsyncDocument, E, M from elasticsearch.esql import ESQL, functions @@ -91,3 +91,13 @@ async def test_esql(async_client): ) r = await async_client.esql.query(query=str(query)) assert r.body["values"] == [[1.95]] + + # find employees by name using a parameter + query = ( + ESQL.from_(Employee) + .where(Employee.first_name == E("?")) + .keep(Employee.last_name) + .sort(Employee.last_name.desc()) + ) + r = await async_client.esql.query(query=str(query), params=["Maria"]) + assert r.body["values"] == [["Luna"], ["Cannon"]] diff --git a/test_elasticsearch/test_dsl/_sync/test_esql.py b/test_elasticsearch/test_dsl/test_integration/_sync/test_esql.py similarity index 88% rename from test_elasticsearch/test_dsl/_sync/test_esql.py rename to test_elasticsearch/test_dsl/test_integration/_sync/test_esql.py index 1c4084fc7..85ceee5ae 100644 --- a/test_elasticsearch/test_dsl/_sync/test_esql.py +++ b/test_elasticsearch/test_dsl/test_integration/_sync/test_esql.py @@ -17,7 +17,7 @@ import pytest -from elasticsearch.dsl import Document, M +from elasticsearch.dsl import Document, E, M from elasticsearch.esql import ESQL, functions @@ -91,3 +91,13 @@ def test_esql(client): ) r = client.esql.query(query=str(query)) assert r.body["values"] == [[1.95]] + + # find employees by name using a parameter + query = ( + ESQL.from_(Employee) + .where(Employee.first_name == E("?")) + .keep(Employee.last_name) + .sort(Employee.last_name.desc()) + ) + r = client.esql.query(query=str(query), params=["Maria"]) + assert r.body["values"] == [["Luna"], ["Cannon"]] diff --git a/test_elasticsearch/test_esql.py b/test_elasticsearch/test_esql.py index 70c9ec679..35b026fb5 100644 --- a/test_elasticsearch/test_esql.py +++ b/test_elasticsearch/test_esql.py @@ -84,7 +84,7 @@ def test_completion(): assert ( query.render() == """ROW question = "What is Elasticsearch?" -| COMPLETION question WITH test_completion_model +| COMPLETION question WITH {"inference_id": "test_completion_model"} | KEEP question, completion""" ) @@ -97,7 +97,7 @@ def test_completion(): assert ( query.render() == """ROW question = "What is Elasticsearch?" 
-| COMPLETION answer = question WITH test_completion_model +| COMPLETION answer = question WITH {"inference_id": "test_completion_model"} | KEEP question, answer""" ) @@ -128,7 +128,7 @@ def test_completion(): "Synopsis: ", synopsis, "\\n", "Actors: ", MV_CONCAT(actors, ", "), "\\n", ) -| COMPLETION summary = prompt WITH test_completion_model +| COMPLETION summary = prompt WITH {"inference_id": "test_completion_model"} | KEEP title, summary, rating""" ) @@ -160,7 +160,7 @@ def test_completion(): | SORT rating DESC | LIMIT 10 | EVAL prompt = CONCAT("Summarize this movie using the following information: \\n", "Title: ", title, "\\n", "Synopsis: ", synopsis, "\\n", "Actors: ", MV_CONCAT(actors, ", "), "\\n") -| COMPLETION summary = prompt WITH test_completion_model +| COMPLETION summary = prompt WITH {"inference_id": "test_completion_model"} | KEEP title, summary, rating""" ) @@ -713,3 +713,11 @@ def test_match_operator(): == """FROM books | WHERE author:"Faulkner\"""" ) + + +def test_parameters(): + query = ESQL.from_("employees").where("name == ?") + assert query.render() == "FROM employees\n| WHERE name == ?" + + query = ESQL.from_("employees").where(E("name") == E("?")) + assert query.render() == "FROM employees\n| WHERE name == ?" From 292553f058814446a8d7396af6f11bf32fc974d6 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 5 Aug 2025 15:17:39 +0100 Subject: [PATCH 65/65] Minor improvement to fix in #3018 (#3031) (#3032) (cherry picked from commit a67c2eef139b1f55fca8ffa79f8c19d6a03dc6cc) Co-authored-by: Miguel Grinberg --- test_elasticsearch/test_server/test_rest_api_spec.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/test_elasticsearch/test_server/test_rest_api_spec.py b/test_elasticsearch/test_server/test_rest_api_spec.py index f12db87aa..768453c10 100644 --- a/test_elasticsearch/test_server/test_rest_api_spec.py +++ b/test_elasticsearch/test_server/test_rest_api_spec.py @@ -495,20 +495,14 @@ def remove_implicit_resolver(cls, tag_to_remove): # Try loading the REST API test specs from the Elastic Artifacts API try: # Construct the HTTP and Elasticsearch client - http = urllib3.PoolManager(retries=10) + http = urllib3.PoolManager(retries=urllib3.Retry(total=10)) yaml_tests_url = ( "https://api.github.com/repos/elastic/elasticsearch-clients-tests/zipball/main" ) # Download the zip and start reading YAML from the files in memory - package_zip = zipfile.ZipFile( - io.BytesIO( - http.request( - "GET", yaml_tests_url, retries=urllib3.Retry(3, redirect=10) - ).data - ) - ) + package_zip = zipfile.ZipFile(io.BytesIO(http.request("GET", yaml_tests_url).data)) for yaml_file in package_zip.namelist(): if not re.match(r"^.*\/tests\/.*\.ya?ml$", yaml_file):